python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import argparse
import os
import copy
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import sys
import time
from multiprocessing import Process, Value
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../..")
from examples.tensorflow.decoding.translate_example import translate
class TestDecoding(unittest.TestCase):
common_args_dict = {'batch_size' : 128,
'max_seq_len': 128,
'encoder_head_number': 8,
'encoder_size_per_head': 64,
'decoder_head_number': 8,
'decoder_size_per_head': 64,
'encoder_num_layer': 6,
'decoder_num_layer': 6,
'beam_search_diversity_rate': 0.0,
'sampling_topk': 1,
'sampling_topp': 0.0,
'source_vocabulary': "../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
'target_vocabulary': "../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
'source': "../examples/tensorflow/decoding/utils/translation/test.en",
'target': "../examples/tensorflow/decoding/utils/translation/test.de",
"remove_padding": "True",
"max_iteration": 10,
}
def check_result(self, beam_width, datatype, test_time, topk=4, topp=0.0, batch_size=-1,
decoder_bleu_score_threshold=None, decoding_bleu_score_threshold=None):
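# Run the translation in a separate process, presumably so each case gets a fresh TF graph/CUDA context and GPU memory is fully released between tests.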
p = Process(target=self.run_translate, args=(beam_width, datatype, test_time, topk, topp,
batch_size, decoder_bleu_score_threshold, decoding_bleu_score_threshold))
p.start()
p.join()
def run_translate(self, beam_width, datatype, test_time, topk=4, topp=0.0, batch_size=-1,
decoder_bleu_score_threshold=None, decoding_bleu_score_threshold=None):
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['beam_width'] = beam_width
args_dict['data_type'] = datatype
args_dict['test_time'] = test_time
args_dict['sampling_topk'] = topk
args_dict['sampling_topp'] = topp
args_dict['model_dir'] = "../translation/ckpt"
if batch_size != -1:
args_dict['batch_size'] = batch_size
tf.reset_default_graph()
translation_result_list = translate(args_dict)
# translation_result_list[0] is warmup, skip it
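# Indices 1 and 2 appear to hold the results of the FT decoder op and the FT decoding op, respectively.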
op_decoder_bleu_score = translation_result_list[1].bleu_score.score
op_decoding_bleu_score = translation_result_list[2].bleu_score.score
if decoder_bleu_score_threshold is not None:
self.assertTrue(op_decoder_bleu_score >= decoder_bleu_score_threshold)
if decoding_bleu_score_threshold is not None:
self.assertTrue(op_decoding_bleu_score >= decoding_bleu_score_threshold)
sys.stdout.flush()
def test_decoding_beamsearch_fp32(self):
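# decoding_gemm profiles GEMM algorithms for the given problem sizes and writes gemm_config.in, which the decoding ops read at run time; judging from the fp32/fp16 tests below, the trailing 0/1 argument selects the data type.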
os.system("./bin/decoding_gemm 32 4 8 64 2048 32001 128 512 0 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(4, 'fp32', '12', batch_size=32, decoder_bleu_score_threshold=37.0, decoding_bleu_score_threshold=37.0)
def test_decoding_beamsearch_fp16(self):
os.system("./bin/decoding_gemm 32 4 8 64 2048 32001 128 512 1 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(4, 'fp16', '12', batch_size=32, decoder_bleu_score_threshold=37.0, decoding_bleu_score_threshold=37.0)
def test_decoding_beamsearch_fp32_2(self):
os.system("./bin/decoding_gemm 16 32 8 64 2048 32001 128 512 0 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(32, 'fp32', '12', batch_size=16, decoder_bleu_score_threshold=35.0, decoding_bleu_score_threshold=35.0)
def test_decoding_beamsearch_fp16_2(self):
os.system("./bin/decoding_gemm 16 32 8 64 2048 32001 128 512 1 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(32, 'fp16', '12', batch_size=16, decoder_bleu_score_threshold=35.0, decoding_bleu_score_threshold=35.0)
def test_decoding_topk_sampling_fp32(self):
os.system("./bin/decoding_gemm 128 1 8 64 2048 32001 128 512 0 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(1, 'fp32', '45', 4, 0.0, decoder_bleu_score_threshold=25.0, decoding_bleu_score_threshold=25.0)
def test_decoding_topk_sampling_fp16(self):
os.system("./bin/decoding_gemm 128 1 8 64 2048 32001 128 512 1 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(1, 'fp16', '45', 4, 0.0, decoder_bleu_score_threshold=25.0, decoding_bleu_score_threshold=25.0)
def test_decoding_topk_sampling_fp32_2(self):
os.system("./bin/decoding_gemm 128 1 8 64 2048 32001 128 512 0 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(1, 'fp32', '45', 64, 0.0, decoder_bleu_score_threshold=19.0, decoding_bleu_score_threshold=17.0)
def test_decoding_topk_sampling_fp16_2(self):
os.system("./bin/decoding_gemm 128 1 8 64 2048 32001 128 512 1 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(1, 'fp16', '45', 64, 0.0, decoder_bleu_score_threshold=19.0, decoding_bleu_score_threshold=17.0)
def test_decoding_topp_sampling_fp32(self):
os.system("./bin/decoding_gemm 128 1 8 64 2048 32001 128 512 0 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(1, 'fp32', '45', 0, 0.5, decoder_bleu_score_threshold=30.0, decoding_bleu_score_threshold=29.0)
def test_decoding_topp_sampling_fp16(self):
os.system("./bin/decoding_gemm 128 1 8 64 2048 32001 128 512 1 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(1, 'fp16', '45', 0, 0.5, decoder_bleu_score_threshold=30.0, decoding_bleu_score_threshold=29.0)
def test_decoding_topp_sampling_fp32_2(self):
os.system("./bin/decoding_gemm 128 1 8 64 2048 32001 128 512 0 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(1, 'fp32', '45', 0, 0.9, decoder_bleu_score_threshold=16.0, decoding_bleu_score_threshold=14.5)
def test_decoding_topp_sampling_fp16_2(self):
os.system("./bin/decoding_gemm 128 1 8 64 2048 32001 128 512 1 > .tmp.gemm.log && cat gemm_config.in")
self.check_result(1, 'fp16', '45', 0, 0.9, decoder_bleu_score_threshold=16.0, decoding_bleu_score_threshold=14.5)
if __name__ == "__main__":
unittest.main()
| FasterTransformer-main | tests/decoding/tf_decoding_unit_test.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import tensorflow as tf
import numpy as np
import unittest
import sys
import os
import math
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
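# When True, test the batch-major (16-byte chunked) k/v cache layout expected by the fused kernel; see the layout notes in run_attn below.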
USE_CACHE_BATCH_MAJOR_ATTENTION = True
class TestFusedQKVMutiheadAttention(unittest.TestCase):
def test_attn_batch_fp32(self):
for b in [1, 4, 32, 128]:
tf.reset_default_graph()
self.run_attn(b, 128, 12, 64, tf.float32)
def test_attn_batch_fp16(self):
for b in [1, 4, 32, 128]:
tf.reset_default_graph()
self.run_attn(b, 128, 12, 64, tf.float16)
def test_attn_seq_fp32(self):
for seq in [64, 96, 128, 384]:
tf.reset_default_graph()
self.run_attn(4, seq, 12, 64, tf.float32)
def test_attn_seq_fp16(self):
for seq in [64, 96, 128, 384]:
tf.reset_default_graph()
self.run_attn(4, seq, 12, 64, tf.float16)
def test_attn_head_fp32(self):
for head in [8, 12, 16]:
tf.reset_default_graph()
self.run_attn(4, 128, head, 64, tf.float32)
def test_attn_head_fp16(self):
for head in [8, 12, 16]:
tf.reset_default_graph()
self.run_attn(4, 128, head, 64, tf.float16)
def test_attn_size_fp32(self):
for size in [32, 64, 80, 96, 112, 128, 144, 160, 192, 224, 256]:
tf.reset_default_graph()
self.run_attn(4, 128, 12, size, tf.float32)
def test_attn_size_fp16(self):
for size in [32, 64, 80, 96, 112, 128, 144, 160, 192, 224, 256]:
tf.reset_default_graph()
self.run_attn(4, 128, 12, size, tf.float16)
def run_attn(self, batch_size, seq_len, head_num, size_per_head, data_type):
threshold = 3e-5
if data_type == tf.float16:
threshold = 4e-3
# Inputs: qkv_buf and k/v cache
# Do: update k/v cache, and compute attention (Q*K, QK*V)
# Output: attention result, new k/v cache
# Notes: Only used for decoder, so seqlen of q is always 1.
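# Shapes: qkv_buf is [batch_size, 3, head_num, size_per_head] for the single new step; k_cache/v_cache hold the previous seq_len - 1 steps, so the updated caches have length seq_len.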
np.random.seed(1)
tf.set_random_seed(1)
qkv_buf = tf.random.normal([batch_size, 3, head_num, size_per_head], dtype=data_type)
qkv_bias = tf.random.normal([3, head_num, size_per_head], dtype=data_type)
k_cache = tf.random.normal([batch_size, head_num, seq_len - 1, size_per_head], dtype=data_type)
v_cache = tf.random.normal([batch_size, head_num, seq_len - 1, size_per_head], dtype=data_type)
q, k, v = tf.split(qkv_buf + qkv_bias, 3, axis=1)
q = tf.transpose(q, [0, 2, 1, 3])
k = tf.transpose(k, [0, 2, 1, 3])
v = tf.transpose(v, [0, 2, 1, 3])
keys = tf.concat([k_cache, k], axis=2)
values = tf.concat([v_cache, v], axis=2)
tf_k_cache = keys
tf_v_cache = values
q *= (size_per_head)**-0.5
dot = tf.matmul(q, keys, transpose_b=True)
attn = tf.cast(tf.nn.softmax(tf.cast(dot, data_type)), dot.dtype)
context = tf.matmul(attn, values)
tf_attn_result = tf.transpose(context, [0, 2, 1, 3])
fused_multihead_attention_op = tf.load_op_library(os.path.join('./lib/libtf_fused_self_attention.so'))
# When USE_CACHE_BATCH_MAJOR_ATTENTION is True:
# The layout of the cache buffer for the keys is [batch_size, head_num, size_per_head/x, seq_len, x]
# where x == 8 for FP16 and x == 4 for FP32 where the fastest moving dimension (contiguous data)
# is the rightmost one. The values for x are chosen to create chunks of 16 bytes.
# The layout of the cache buffer for the values is [batch_size, head_num, seq_len, size_per_head].
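# Example: with FP16 (x = 8) and size_per_head = 64, the key cache [b, h, seq_len, 64] is stored as [b, h, 8, seq_len, 8], so each innermost chunk of 8 half-precision values occupies 16 bytes.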
if USE_CACHE_BATCH_MAJOR_ATTENTION:
x = 8 if data_type == tf.float16 else 4
assert size_per_head % x == 0
ft_k_cache = tf.concat([k_cache, tf.zeros_like(k)], axis=2)
ft_k_cache_shape = np.array([batch_size, head_num, seq_len, size_per_head // x, x], dtype=np.int32)
ft_k_cache = tf.reshape(ft_k_cache, ft_k_cache_shape)
ft_k_cache = tf.transpose(ft_k_cache, [0, 1, 3, 2, 4])
ft_v_cache = tf.concat([v_cache, tf.zeros_like(v)], axis=2)
else:
ft_k_cache = tf.concat([k_cache, tf.zeros_like(k)], axis=2) # [batch_size, head_num, seq_len + 1, size_per_head]
ft_k_cache = tf.transpose(ft_k_cache, [2, 0, 1, 3]) # [seq_len + 1, batch_size, head_num, size_per_head]
ft_v_cache = tf.concat([v_cache, tf.zeros_like(v)], axis=2)
ft_v_cache = tf.transpose(ft_v_cache, [2, 0, 1, 3])
ft_attn_result, ft_k_cache, ft_v_cache = fused_multihead_attention_op.fused_qkv_multi_head_attention(qkv_buf,
qkv_bias,
ft_k_cache,
ft_v_cache,
batch_size,
seq_len,
head_num,
size_per_head)
if USE_CACHE_BATCH_MAJOR_ATTENTION:
ft_k_cache = tf.transpose(ft_k_cache, [0, 1, 3, 2, 4])
ft_k_cache_shape = np.array([batch_size, head_num, seq_len, size_per_head], dtype=np.int32)
ft_k_cache = tf.reshape(ft_k_cache, ft_k_cache_shape)
else:
ft_k_cache = tf.transpose(ft_k_cache, [1, 2, 0, 3]) # [batch_size, head_num, seq_len + 1, size_per_head]
ft_v_cache = tf.transpose(ft_v_cache, [1, 2, 0, 3])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
print(batch_size, seq_len, head_num, size_per_head)
sess.run(tf.global_variables_initializer())
tf_attn_result_val, ft_attn_result_val, k_cache_diff_val, v_cache_diff_val = sess.run([tf_attn_result,
ft_attn_result,
tf_k_cache - ft_k_cache,
tf_v_cache - ft_v_cache])
attn_diff_val = tf_attn_result_val - ft_attn_result_val
attn_max_diff = abs(attn_diff_val).max()
attn_max_diff_id = abs(attn_diff_val).argmax()
print("attn_max_diff_id = ", attn_max_diff_id)
k_cache_max_diff = abs(k_cache_diff_val).max()
v_cache_max_diff = abs(v_cache_diff_val).max()
print("tf_attn_result_val at max diff = ", tf_attn_result_val.flatten()[attn_max_diff_id])
print("ft_attn_result_val at max diff = ", ft_attn_result_val.flatten()[attn_max_diff_id])
print("threshold = ", threshold)
print(attn_max_diff)
print(k_cache_max_diff)
print(v_cache_max_diff)
sys.stdout.flush()
assert(attn_max_diff < threshold)
assert(k_cache_max_diff < threshold)
assert(v_cache_max_diff < threshold)
if __name__ == "__main__":
unittest.main()
| FasterTransformer-main | tests/decoding/tf_fused_self_multihead_attention_unit_test.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import os
import copy
os.environ['NVIDIA_TF32_OVERRIDE'] = '0'
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../..")
from examples.pytorch.bert.bert_example import bert_example
from examples.pytorch.encoder.encoder_example import encoder_example
class TestEncoder(unittest.TestCase):
common_args_dict = {'batch_size' : 4,
'layer_num' : 12,
'seq_len': 32,
'head_num': 12,
'head_size': 64,
'inter_size': 12 * 64 * 4,
'allow_gemm_test': False,
'sparse': False,
'time': False,
'data_type': 'fp32',
'remove_padding': False,
'avg_seq_len': -1,
'thread_num': 1,
'ths_path': 'lib/libth_transformer.so',
'weight_path': None,
'int8_mode': 0,
'tensor_para_size': 1,
'pipeline_para_size': 1,
'error_threshold': None,
}
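# Maximum tolerated output difference (the max_diff returned by the examples) for each data type.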
threshold = {'fp32': 4e-5, 'fp16': 4e-2, 'bf16': 5e-2 }
def test_batch_fp32(self):
args_dict = copy.deepcopy(self.common_args_dict)
for batch in [1, 8, 64, 128]:
args_dict['batch_size'] = batch
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['seq_len'],
args_dict['head_num'], args_dict['head_size'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_batch_fp16(self):
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp16'
for batch in [1, 8, 64, 128]:
args_dict['batch_size'] = batch
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['seq_len'],
args_dict['head_num'], args_dict['head_size'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_hidden_fp32(self):
args_dict = copy.deepcopy(self.common_args_dict)
for p in [tuple([12, 64]), tuple([16, 64]), tuple([4, 32]), tuple([8, 96])]:
args_dict['head_num'] = p[0]
args_dict['head_size'] = p[1]
args_dict['inter_size'] = p[0] * p[1] * 4
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['seq_len'],
args_dict['head_num'], args_dict['head_size'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_hidden_fp16(self):
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp16'
for p in [tuple([12, 64]), tuple([16, 64]), tuple([4, 32]), tuple([8, 96])]:
args_dict['head_num'] = p[0]
args_dict['head_size'] = p[1]
args_dict['inter_size'] = p[0] * p[1] * 4
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['seq_len'],
args_dict['head_num'], args_dict['head_size'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_seqlen_fp32(self):
args_dict = copy.deepcopy(self.common_args_dict)
for seqlen in [32, 130, 511, 1024, 1536]:
args_dict['seq_len'] = seqlen
if seqlen == 1536:
args_dict['layer_num'] = 6
threshold_tmp = {'fp32': 1e-4, 'fp16': 4e-2, 'bf16': 5e-2} # The error of encoder on this test is larger
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['seq_len'],
args_dict['head_num'], args_dict['head_size'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < threshold_tmp[args_dict['data_type']])
max_diff = encoder_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < threshold_tmp[args_dict['data_type']])
def test_seqlen_fp16(self):
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp16'
for seqlen in [32, 130, 511, 1024, 1536]:
args_dict['seq_len'] = seqlen
if seqlen == 1536:
args_dict['layer_num'] = 6
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['seq_len'],
args_dict['head_num'], args_dict['head_size'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
sys.stdout.flush()
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
if __name__ == "__main__":
unittest.main()
| FasterTransformer-main | tests/bert/th_bert_unit_test.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import os
import copy
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['NVIDIA_TF32_OVERRIDE'] = '0'
import tensorflow as tf
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../..")
from examples.tensorflow.bert.bert_example import bert_example
from examples.tensorflow.encoder.encoder_example import encoder_example
class TestEncoder(unittest.TestCase):
common_args_dict = {'batch_size' : 4,
'num_layer' : 12,
'max_seq_len': 32,
'head_number': 12,
'size_per_head': 64,
'inter_size': 12 * 64 * 4,
'allow_gemm_test': 'False',
'test_time': 0,
'data_type': 'fp32',
'remove_padding': 'False',
'avg_seq_len': -1,
'thread_num': 1,
'int8_mode': 0
}
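# Per-data-type tolerance on the max_diff returned by the examples; test_level (overridable from the command line, see __main__) controls how many of the tests below actually run.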
threshold = {'fp32': 3e-5, 'fp16': 4e-2, 'bf16': 5e-2 }
test_level = 1
def test_batch_fp32(self):
if self.test_level >= 3:
print(f"[INFO] test level {self.test_level}, run unit test test_batch_fp32 (level {3})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_batch_fp32 (level {3})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp32'
for batch in [1, 8, 64, 128]:
args_dict['batch_size'] = batch
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_batch_fp16(self):
if self.test_level >= 2:
print(f"[INFO] test level {self.test_level}, run unit test test_batch_fp16 (level {2})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_batch_fp16 (level {2})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp16'
for batch in [1, 8, 64, 128]:
args_dict['batch_size'] = batch
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_batch_bf16(self):
if self.test_level >= 2:
print(f"[INFO] test level {self.test_level}, run unit test test_batch_bf16 (level {2})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_batch_bf16 (level {2})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'bf16'
for batch in [1, 8, 64, 128]:
args_dict['batch_size'] = batch
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} 2 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head']))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_size_fp32(self):
if self.test_level >= 3:
print(f"[INFO] test level {self.test_level}, run unit test test_size_fp32 (level {3})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_size_fp32 (level {3})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp32'
args_dict['head_number'] = 8
for size in [32, 40, 64, 120, 128]:
args_dict['size_per_head'] = size
args_dict['inter_size'] = args_dict['head_number'] * size * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_size_fp16(self):
if self.test_level >= 2:
print(f"[INFO] test level {self.test_level}, run unit test test_size_fp16 (level {2})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_size_fp16 (level {2})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp16'
args_dict['head_number'] = 12
for size in [32, 40, 64, 120, 128]:
args_dict['size_per_head'] = size
args_dict['inter_size'] = args_dict['head_number'] * size * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_size_bf16(self):
if self.test_level >= 2:
print(f"[INFO] test level {self.test_level}, run unit test test_size_bf16 (level {2})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_size_bf16 (level {2})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'bf16'
args_dict['head_number'] = 12
for size in [32, 40, 64, 120, 128]:
args_dict['size_per_head'] = size
args_dict['inter_size'] = args_dict['head_number'] * size * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} 2 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head']))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_head_fp32(self):
if self.test_level >= 3:
print(f"[INFO] test level {self.test_level}, run unit test test_head_fp32 (level {3})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_head_fp32 (level {3})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp32'
args_dict['size_per_head'] = 64
for h in [8, 12, 17, 24, 29, 32]:
args_dict['head_number'] = h
args_dict['inter_size'] = h * args_dict['size_per_head'] * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_head_fp16(self):
if self.test_level >= 2:
print(f"[INFO] test level {self.test_level}, run unit test test_head_fp16 (level {2})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_head_fp16 (level {2})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp16'
args_dict['size_per_head'] = 64
for h in [8, 12, 17, 24, 29, 32]:
args_dict['head_number'] = h
args_dict['inter_size'] = h * args_dict['size_per_head'] * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_head_bf16(self):
if self.test_level >= 2:
print(f"[INFO] test level {self.test_level}, run unit test test_head_bf16 (level {2})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_head_bf16 (level {2})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'bf16'
args_dict['size_per_head'] = 64
for h in [8, 12, 17, 24, 29, 32]:
args_dict['head_number'] = h
args_dict['inter_size'] = h * args_dict['size_per_head'] * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} 2 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head']))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_hidden_fp32(self):
if self.test_level >= 3:
print(f"[INFO] test level {self.test_level}, run unit test test_hidden_fp32 (level {3})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_hidden_fp32 (level {3})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp32'
for p in [tuple([12, 64]), tuple([16, 64]), tuple([4, 32]), tuple([8, 96]), tuple([12, 120])]:
args_dict['head_number'] = p[0]
args_dict['size_per_head'] = p[1]
args_dict['inter_size'] = p[0] * p[1] * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_hidden_fp16(self):
if self.test_level >= 1:
print(f"[INFO] test level {self.test_level}, run unit test test_hidden_fp16 (level {1})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_hidden_fp16 (level {1})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp16'
for p in [tuple([12, 64]), tuple([16, 64]), tuple([4, 32]), tuple([8, 96]), tuple([12, 120])]:
args_dict['head_number'] = p[0]
args_dict['size_per_head'] = p[1]
args_dict['inter_size'] = p[0] * p[1] * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_hidden_bf16(self):
if self.test_level >= 1:
print(f"[INFO] test level {self.test_level}, run unit test test_hidden_bf16 (level {1})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_hidden_bf16 (level {1})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'bf16'
for p in [tuple([12, 64]), tuple([16, 64]), tuple([4, 32]), tuple([8, 96]), tuple([12, 120])]:
args_dict['head_number'] = p[0]
args_dict['size_per_head'] = p[1]
args_dict['inter_size'] = p[0] * p[1] * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} 2 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head']))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_seqlen_fp32(self):
if self.test_level >= 3:
print(f"[INFO] test level {self.test_level}, run unit test test_seqlen_fp32 (level {3})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_seqlen_fp32 (level {3})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp32'
for seqlen in [32, 130, 511, 1024, 1536]:
args_dict['max_seq_len'] = seqlen
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_seqlen_fp16(self):
if self.test_level >= 1:
print(f"[INFO] test level {self.test_level}, run unit test test_seqlen_fp16 (level {1})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_seqlen_fp16 (level {1})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp16'
for seqlen in [32, 130, 511, 1024, 1536]:
args_dict['max_seq_len'] = seqlen
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_seqlen_bf16(self):
if self.test_level >= 1:
print(f"[INFO] test level {self.test_level}, run unit test test_seqlen_bf16 (level {1})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_seqlen_bf16 (level {1})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'bf16'
for seqlen in [32, 130, 511, 1024, 1536]:
args_dict['max_seq_len'] = seqlen
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} 2 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head']))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_large_model_fp32(self):
if self.test_level >= 3:
print(f"[INFO] test level {self.test_level}, run unit test test_large_model_fp32 (level {3})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_large_model_fp32 (level {3})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp32'
args_dict['num_layer'] = 4
for p in [tuple([32, 64]), tuple([64, 64]), tuple([32, 128])]:
args_dict['head_number'] = p[0]
args_dict['size_per_head'] = p[1]
args_dict['inter_size'] = p[0] * p[1] * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < self.threshold[args_dict['data_type']])
def test_large_model_fp16(self):
if self.test_level >= 2:
print(f"[INFO] test level {self.test_level}, run unit test test_large_model_fp16 (level {2})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_large_model_fp16 (level {2})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'fp16'
args_dict['num_layer'] = 4
threshold = 0.08 # Use a larger tolerance for these larger models; whether this bound is tight enough still needs review
for p in [tuple([32, 64]), tuple([64, 64]), tuple([32, 128])]:
args_dict['head_number'] = p[0]
args_dict['size_per_head'] = p[1]
args_dict['inter_size'] = p[0] * p[1] * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} {} 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head'],
args_dict['data_type'] == 'fp16'))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < threshold)
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < threshold)
def test_large_model_bf16(self):
if self.test_level >= 2:
print(f"[INFO] test level {self.test_level}, run unit test test_large_model_bf16 (level {2})")
else:
print(f"[INFO] test level {self.test_level}, skip unit test test_large_model_bf16 (level {2})")
return
args_dict = copy.deepcopy(self.common_args_dict)
args_dict['data_type'] = 'bf16'
args_dict['num_layer'] = 4
threshold = 0.08 # Use a larger tolerance for these larger models; whether this bound is tight enough still needs review
for p in [tuple([32, 64]), tuple([64, 64]), tuple([32, 128])]:
args_dict['head_number'] = p[0]
args_dict['size_per_head'] = p[1]
args_dict['inter_size'] = p[0] * p[1] * 4
tf.reset_default_graph()
os.system("./bin/bert_gemm {} {} {} {} 2 0 > .tmp.gemm.log && cat gemm_config.in".format(args_dict['batch_size'], args_dict['max_seq_len'],
args_dict['head_number'], args_dict['size_per_head']))
max_diff = bert_example(args_dict)
self.assertTrue(max_diff < threshold)
max_diff = encoder_example(args_dict)
self.assertTrue(max_diff < threshold)
if __name__ == "__main__":
test_level = 1
if len(sys.argv) > 1:
test_level = sys.argv.pop()
TestEncoder.test_level = int(test_level)
unittest.main()
| FasterTransformer-main | tests/bert/tf_bert_unit_test.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import sys
import torch
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../..")
from examples.pytorch.longformer.longformer_qa import parse_from_config, build_ft_longformer, prepare_input, decode_output
class TestLongformerPytorchQA(unittest.TestCase):
def __init__(self, methodName: str) -> None:
super().__init__(methodName=methodName)
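# Three QA passages/questions/answers of increasing length; each test builds an FT Longformer for a given seq_len, batch size and data type and checks the decoded answers.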
self.passage_texts = [
"Jim Henson was a nice puppet",
"Tom went to the swamphack yesterday.",
"The Amazon rainforest (Portuguese: Floresta Amazônica or Amazônia; "
"Spanish: Selva Amazónica, Amazonía or usually Amazonia; "
"French: Forêt amazonienne; Dutch: Amazoneregenwoud), "
"also known in English as Amazonia or the Amazon Jungle, "
"is a moist broadleaf forest that covers most of the Amazon basin of South America. "
"This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), "
"of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. "
"This region includes territory belonging to nine nations. "
"The majority of the forest is contained within Brazil, with 60% of the rainforest, "
"followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, "
"Ecuador, Bolivia, Guyana, Suriname and French Guiana. "
"States or departments in four nations contain 'Amazonas' in their names. "
"The Amazon represents over half of the planet's remaining rainforests, "
"and comprises the largest and most biodiverse tract of tropical rainforest in the world, "
"with an estimated 390 billion individual trees divided into 16,000 species."
]
self.questions = [
"Who was Jim Henson?",
"When did Tom go to the swamphack?",
"Which name is also used to describe the Amazon rainforest in English?"
]
self.answers = [
"puppet",
"yesterday",
"Jungle"
]
self.model_dir = "examples/pytorch/longformer/longformer-large-4096-finetuned-triviaqa"
self.ft_longformer_lib = os.path.join('build', 'lib', 'libth_transformer.so')
def run_all_qa(self, seq_len, batch_size, ft_longformer, data_type):
for idx in range(len(self.passage_texts)):
passage_text = self.passage_texts[idx]
question = self.questions[idx]
answer = self.answers[idx]
input_ids_b, local_attn_mask_b, global_attn_mask_b, input_ids, actual_seq_len = prepare_input(
question, passage_text, seq_len, batch_size, self.model_dir, data_type)
with torch.no_grad():
outputs = ft_longformer(input_ids_b,
attention_mask=local_attn_mask_b,
global_attention_mask=global_attn_mask_b)
answer_predict = decode_output(outputs, self.model_dir, input_ids, actual_seq_len)
self.assertTrue(answer_predict.strip() == answer)
def test_fp32_with_qa_answer(self):
seq_len = 1024
batch_size = 1
max_global_token_num = 128
(layer_num, _, head_num, size_per_head,
intermediate_size, local_attn_window_size, attn_scaler) = parse_from_config(self.model_dir)
ft_longformer = build_ft_longformer(self.model_dir, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, self.ft_longformer_lib, data_type='fp32')
self.run_all_qa(seq_len, batch_size, ft_longformer, 'fp32')
def test_fp32_with_qa_answer_2(self):
seq_len = 2048
batch_size = 5
max_global_token_num = 96
(layer_num, _, head_num, size_per_head,
intermediate_size, local_attn_window_size, attn_scaler) = parse_from_config(self.model_dir)
ft_longformer = build_ft_longformer(self.model_dir, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, self.ft_longformer_lib, data_type='fp32')
self.run_all_qa(seq_len, batch_size, ft_longformer, 'fp32')
def test_fp32_with_qa_answer_3(self):
seq_len = 4096
batch_size = 3
max_global_token_num = 512
(layer_num, _, head_num, size_per_head,
intermediate_size, local_attn_window_size, attn_scaler) = parse_from_config(self.model_dir)
ft_longformer = build_ft_longformer(self.model_dir, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, self.ft_longformer_lib, data_type='fp32')
self.run_all_qa(seq_len, batch_size, ft_longformer, 'fp32')
def test_fp16_with_qa_answer(self):
seq_len = 1024
batch_size = 1
max_global_token_num = 128
(layer_num, _, head_num, size_per_head,
intermediate_size, local_attn_window_size, attn_scaler) = parse_from_config(self.model_dir)
ft_longformer = build_ft_longformer(self.model_dir, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, self.ft_longformer_lib, data_type='fp16')
self.run_all_qa(seq_len, batch_size, ft_longformer, 'fp16')
def test_fp16_with_qa_answer_2(self):
seq_len = 1536
batch_size = 4
max_global_token_num = 64
(layer_num, _, head_num, size_per_head,
intermediate_size, local_attn_window_size, attn_scaler) = parse_from_config(self.model_dir)
ft_longformer = build_ft_longformer(self.model_dir, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, self.ft_longformer_lib, data_type='fp16')
self.run_all_qa(seq_len, batch_size, ft_longformer, 'fp16')
def test_fp16_with_qa_answer_3(self):
seq_len = 4096
batch_size = 8
max_global_token_num = 256
(layer_num, _, head_num, size_per_head,
intermediate_size, local_attn_window_size, attn_scaler) = parse_from_config(self.model_dir)
ft_longformer = build_ft_longformer(self.model_dir, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, self.ft_longformer_lib, data_type='fp16')
self.run_all_qa(seq_len, batch_size, ft_longformer, 'fp16')
def test_bf16_with_qa_answer(self):
seq_len = 1024
batch_size = 1
max_global_token_num = 128
(layer_num, _, head_num, size_per_head,
intermediate_size, local_attn_window_size, attn_scaler) = parse_from_config(self.model_dir)
ft_longformer = build_ft_longformer(self.model_dir, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, self.ft_longformer_lib, data_type='bf16')
self.run_all_qa(seq_len, batch_size, ft_longformer, 'bf16')
def test_bf16_with_qa_answer_2(self):
seq_len = 1536
batch_size = 4
max_global_token_num = 64
(layer_num, _, head_num, size_per_head,
intermediate_size, local_attn_window_size, attn_scaler) = parse_from_config(self.model_dir)
ft_longformer = build_ft_longformer(self.model_dir, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, self.ft_longformer_lib, data_type='bf16')
self.run_all_qa(seq_len, batch_size, ft_longformer, 'bf16')
def test_bf16_with_qa_answer_3(self):
seq_len = 4096
batch_size = 8
max_global_token_num = 256
(layer_num, _, head_num, size_per_head,
intermediate_size, local_attn_window_size, attn_scaler) = parse_from_config(self.model_dir)
ft_longformer = build_ft_longformer(self.model_dir, layer_num, head_num, size_per_head,
intermediate_size, local_attn_window_size,
max_global_token_num, batch_size, seq_len,
attn_scaler, self.ft_longformer_lib, data_type='bf16')
self.run_all_qa(seq_len, batch_size, ft_longformer, 'bf16')
if __name__ == "__main__":
unittest.main()
| FasterTransformer-main | tests/longformer/py_longformer_unit_test.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
import numpy as np
import unittest
def random_cuda_tensor(shape, dtype, mean=0, std=1):
return torch.empty(shape, dtype=dtype, device="cuda").normal_(mean, std)
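# Reference MoE FC: route each row to its expert's weight matrix (dequantizing int8 weights with per-column scales when needed) and add that expert's bias.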
def basic_moe_fc(activations, expert_for_row, weights, scales, biases):
if weights.dtype == torch.int8:
weights = torch.multiply(weights, scales.unsqueeze(1))
weights = weights.to(activations.dtype)
elif weights.dtype != torch.bfloat16 and weights.dtype != torch.float16 and weights.dtype != torch.float32:
raise ValueError("Invalid data type for weights")
res = torch.zeros(size=[activations.shape[0], weights.shape[-1]], dtype=activations.dtype, device='cuda')
for row in range(activations.shape[0]):
row_expert = expert_for_row[row]
torch.matmul(activations[row], weights[row_expert], out=res[row : row + 1, :])
res[row] += biases[row_expert]
return res
def apply_act(inp, act_str):
if act_str == "identity":
return inp
elif act_str == "silu":
return torch.nn.SiLU()(inp)
elif act_str == "relu":
return torch.nn.ReLU()(inp)
elif act_str == "gelu":
return torch.nn.GELU(approximate="tanh")(inp)
else:
assert False, "Unsupported activation"
class TestMoeSoftmax(unittest.TestCase):
def setUp(self) -> None:
torch.classes.load_library("lib/libmoe_unit_ops.so")
self.gating_softmax = torch.ops.moe_unit_ops.gating_softmax
self.hidden = 1024
torch.manual_seed(5258732)
def gating_softmax_test_helper(self, dtype, rtol=1e-05, atol=1e-08):
batch_sizes = [128, 111, 75, 64, 44, 32, 23, 16, 5, 1]
seq_lens = [511, 250, 127, 64, 5, 1]
ks = range(4, 0, -1)
experts = [1024, 700, 512, 256, 200, 128, 64, 32, 18, 16, 10, 8, 5, 4, 2]
# Reference impl
for batch_size in batch_sizes:
for seq_len in seq_lens:
for num_experts in experts:
for k in ks:
if k > num_experts:
continue
# Some indices will mismatch due to FP arithmetic differences. We will fail if more than 1/500th do not match
allowed_idx_mismatches = max(5, batch_size * seq_len * k // 500)
inp = random_cuda_tensor([batch_size, seq_len, num_experts], dtype)
# Run ref in FP32 to keep softmax result in fp32 when doing top-k
gates = F.softmax(inp, dim=-1)
ref_vals, ref_idxs = torch.topk(gates, k, dim=-1)
ref_vals = ref_vals.to(dtype)
ref_rows = torch.arange(0, k*batch_size*seq_len, device="cuda")
# optimized impl
act_vals, act_idxs, act_rows = self.gating_softmax(inp.to(dtype), k)
val_err_msg = "Val failed on batch_size={}, seq_len={}, experts={}, k={}".format(batch_size, seq_len, num_experts, k)
idx_err_msg = "Idx failed on batch_size={}, seq_len={}, experts={}, k={}".format(batch_size, seq_len, num_experts, k)
row_err_msg = "Row failed on batch_size={}, seq_len={}, experts={}, k={}".format(batch_size, seq_len, num_experts, k)
torch.testing.assert_close(act_vals, ref_vals, rtol=rtol, atol=atol, msg=val_err_msg, check_dtype=False)
row_differences = torch.where(act_idxs != ref_idxs)[:-1]
sorted_ref_idxs_where_mismatched = torch.sort(ref_idxs[row_differences])[0]
sorted_act_idxs_where_mismatched = torch.sort(act_idxs[row_differences])[0]
values_equal = (ref_vals[row_differences] == act_vals[row_differences])
sorted_idxs_equal = (sorted_ref_idxs_where_mismatched == sorted_act_idxs_where_mismatched)
# These are not real mismatches because the output values are the same, but indices were reported in a different order.
false_mismatches = torch.all(torch.logical_and(values_equal, sorted_idxs_equal), dim=-1)
num_false_mismatches = torch.sum(false_mismatches)
mismatches = torch.count_nonzero(act_idxs != ref_idxs) - num_false_mismatches
np.testing.assert_array_less(mismatches.cpu().numpy(), allowed_idx_mismatches + 1, err_msg=idx_err_msg)
torch.testing.assert_close(act_rows.permute([2, 0,1]).reshape(-1), ref_rows, rtol=0, atol=0, msg=row_err_msg, check_dtype=False)
def test_fp32_gating_softmax(self):
self.gating_softmax_test_helper(torch.float32, rtol=1e-05, atol=1e-08)
def test_fp16_gating_softmax(self):
# Allow tolerance for fp16 since our implementation keeps fp32 after the softmax while torch does not.
self.gating_softmax_test_helper(torch.float16, rtol=1e-03, atol=1e-05)
def test_bf16_gating_softmax(self):
self.gating_softmax_test_helper(torch.bfloat16, rtol=1e-03, atol=0.005)
class TestGroupedGemmBias(unittest.TestCase):
def setUp(self) -> None:
torch.classes.load_library("lib/libth_transformer.so")
torch.classes.load_library("lib/libmoe_unit_ops.so")
self.grouped_gemm_bias = torch.ops.moe_unit_ops.grouped_gemm_bias
self.unpack_packed_int4s = torch.ops.fastertransformer.unpack_int4_packed_tensor_to_int8
self.pack_int4s = torch.ops.fastertransformer.pack_int8_tensor_to_packed_int4
self.preprocess_weights_for_mixed_gemm = torch.ops.fastertransformer.preprocess_weights_for_mixed_gemm
self.symmetric_quantizer = torch.ops.fastertransformer._symmetric_quantize_last_axis_of_batched_matrix
self.add_bias_and_interleave_int4s = torch.ops.fastertransformer._add_bias_and_interleave_int4s
self.add_bias_and_interleave_int8s = torch.ops.fastertransformer._add_bias_and_interleave_int8s
torch.manual_seed(734876213)
self.np_rng = np.random.default_rng(seed=82587632419)
def ref_moe_fc(self, activations, expert_for_row, weights, scales, biases, activation_str):
gemm_out = basic_moe_fc(activations, expert_for_row, weights, scales, biases)
return apply_act(gemm_out, activation_str)
def custom_moe_fc(self, torch_activations, torch_experts_for_rows, torch_weights, torch_scales, torch_biases, activation_str):
num_experts = torch_weights.size(0)
row_permutation = torch.argsort(torch_experts_for_rows)
torch_rows_per_expert = torch.zeros(size=[num_experts], dtype=torch.int32)
for expert in torch_experts_for_rows:
torch_rows_per_expert[expert] += 1
permutated_activations = torch_activations[row_permutation]
torch_rows_per_expert = torch_rows_per_expert.to('cuda')
res = self.grouped_gemm_bias(permutated_activations, torch_weights, torch_scales, torch_biases, torch_rows_per_expert, activation_str)
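# Scatter the outputs back to the original row order (undo the sort-by-expert permutation).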
res[row_permutation] = res[torch.arange(res.shape[0])]
return res
def setup_experts_for_row(self, num_rows, num_experts, active_experts):
# We use numpy here as torch does not have a way to express random choice as elegantly to my knowledge.
experts_arr = np.arange(num_experts)
selected_experts = self.np_rng.choice(experts_arr, size=[active_experts], replace=False)
# Ensure every selected expert is assigned at least one row by appending the selected experts themselves
expert_for_rows = self.np_rng.choice(selected_experts, size=[num_rows - active_experts], replace=True)
return torch.tensor(np.concatenate([expert_for_rows, selected_experts]))
def dequantize_test_helper(self, weight_type, quant_type):
assert quant_type == torch.int8 or quant_type == torch.quint4x2
lower_bound = -128 if quant_type == torch.int8 else -8
upper_bound = 127 if quant_type == torch.int8 else 7
m, n, k = 128, 128, 128
weights = torch.randint(lower_bound, upper_bound, [k, n], dtype=torch.int8, device="cpu")
packed_weight = self.pack_int4s(weights) if quant_type == torch.quint4x2 else weights
cuda_weights = self.preprocess_weights_for_mixed_gemm(packed_weight, quant_type).to("cuda")
weights = weights.to("cuda")
act = torch.eye(m, dtype=weight_type, device="cuda")
bias = torch.zeros([n], dtype=weight_type, device='cuda')
torch_weight_scales = torch.ones_like(bias)
experts_for_rows = self.setup_experts_for_row(m, 1, 1)
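# With identity activations and unit scales, the grouped GEMM output should reproduce the dequantized weights exactly, so it can be compared with zero tolerance against the raw integer weights.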
actual = self.custom_moe_fc(act, experts_for_rows, cuda_weights, torch_weight_scales, bias, "identity")
torch.testing.assert_close(actual, weights, atol=0.0, rtol=0.0, check_dtype=False)
def test_fp16_int8_dequantize(self):
self.dequantize_test_helper(torch.float16, torch.int8)
def test_bf16_int8_dequantize(self):
self.dequantize_test_helper(torch.bfloat16, torch.int8)
def test_fp16_int4_dequantize(self):
self.dequantize_test_helper(torch.float16, torch.quint4x2)
def test_bf16_int4_dequantize(self):
self.dequantize_test_helper(torch.bfloat16, torch.quint4x2)
def moe_fc1_test_helper(self, compute_type, weight_dtype, rtol, atol, activation_str):
torch.cuda.empty_cache() # Empty the cache here so a bad ordering does not cause OOM.
rows = list(range(40, 0, -1))
experts = [32, 128]
active_experts = list(range(32, 0, -1))
hidden_sizes = torch.tensor([1024])
inter_sizes = 4 * hidden_sizes
quantize = weight_dtype == torch.int8 or weight_dtype == torch.quint4x2
for num_experts in experts:
for hidden_size in hidden_sizes:
for inter_size in inter_sizes:
torch_weights = random_cuda_tensor((num_experts, hidden_size, inter_size), dtype=compute_type, mean=0, std=0.002)
torch_biases = torch.randn(size=(num_experts, inter_size), device="cuda", dtype=compute_type)
torch_weight_scales = torch.ones_like(torch_biases, dtype=torch_weights.dtype, device="cuda")
cpu_weights = torch_weights.cpu()
if quantize:
ref_torch_weights, act_torch_weights, torch_weight_scales = self.symmetric_quantizer(cpu_weights, weight_dtype)
ref_torch_weights = self.unpack_packed_int4s(ref_torch_weights) if weight_dtype == torch.quint4x2 else ref_torch_weights
ref_torch_weights = ref_torch_weights.to("cuda")
act_torch_weights = act_torch_weights.to("cuda")
torch_weight_scales = torch_weight_scales.to("cuda")
for num_rows in rows:
torch_activations = torch.randn(size=(num_rows, hidden_size), dtype=compute_type, device="cuda")
# torch_activations = torch.ones_like(torch_activations)
# act_torch_weights = torch.ones_like(act_torch_weights) + 128
# ref_torch_weights = torch.ones_like(ref_torch_weights)
# torch_biases = torch.zeros_like(torch_biases)
# torch_weight_scales = torch.ones_like(torch_weight_scales)
for num_active_experts in active_experts:
clipped_active_experts = min(num_rows, min(num_active_experts, num_experts))
torch_experts_for_rows = self.setup_experts_for_row(num_rows, num_experts, clipped_active_experts)
ref_wt = ref_torch_weights if quantize else torch_weights
reference = self.ref_moe_fc(torch_activations, torch_experts_for_rows, ref_wt, torch_weight_scales, torch_biases, activation_str)
act_wt = act_torch_weights if quantize else torch_weights
act_scales = torch_weight_scales if quantize else torch.empty(0, device="cuda", dtype=compute_type)
actual = self.custom_moe_fc(torch_activations, torch_experts_for_rows, act_wt, act_scales, torch_biases, activation_str)
msg = "FC1 Failed on rows={}, experts={}, active_experts={}, hidden_size={}, inter_size={}" \
.format(num_rows, num_experts, clipped_active_experts, hidden_size, inter_size)
torch.testing.assert_close(actual, reference, rtol=rtol, atol=atol, msg=msg, check_dtype=False)
def test_fp32_moe_fc(self):
self.moe_fc1_test_helper(torch.float32, torch.float32, rtol=1e-04, atol=1e-04, activation_str="identity")
def test_fp32_moe_fc_gelu(self):
self.moe_fc1_test_helper(torch.float32, torch.float32, rtol=1e-04, atol=1e-04, activation_str="gelu")
def test_fp32_moe_fc_relu(self):
self.moe_fc1_test_helper(torch.float32, torch.float32, rtol=1e-04, atol=1e-04, activation_str="relu")
def test_fp32_moe_fc_silu(self):
self.moe_fc1_test_helper(torch.float32, torch.float32, rtol=1e-04, atol=1e-04, activation_str="silu")
def test_fp16_moe_fc(self):
self.moe_fc1_test_helper(torch.float16, torch.float16, rtol=0.01, atol=0.005, activation_str="identity")
def test_fp16_moe_fc_gelu(self):
self.moe_fc1_test_helper(torch.float16, torch.float16, rtol=0.01, atol=0.005, activation_str="gelu")
def test_fp16_moe_fc_relu(self):
self.moe_fc1_test_helper(torch.float16, torch.float16, rtol=0.01, atol=0.005, activation_str="relu")
def test_fp16_moe_fc_silu(self):
self.moe_fc1_test_helper(torch.float16, torch.float16, rtol=0.01, atol=0.005, activation_str="silu")
def test_int8_fp16_moe_fc(self):
self.moe_fc1_test_helper(torch.float16, torch.int8, rtol=0.01, atol=0.005, activation_str="identity")
def test_int4_fp16_moe_fc_gelu(self):
self.moe_fc1_test_helper(torch.float16, torch.quint4x2, rtol=0.01, atol=0.005, activation_str="gelu")
def test_bf16_moe_fc_relu(self):
self.moe_fc1_test_helper(torch.bfloat16, torch.bfloat16, rtol=0.01, atol=0.005, activation_str="relu")
def test_int8_bf16_moe_fc_silu(self):
self.moe_fc1_test_helper(torch.bfloat16, torch.int8, rtol=0.01, atol=0.005, activation_str="silu")
def test_int4_bf16_moe_fc(self):
self.moe_fc1_test_helper(torch.bfloat16, torch.quint4x2, rtol=0.01, atol=0.005, activation_str="identity")
class TestMoe(unittest.TestCase):
def setUp(self) -> None:
torch.classes.load_library("lib/libth_transformer.so")
torch.classes.load_library("lib/libmoe_unit_ops.so")
self.run_moe_fc = torch.ops.moe_unit_ops.run_moe_fc
self.preprocess_weights_for_mixed_gemm = torch.ops.fastertransformer.preprocess_weights_for_mixed_gemm
self.unpack_packed_int4s = torch.ops.fastertransformer.unpack_int4_packed_tensor_to_int8
self.symmetric_quantizer = torch.ops.fastertransformer._symmetric_quantize_last_axis_of_batched_matrix
torch.manual_seed(734876213)
def generate_inputs(self, num_rows, active_rows, hidden_size, num_experts, dtype, quant_type):
inputs = dict()
inputs["input_activations"] = random_cuda_tensor([num_rows, hidden_size], dtype, mean=0, std=0.002)
inputs["gating_output"] = random_cuda_tensor([num_rows, num_experts], dtype)
inputs["skip_layer"] = random_cuda_tensor([num_rows, hidden_size], dtype)
num_finished_sentences = num_rows - active_rows
finished_sentences = torch.randint(0, num_rows, [num_finished_sentences], device="cuda")
inputs["finished"] = torch.zeros([num_rows], dtype=torch.bool, device="cuda")
inputs["finished"][finished_sentences] = True
return inputs
def generate_weights(self, hidden_size, inter_size, num_experts, dtype, quant_type):
weights = dict()
quantize = quant_type == torch.int8 or quant_type == torch.quint4x2
weights["fc1_expert_weights_for_ref"] = random_cuda_tensor([num_experts, hidden_size, inter_size], dtype, mean=0, std=0.002)
weights["fc1_expert_weights_for_ft"] = weights["fc1_expert_weights_for_ref"]
weights["fc1_scales"] = torch.ones(size=[num_experts, inter_size], dtype=dtype, device="cuda")
weights["fc1_expert_biases"] = random_cuda_tensor([num_experts, inter_size], dtype, mean=0, std=0.002)
weights["fc2_expert_weights_for_ref"] = random_cuda_tensor([num_experts, inter_size, hidden_size], dtype, mean=0, std=0.002)
weights["fc2_expert_weights_for_ft"] = weights["fc2_expert_weights_for_ref"]
weights["fc2_scales"] = torch.ones(size=[num_experts, hidden_size], dtype=dtype, device="cuda")
weights["fc2_expert_biases"] = random_cuda_tensor([num_experts, hidden_size], dtype, mean=0, std=0.002)
if quantize:
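            # The symmetric quantizer returns (reference quantized weights, kernel-layout weights
            # for the FT op, per-channel scales). Int4 reference weights are packed two per byte,
            # so they are unpacked to int8 before the torch reference path can use them.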
ref_torch_weights_fc1, act_torch_weights_fc1, torch_weight_scales_fc1 = self.symmetric_quantizer(weights["fc1_expert_weights_for_ft"].cpu(), quant_type)
ref_torch_weights_fc2, act_torch_weights_fc2, torch_weight_scales_fc2 = self.symmetric_quantizer(weights["fc2_expert_weights_for_ft"].cpu(), quant_type)
if quant_type == torch.quint4x2:
ref_torch_weights_fc1 = self.unpack_packed_int4s(ref_torch_weights_fc1)
ref_torch_weights_fc2 = self.unpack_packed_int4s(ref_torch_weights_fc2)
weights["fc1_expert_weights_for_ref"] = ref_torch_weights_fc1.to("cuda")
weights["fc1_expert_weights_for_ft"] = act_torch_weights_fc1.to("cuda")
weights["fc1_scales"] = torch_weight_scales_fc1.to("cuda")
weights["fc2_expert_weights_for_ref"] = ref_torch_weights_fc2.to("cuda")
weights["fc2_expert_weights_for_ft"] = act_torch_weights_fc2.to("cuda")
weights["fc2_scales"] = torch_weight_scales_fc2.to("cuda")
return weights
def run_ft_moe(self, input_dict, active_rows, k, activation_str):
moe_output = self.run_moe_fc(input_dict["input_activations"], input_dict["gating_output"], \
input_dict["fc1_expert_weights_for_ft"], input_dict["fc1_scales"], input_dict["fc1_expert_biases"], \
activation_str, \
input_dict["fc2_expert_weights_for_ft"], input_dict["fc2_scales"], input_dict["fc2_expert_biases"], \
input_dict["skip_layer"], input_dict["finished"], active_rows, k)
return moe_output
def run_ref_moe(self, input_dict, k, activation_str):
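        # Reference top-k MoE: softmax the gating logits, take the k highest-scoring experts per
        # row, push each row through the selected expert's FC1 -> activation -> FC2, and
        # accumulate the expert outputs (weighted by their gate scores) on top of the skip
        # connection.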
gates = F.softmax(input_dict["gating_output"].to(torch.float32), dim=-1).to(input_dict["gating_output"].dtype)
expert_scales, experts_for_row = torch.topk(gates, k, dim=-1)
output = torch.zeros_like(input_dict["input_activations"])
output += input_dict["skip_layer"]
for k_idx in range(k):
current_expert_scales = expert_scales[:, k_idx].unsqueeze(1)
current_experts_for_row = experts_for_row[:, k_idx]
moe_fc_1_result = basic_moe_fc(input_dict["input_activations"], current_experts_for_row,
input_dict["fc1_expert_weights_for_ref"], input_dict["fc1_scales"], input_dict["fc1_expert_biases"])
moe_fc_1_result = apply_act(moe_fc_1_result, activation_str)
moe_fc_2_result = basic_moe_fc(moe_fc_1_result, current_experts_for_row,
input_dict["fc2_expert_weights_for_ref"], input_dict["fc2_scales"], input_dict["fc2_expert_biases"])
output = output + current_expert_scales * moe_fc_2_result
return output
def moe_test_helper(self, dtype, quant_type, rtol, atol, activation_str="gelu", experts_list=[32], hidden_sizes=[1024], inter_sizes=[4096]):
torch.cuda.empty_cache() # Empty the cache here so a bad ordering does not cause OOM.
rows = [40, 1000]
ks = range(1, 9)
for hidden_size in hidden_sizes:
for inter_size in inter_sizes:
for experts in experts_list:
weights = self.generate_weights(hidden_size, inter_size, experts, dtype, quant_type)
for row in rows:
for active_rows in [1, row // 2, row]:
for k in ks:
if k > experts:
continue
input_dict = self.generate_inputs(row, active_rows, hidden_size, experts, dtype, quant_type)
input_dict.update(weights)
rows_to_check = torch.logical_not(input_dict["finished"])
                                # Only take unfinished rows; we can write anything to the output of rows that have already finished.
act_output = self.run_ft_moe(input_dict, row, k, activation_str)[rows_to_check]
ref_output = self.run_ref_moe(input_dict, k, activation_str)[rows_to_check]
msg = "Moe Failed on rows={}, active_rows={}, experts={}, k={}, hidden_size={}, inter_size={}" \
.format(row, active_rows, experts, k, hidden_size, inter_size)
torch.testing.assert_close(act_output, ref_output, rtol=rtol, atol=atol, msg=msg, check_dtype=False)
def test_moe_fp32_relu(self):
self.moe_test_helper(torch.float32, torch.float32, rtol=1e-3, atol=1e-6, \
activation_str="relu", \
experts_list=[64, 32, 16, 8, 4, 2], hidden_sizes=[2048, 1024], \
inter_sizes=[4096])
def test_moe_fp16_gelu(self):
self.moe_test_helper(torch.float16, torch.float16, rtol=1e-3, atol=0.005, \
activation_str="gelu", \
experts_list=[128, 30, 7, 5, 3], hidden_sizes=[2048, 1024], \
inter_sizes=[4096])
    # We limit the configs in the quantized code paths because weight quantization is quite slow;
    # sweeping as many configs as the FP32/FP16 tests would make these tests take too long.
def test_moe_fp16_int8_gelu(self):
self.moe_test_helper(torch.float16, torch.int8, rtol=1e-3, atol=1e-3, \
activation_str="gelu", \
experts_list=[135], hidden_sizes=[2048], \
inter_sizes=[4096])
def test_moe_fp16_int4_silu(self):
self.moe_test_helper(torch.float16, torch.quint4x2, rtol=1e-3, atol=1e-3, \
activation_str="silu", \
experts_list=[196], hidden_sizes=[1024], \
inter_sizes=[8192])
def test_moe_bf16_gelu(self):
self.moe_test_helper(torch.bfloat16, torch.bfloat16, rtol=1e-2, atol=0.005, \
activation_str="gelu", \
experts_list=[64, 32], hidden_sizes=[1024], \
inter_sizes=[4096])
    # We limit the configs in the quantized code paths because weight quantization is quite slow;
    # sweeping as many configs as the FP32/FP16 tests would make these tests take too long.
def test_moe_bf16_int8_relu(self):
self.moe_test_helper(torch.bfloat16, torch.int8, rtol=1e-2, atol=0.005, \
activation_str="relu", \
experts_list=[48], hidden_sizes=[1024], \
inter_sizes=[4096])
def test_moe_bf16_int4_identity(self):
self.moe_test_helper(torch.bfloat16, torch.quint4x2, rtol=1e-2, atol=0.005, \
activation_str="identity", \
experts_list=[256, 63], hidden_sizes=[1024], \
inter_sizes=[8192])
if __name__ == '__main__':
unittest.main() | FasterTransformer-main | tests/moe/th_moe_unit_tests.py |
FasterTransformer-main | examples/__init__.py |
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import onnx
from onnx import numpy_helper
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../../..")
from multiprocessing import Process
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
def split_and_convert_process(i, saved_dir, factor, key, args, val):
if key.find("input_layernorm.weight") != -1 or key.find("input_layernorm.bias") != -1 or \
key.find("attention.dense.bias") != -1 or key.find("post_attention_layernorm.weight") != -1 or \
key.find("post_attention_layernorm.bias") != -1 or key.find("mlp.dense_4h_to_h.bias") != -1 or \
key.find("final_layernorm.weight") != -1 or key.find("final_layernorm.bias") != -1:
# shared weights, only need to convert the weights of rank 0
if i == 0:
saved_path = saved_dir + "/model." + key + ".bin"
val.tofile(saved_path)
elif key.find("attention.dense.weight") != -1 or key.find("mlp.dense_4h_to_h.weight") != -1:
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1:
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("attention.query_key_value.bias") != -1:
local_dim = (int)(val.shape[-1] / 3)
val = val.reshape(3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("attention.query_key_value.weight") != -1:
hidden_dim = val.shape[0]
local_dim = (int)(val.shape[-1] / 3)
val = val.reshape(hidden_dim, 3, local_dim)
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
else:
print("[ERROR] cannot find key '{}'".format(key))
def split_and_convert(args):
saved_dir = args.saved_dir + "/%d-gpu/" % args.infer_gpu_num
    if not os.path.exists(saved_dir):
os.makedirs(saved_dir)
ckpt_name = args.in_file
t_gpu_num = args.trained_gpu_num
i_gpu_num = args.infer_gpu_num
assert(i_gpu_num % t_gpu_num == 0)
factor = (int)(i_gpu_num / t_gpu_num)
# load position_embedding from rank 0
model = onnx.load(ckpt_name)
onnx_model_name_pattern = [
"ln_1.bias",
"ln_1.weight",
"attn.c_attn.bias",
"attn.c_attn.weight",
"attn.c_proj.bias",
"attn.c_proj.weight",
"ln_2.bias",
"ln_2.weight",
"mlp.c_fc.bias",
"mlp.c_fc.weight",
"mlp.c_proj.bias",
"mlp.c_proj.weight",
]
ft_model_name_pattern = [
"input_layernorm.bias",
"input_layernorm.weight",
"attention.query_key_value.bias",
"attention.query_key_value.weight",
"attention.dense.bias",
"attention.dense.weight",
"post_attention_layernorm.bias",
"post_attention_layernorm.weight",
"mlp.dense_h_to_4h.bias",
"mlp.dense_h_to_4h.weight",
"mlp.dense_4h_to_h.bias",
"mlp.dense_4h_to_h.weight",
]
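    # The two name lists are index-aligned: the i-th ONNX (HF GPT-2) pattern is renamed to the
    # i-th FasterTransformer pattern before the per-tensor split is dispatched to a worker process.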
    process_list = []
for t in model.graph.initializer:
if t.name.find("weight") == -1 and t.name.find("bias") == -1:
continue
if t.name == 'wpe.weight':
numpy_helper.to_array(t).astype(np.float32).tofile(saved_dir + "model.wpe.bin")
elif t.name == 'wte.weight':
numpy_helper.to_array(t).astype(np.float32).tofile(saved_dir + "model.wte.bin")
elif t.name == 'ln_f.bias':
numpy_helper.to_array(t).astype(np.float32).tofile(saved_dir + "model.final_layernorm.bias.bin")
elif t.name == 'ln_f.weight':
numpy_helper.to_array(t).astype(np.float32).tofile(saved_dir + "model.final_layernorm.weight.bin")
else:
for i in range(len(onnx_model_name_pattern)):
if t.name.find(onnx_model_name_pattern[i]) != -1:
new_name = t.name.replace("h.", "layers.").replace(onnx_model_name_pattern[i], ft_model_name_pattern[i])
                    process_list.append(Process(target=split_and_convert_process, args=(0, saved_dir, factor, new_name, args, numpy_helper.to_array(t).astype(np.float32))))
                    process_list[-1].start()
    for t in process_list:
t.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-saved_dir', '-o', type=str, help='folder name of the output files', required=True)
parser.add_argument('-in_file', '-i', type=str, help='file name of input checkpoint file', required=True)
    parser.add_argument('-trained_gpu_num', '-t_g', type=int, help='How many gpus for training', default=1)
parser.add_argument('-infer_gpu_num', '-i_g', type=int, help='How many gpus for inference', required=True)
# parser.add_argument('-head_num', '-h_n', type=int, help='Number of heads', required=True)
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
split_and_convert(args)
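# Example invocation (hypothetical paths), converting a GPT-2 ONNX checkpoint for 2-way
# tensor parallelism:
#   python onnx_ckpt_convert.py -i gpt2.onnx -o ./models/gpt2-ft -i_g 2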
| FasterTransformer-main | examples/onnx/multi_gpu_gpt/onnx_ckpt_convert.py |
#! /usr/bin/env python3
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from argparse import ArgumentParser
from transformers import AutoTokenizer
def main(in_file, model_name):
tokenizer = AutoTokenizer.from_pretrained(model_name)
sentences = []
with open(in_file) as f:
for line in f:
sentences.append(json.dumps(tokenizer.decode([int(tok) for tok in line.split(" ") if tok != "\n"])))
for sentence in sentences:
print(sentence)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("in_file")
parser.add_argument("model_name", nargs="?", default="gpt2")
main(**vars(parser.parse_args()))
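# Example usage (hypothetical file name): each input line holds whitespace-separated token ids,
# and each output line is the JSON-encoded decoded string.
#   python hf_detokenize.py output.ids gpt2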
| FasterTransformer-main | examples/utils/hf_detokenize.py |
#! /usr/bin/env python3
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from argparse import ArgumentParser
from transformers import AutoTokenizer
def main(in_file, model_name):
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
tokens = []
with open(in_file) as f:
for line in f:
tokens.append(json.loads(line))
tokens = tokenizer(tokens, padding=True)["input_ids"]
token_string = "\n".join((",".join(str(token) for token in token_line)) for token_line in tokens)
print(token_string)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("in_file")
parser.add_argument("model_name", nargs="?", default="gpt2")
main(**vars(parser.parse_args()))
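# Example usage (hypothetical file name): each input line holds one JSON-encoded string, and the
# output is one comma-separated list of token ids per line (padded to a common length).
#   python hf_tokenize.py prompts.jsonl gpt2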
| FasterTransformer-main | examples/utils/hf_tokenize.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ctypes
import json
import numpy as np
import os
import os.path
import re
import sys
import time
import pycuda.autoinit
import tensorrt as trt
import torch
import sys
sys.path.insert(0, "../../pytorch/swin/Swin-Transformer-Quantization")
sys.path.insert(0, "../../pytorch/swin")
from SwinTransformer.config import get_config
from SwinTransformerINT8Weight import SwinTransformerINT8Weight
def parse_option():
parser = argparse.ArgumentParser('Swin Transformer evaluation script', add_help=False)
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
# easy config modification
parser.add_argument('--version', type=int, default=1, help='version of swin', )
parser.add_argument('--disable_amp', type=bool, default=True, help='disable amp', )
parser.add_argument('--fused_window_process', type=bool, default=False, help='whether use fused window process', )
parser.add_argument('--batch-size', type=int, help="max batch size")
parser.add_argument('--th-path', type=str, help='path to pytorch library')
parser.add_argument('--data-path', type=str, help='path to dataset', default=None)
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset', default=None)
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only', default=True)
parser.add_argument('--int8-mode', type=int, help='int8 mode', default=1, choices=[1, 2])
parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel', default=-1)
args, unparsed = parser.parse_known_args()
config = get_config(args)
return args, config
#TensorRT Initialization
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
handle = ctypes.CDLL("../../../build/lib/libswinTransformer_plugin.so", mode=ctypes.RTLD_GLOBAL)
if not handle:
raise RuntimeError("Fail to load plugin library")
trt.init_libnvinfer_plugins(TRT_LOGGER, "")
plg_registry = trt.get_plugin_registry()
swinTransformer_plg_creator = plg_registry.get_plugin_creator("CustomSwinTransformerINT8Plugin", "1", "")
def set_tensor_name(tensor, prefix, name):
tensor.name = prefix + name
def set_output_name(layer, prefix, name, out_idx = 0):
set_tensor_name(layer.get_output(out_idx), prefix, name)
def swin_transformer(network, config, args, input_img, weights_dict):
if args.version == 1:
depths = config.MODEL.SWIN.DEPTHS
num_heads = config.MODEL.SWIN.NUM_HEADS
window_size = config.MODEL.SWIN.WINDOW_SIZE
patch_size = config.MODEL.SWIN.PATCH_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
embed_dim = config.MODEL.SWIN.EMBED_DIM
ape = config.MODEL.SWIN.APE
patch_norm = config.MODEL.SWIN.PATCH_NORM
mlp_ratio = config.MODEL.SWIN.MLP_RATIO
qkv_bias = config.MODEL.SWIN.QKV_BIAS
if config.MODEL.SWIN.QK_SCALE is not None:
qk_scale = config.MODEL.SWIN.QK_SCALE
else:
qk_scale = 1.0
elif args.version == 2:
depths = config.MODEL.SWINV2.DEPTHS
num_heads = config.MODEL.SWINV2.NUM_HEADS
window_size = config.MODEL.SWINV2.WINDOW_SIZE
patch_size = config.MODEL.SWINV2.PATCH_SIZE
in_chans = config.MODEL.SWINV2.IN_CHANS
embed_dim = config.MODEL.SWINV2.EMBED_DIM
ape = config.MODEL.SWINV2.APE
patch_norm = config.MODEL.SWINV2.PATCH_NORM
mlp_ratio = config.MODEL.SWINV2.MLP_RATIO
qkv_bias = config.MODEL.SWINV2.QKV_BIAS
qk_scale = 1.0
version = args.version
th_path = args.th_path
layer_num = len(depths)
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
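    # Every scalar hyper-parameter and every weight tensor below is serialized as a named
    # trt.PluginField; the plugin creator looks these fields up by name when instantiating
    # CustomSwinTransformerINT8Plugin.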
int8_mode = trt.PluginField("int8_mode", np.array([args.int8_mode]).astype(np.int32), trt.PluginFieldType.INT32)
max_batch_size = trt.PluginField("max_batch_size", np.array([max_batch]).astype(np.int32), trt.PluginFieldType.INT32)
img_size = trt.PluginField("img_size", np.array([img_size]).astype(np.int32), trt.PluginFieldType.INT32)
patch_size = trt.PluginField("patch_size", np.array([patch_size]).astype(np.int32), trt.PluginFieldType.INT32)
in_chans = trt.PluginField("in_chans", np.array([in_chans]).astype(np.int32), trt.PluginFieldType.INT32)
embed_dim = trt.PluginField("embed_dim", np.array([embed_dim]).astype(np.int32), trt.PluginFieldType.INT32)
window_size_f = trt.PluginField("window_size", np.array([window_size]).astype(np.int32), trt.PluginFieldType.INT32)
ape = trt.PluginField("ape", np.array([ape]).astype(np.int32), trt.PluginFieldType.INT32)
patch_norm = trt.PluginField("patch_norm", np.array([patch_norm]).astype(np.int32), trt.PluginFieldType.INT32)
layer_num_f = trt.PluginField("layer_num", np.array([layer_num]).astype(np.int32), trt.PluginFieldType.INT32)
mlp_ratio = trt.PluginField("mlp_ratio", np.array([mlp_ratio]).astype(np.float32), trt.PluginFieldType.FLOAT32)
qkv_bias = trt.PluginField("qkv_bias", np.array([qkv_bias]).astype(np.int32), trt.PluginFieldType.INT32)
qk_scale = trt.PluginField("qk_scale", np.array([qk_scale]).astype(np.float32), trt.PluginFieldType.FLOAT32)
version_f = trt.PluginField("version", np.array([version]).astype(np.int32), trt.PluginFieldType.INT32)
depths_f = trt.PluginField("depths", np.array(depths).astype(np.int32), trt.PluginFieldType.INT32)
num_heads_f = trt.PluginField("num_heads", np.array(num_heads).astype(np.int32), trt.PluginFieldType.INT32)
sw_weights = SwinTransformerINT8Weight(
layer_num, window_size, depths, num_heads, th_path, weights_dict, version)
for i in range(len(sw_weights.weights)):
sw_weights.weights[i] = sw_weights.weights[i].cpu()
part_fc = []
weight_idx = 0
for l in range(len(depths)):
for b in range(depths[l]):
part_fc.append(trt.PluginField("attention_qkv_kernel_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("attention_qkv_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("attention_proj_kernel_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("attention_proj_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("mlp_linear_kernel_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("mlp_linear_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("mlp_linear2_kernel_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("mlp_linear2_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("block_norm_gamma_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("block_norm_beta_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("block_norm2_gamma_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("block_norm2_beta_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("block_d_amaxlist_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float32), trt.PluginFieldType.FLOAT32))
weight_idx += 1
part_fc.append(trt.PluginField("block_h_amaxlist_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np.float32), trt.PluginFieldType.FLOAT32))
weight_idx += 1
part_fc.append(trt.PluginField("attention_relative_pos_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx].cpu()).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("trt_relative_position_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx].cpu()).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
if version == 2:
part_fc.append(trt.PluginField("attention_logit_scale_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx].cpu()).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("patchMerge_norm_gamma_{}".format(l), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("patchMerge_norm_beta_{}".format(l), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("patchMerge_linear_kernel_{}".format(l), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("attn_mask_{}".format(l), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("trt_attn_mask_{}".format(l), np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("patchEmbed_proj_kernel", np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("patchEmbed_proj_bias", np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("patchEmbed_norm_gamma", np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("patchEmbed_norm_beta", np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("norm_gamma", np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
part_fc.append(trt.PluginField("norm_beta", np.array(sw_weights.weights[weight_idx]).astype(np.float16), trt.PluginFieldType.FLOAT16))
weight_idx += 1
pfc = trt.PluginFieldCollection([int8_mode, max_batch_size, img_size, patch_size, in_chans, embed_dim, window_size_f, ape, patch_norm, layer_num_f, mlp_ratio, qkv_bias, qk_scale, version_f, depths_f, num_heads_f] + part_fc)
fn = swinTransformer_plg_creator.create_plugin("swin_transformer", pfc)
inputs = [input_img]
sw = network.add_plugin_v2(inputs, fn)
set_output_name(sw, "swin_transformer_", "output")
return sw
def load_weights(inputbase, config):
weights_dict = dict()
try:
tensor_dict = torch.load(inputbase,
map_location='cpu')
# tensor_dict = tensor_dict['model']
# remove training-related variables in the checkpoint
param_names = [key for key in sorted(tensor_dict)]
for pn in param_names:
if isinstance(tensor_dict[pn], np.ndarray):
tensor = tensor_dict[pn]
else:
tensor = tensor_dict[pn].numpy()
shape = tensor.shape
##to be compatible with SwinTransformerWeight
if "index" in pn:
flat_tensor = tensor.astype(dtype=np.int64)
weights_dict[pn] = torch.tensor(flat_tensor, dtype=torch.int64).cuda()
elif ("table" in pn or "cpb_mlp" in pn):
flat_tensor = tensor.astype(dtype=np.float32)
weights_dict[pn] = torch.tensor(flat_tensor, dtype=torch.float32).cuda()
else:
flat_tensor = tensor.astype(dtype=np.float32)
weights_dict[pn] = torch.tensor(flat_tensor, dtype=torch.float32).cuda()
shape_str = "{} ".format(len(shape)) + " ".join([str(d) for d in shape])
#print("TensorRT name: {:}, shape: {:}".format("module."+pn, shape_str))
except Exception as error:
TRT_LOGGER.log(TRT_LOGGER.ERROR, str(error))
return weights_dict
def build_engine(config, args, weights_dict):
explicit_batch_flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(explicit_batch_flag) as network, builder.create_builder_config() as builder_config:
builder_config.max_workspace_size = 8 << 30
builder_config.set_flag(trt.BuilderFlag.FP16)
builder_config.set_flag(trt.BuilderFlag.STRICT_TYPES)
# Create the network
input_img = network.add_input(name="input_img", dtype=trt.float16, shape=(-1, config.MODEL.SWIN.IN_CHANS, config.DATA.IMG_SIZE, config.DATA.IMG_SIZE))
# Specify profiles
profile = builder.create_optimization_profile()
min_shape = (1, config.MODEL.SWIN.IN_CHANS, config.DATA.IMG_SIZE, config.DATA.IMG_SIZE)
##TODO: There is a bug in TRT when opt batch is large
max_shape = (config.DATA.BATCH_SIZE, config.MODEL.SWIN.IN_CHANS, config.DATA.IMG_SIZE, config.DATA.IMG_SIZE)
profile.set_shape("input_img", min=min_shape, opt=min_shape, max=max_shape)
builder_config.add_optimization_profile(profile)
#import pdb;pdb.set_trace()
sw_output = swin_transformer(network, config, args, input_img, weights_dict)
sw_output.precision = trt.float16
sw_output.set_output_type(0, trt.float16)
network.mark_output(sw_output.get_output(0))
print("Before build_engine")
engine = builder.build_engine(network, builder_config)
print("After build_engine")
return engine
def main():
args, config = parse_option()
weights_dict = load_weights(config.MODEL.RESUME, config)
with build_engine(config, args, weights_dict) as engine:
TRT_LOGGER.log(TRT_LOGGER.VERBOSE, "Serializing Engine...")
serialized_engine = engine.serialize()
TRT_LOGGER.log(TRT_LOGGER.INFO, "Saving Engine to {:}".format(args.output))
with open(args.output, "wb") as fout:
fout.write(serialized_engine)
TRT_LOGGER.log(TRT_LOGGER.INFO, "Done.")
if __name__ == "__main__":
main()
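# Example invocation (hypothetical paths), building an INT8 engine from a calibrated / QAT
# Swin checkpoint:
#   python builder_int8.py --cfg swin_tiny_patch4_window7_224.yaml --resume calib-checkpoint.pth \
#       --th-path ../../../build/lib/libpyt_swintransformer.so --int8-mode 1 --batch-size 32 \
#       --output swin_transformer_int8.engine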
| FasterTransformer-main | examples/tensorrt/swin/builder_int8.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ctypes
import json
import numpy as np
import os
import os.path
import re
import sys
import time
import tensorrt as trt
import torch
import sys
sys.path.insert(0, "../../pytorch/swin/Swin-Transformer-Quantization")
sys.path.insert(0, "../../pytorch/swin")
from SwinTransformer.config import get_config
from SwinTransformerWeightTransposeQKVWeight import SwinTransformerWeightTransposeQKVWeight
def parse_option():
parser = argparse.ArgumentParser('Swin Transformer TensorRT plugin build script', add_help=False)
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
# easy config modification
parser.add_argument('--version', type=int, default=1, help='version of swin', )
parser.add_argument('--disable_amp', type=bool, default=True, help='disable amp', )
parser.add_argument('--fused_window_process', type=bool, default=False, help='whether use fused window process', )
parser.add_argument('--batch-size', type=int, help="max batch size")
parser.add_argument('--th-path', type=str, help='path to pytorch library')
parser.add_argument('--data-path', type=str, help='path to dataset', default=None)
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset', default=None)
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
parser.add_argument('--fp16', action='store_true', help='Build FP16 engine instead of FP32')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only', default=True)
parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel', default=-1)
args, unparsed = parser.parse_known_args()
config = get_config(args)
return args, config
#TensorRT Initialization
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
handle = ctypes.CDLL("../../../build/lib/libswinTransformer_plugin.so", mode=ctypes.RTLD_GLOBAL)
if not handle:
raise RuntimeError("Fail to load plugin library")
trt.init_libnvinfer_plugins(TRT_LOGGER, "")
plg_registry = trt.get_plugin_registry()
swinTransformer_plg_creator = plg_registry.get_plugin_creator("CustomSwinTransformerPlugin", "1", "")
def set_tensor_name(tensor, prefix, name):
tensor.name = prefix + name
def set_output_name(layer, prefix, name, out_idx = 0):
set_tensor_name(layer.get_output(out_idx), prefix, name)
def swin_transformer(network, config, args, input_img, weights_dict):
if args.version == 1:
depths = config.MODEL.SWIN.DEPTHS
num_heads = config.MODEL.SWIN.NUM_HEADS
window_size = config.MODEL.SWIN.WINDOW_SIZE
patch_size = config.MODEL.SWIN.PATCH_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
embed_dim = config.MODEL.SWIN.EMBED_DIM
ape = config.MODEL.SWIN.APE
patch_norm = config.MODEL.SWIN.PATCH_NORM
mlp_ratio = config.MODEL.SWIN.MLP_RATIO
qkv_bias = config.MODEL.SWIN.QKV_BIAS
if config.MODEL.SWIN.QK_SCALE is not None:
qk_scale = config.MODEL.SWIN.QK_SCALE
else:
qk_scale = 1.0
elif args.version == 2:
depths = config.MODEL.SWINV2.DEPTHS
num_heads = config.MODEL.SWINV2.NUM_HEADS
window_size = config.MODEL.SWINV2.WINDOW_SIZE
patch_size = config.MODEL.SWINV2.PATCH_SIZE
in_chans = config.MODEL.SWINV2.IN_CHANS
embed_dim = config.MODEL.SWINV2.EMBED_DIM
ape = config.MODEL.SWINV2.APE
patch_norm = config.MODEL.SWINV2.PATCH_NORM
mlp_ratio = config.MODEL.SWINV2.MLP_RATIO
qkv_bias = config.MODEL.SWINV2.QKV_BIAS
qk_scale = 1.0
version = args.version
th_path = args.th_path
layer_num = len(depths)
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
max_batch_size = trt.PluginField("max_batch_size", np.array([max_batch]).astype(np.int32), trt.PluginFieldType.INT32)
img_size = trt.PluginField("img_size", np.array([img_size]).astype(np.int32), trt.PluginFieldType.INT32)
patch_size = trt.PluginField("patch_size", np.array([patch_size]).astype(np.int32), trt.PluginFieldType.INT32)
in_chans = trt.PluginField("in_chans", np.array([in_chans]).astype(np.int32), trt.PluginFieldType.INT32)
embed_dim = trt.PluginField("embed_dim", np.array([embed_dim]).astype(np.int32), trt.PluginFieldType.INT32)
window_size_f = trt.PluginField("window_size", np.array([window_size]).astype(np.int32), trt.PluginFieldType.INT32)
ape = trt.PluginField("ape", np.array([ape]).astype(np.int32), trt.PluginFieldType.INT32)
patch_norm = trt.PluginField("patch_norm", np.array([patch_norm]).astype(np.int32), trt.PluginFieldType.INT32)
layer_num_f = trt.PluginField("layer_num", np.array([layer_num]).astype(np.int32), trt.PluginFieldType.INT32)
mlp_ratio = trt.PluginField("mlp_ratio", np.array([mlp_ratio]).astype(np.float32), trt.PluginFieldType.FLOAT32)
qkv_bias = trt.PluginField("qkv_bias", np.array([qkv_bias]).astype(np.int32), trt.PluginFieldType.INT32)
qk_scale = trt.PluginField("qk_scale", np.array([qk_scale]).astype(np.float32), trt.PluginFieldType.FLOAT32)
version_f = trt.PluginField("version", np.array([version]).astype(np.int32), trt.PluginFieldType.INT32)
depths_f = trt.PluginField("depths", np.array(depths).astype(np.int32), trt.PluginFieldType.INT32)
num_heads_f = trt.PluginField("num_heads", np.array(num_heads).astype(np.int32), trt.PluginFieldType.INT32)
sw_weights = SwinTransformerWeightTransposeQKVWeight(
layer_num, window_size, depths, num_heads, th_path, weights_dict, version)
for i in range(len(sw_weights.weights)):
sw_weights.weights[i] = sw_weights.weights[i].cpu()
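    # Serialize the weight fields in the same precision the engine will run in: FP16 fields for
    # an FP16 build, FP32 otherwise.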
if args.fp16:
np_dtype = np.float16
trt_pf_dtype = trt.PluginFieldType.FLOAT16
else:
np_dtype = np.float32
trt_pf_dtype = trt.PluginFieldType.FLOAT32
part_fc = []
weight_idx = 0
for l in range(len(depths)):
for b in range(depths[l]):
part_fc.append(trt.PluginField("attention_qkv_kernel_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("attention_qkv_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("attention_proj_kernel_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("attention_proj_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("mlp_linear_kernel_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("mlp_linear_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("mlp_linear2_kernel_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("mlp_linear2_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("block_norm_gamma_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("block_norm_beta_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("block_norm2_gamma_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("block_norm2_beta_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("attention_relative_pos_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx].cpu()).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("trt_relative_position_bias_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx].cpu()).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
if version == 2:
part_fc.append(trt.PluginField("attention_logit_scale_{}_{}".format(l, b), np.array(sw_weights.weights[weight_idx].cpu()).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("patchMerge_norm_gamma_{}".format(l), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("patchMerge_norm_beta_{}".format(l), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("patchMerge_linear_kernel_{}".format(l), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("attn_mask_{}".format(l), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("trt_attn_mask_{}".format(l), np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("patchEmbed_proj_kernel", np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("patchEmbed_proj_bias", np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("patchEmbed_norm_gamma", np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("patchEmbed_norm_beta", np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("norm_gamma", np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
part_fc.append(trt.PluginField("norm_beta", np.array(sw_weights.weights[weight_idx]).astype(np_dtype), trt_pf_dtype))
weight_idx += 1
pfc = trt.PluginFieldCollection([max_batch_size, img_size, patch_size, in_chans, embed_dim, window_size_f, ape, patch_norm, layer_num_f, mlp_ratio, qkv_bias, qk_scale, version_f, depths_f, num_heads_f] + part_fc)
fn = swinTransformer_plg_creator.create_plugin("swin_transformer", pfc)
inputs = [input_img]
sw = network.add_plugin_v2(inputs, fn)
set_output_name(sw, "swin_transformer_", "output")
return sw
def load_weights(inputbase, config):
weights_dict = dict()
try:
tensor_dict = torch.load(inputbase,
map_location='cpu')
tensor_dict = tensor_dict['model']
# remove training-related variables in the checkpoint
param_names = [key for key in sorted(tensor_dict)]
for pn in param_names:
if isinstance(tensor_dict[pn], np.ndarray):
tensor = tensor_dict[pn]
else:
tensor = tensor_dict[pn].numpy()
shape = tensor.shape
##to be compatible with SwinTransformerWeightTransposeQKVWeight
if "index" in pn:
flat_tensor = tensor.astype(dtype=np.int64)
weights_dict[pn] = torch.tensor(flat_tensor, dtype=torch.int64).cuda()
elif ("table" in pn or "cpb_mlp" in pn):
flat_tensor = tensor.astype(dtype=np.float32)
weights_dict[pn] = torch.tensor(flat_tensor, dtype=torch.float32).cuda()
else:
flat_tensor = tensor.astype(dtype=np.float32)
weights_dict[pn] = torch.tensor(flat_tensor, dtype=torch.float32).cuda()
shape_str = "{} ".format(len(shape)) + " ".join([str(d) for d in shape])
#print("TensorRT name: {:}, shape: {:}".format(pn, shape_str))
except Exception as error:
TRT_LOGGER.log(TRT_LOGGER.ERROR, str(error))
return weights_dict
def build_engine(config, args, weights_dict):
explicit_batch_flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(explicit_batch_flag) as network, builder.create_builder_config() as builder_config:
builder_config.max_workspace_size = 8 << 30
if args.fp16:
builder_config.set_flag(trt.BuilderFlag.FP16)
builder_config.set_flag(trt.BuilderFlag.STRICT_TYPES)
trt_dtype = trt.float16
else:
trt_dtype = trt.float32
# Create the network
input_img = network.add_input(name="input_img", dtype=trt_dtype, shape=(-1, config.MODEL.SWIN.IN_CHANS, config.DATA.IMG_SIZE, config.DATA.IMG_SIZE))
# Specify profiles
profile = builder.create_optimization_profile()
min_shape = (1, config.MODEL.SWIN.IN_CHANS, config.DATA.IMG_SIZE, config.DATA.IMG_SIZE)
##TODO: There is a bug in TRT when opt batch is large
max_shape = (config.DATA.BATCH_SIZE, config.MODEL.SWIN.IN_CHANS, config.DATA.IMG_SIZE, config.DATA.IMG_SIZE)
profile.set_shape("input_img", min=min_shape, opt=min_shape, max=max_shape)
builder_config.add_optimization_profile(profile)
#import pdb;pdb.set_trace()
sw_output = swin_transformer(network, config, args, input_img, weights_dict)
if args.fp16:
sw_output.precision = trt.float16
sw_output.set_output_type(0, trt.float16)
sw_output = network.add_identity(sw_output.get_output(0))
network.mark_output(sw_output.get_output(0))
engine = builder.build_engine(network, builder_config)
return engine
def main():
args, config = parse_option()
weights_dict = load_weights(config.MODEL.RESUME, config)
with build_engine(config, args, weights_dict) as engine:
TRT_LOGGER.log(TRT_LOGGER.VERBOSE, "Serializing Engine...")
serialized_engine = engine.serialize()
TRT_LOGGER.log(TRT_LOGGER.INFO, "Saving Engine to {:}".format(args.output))
with open(args.output, "wb") as fout:
fout.write(serialized_engine)
TRT_LOGGER.log(TRT_LOGGER.INFO, "Done.")
if __name__ == "__main__":
main()
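# Example invocation (hypothetical paths), building an FP16 engine from an official Swin checkpoint:
#   python builder.py --cfg swin_tiny_patch4_window7_224.yaml --resume swin_tiny_patch4_window7_224.pth \
#       --th-path ../../../build/lib/libpyt_swintransformer.so --batch-size 32 --fp16 \
#       --output swin_transformer_fp16.engine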
| FasterTransformer-main | examples/tensorrt/swin/builder.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import ctypes
import tensorrt as trt
import sys
sys.path.insert(0, "../../pytorch/swin/Swin-Transformer-Quantization")
sys.path.insert(0, "../../pytorch/swin")
from SwinTransformer.config import get_config
from models import build_model
import quant_utils
test_time = 100
warmup_time = 10
def parse_option():
parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
# easy config modification
parser.add_argument('--version', type=int, default=1, help='version of swin', )
parser.add_argument('--disable_amp', type=bool, default=True, help='disable amp', )
parser.add_argument('--fused_window_process', type=bool, default=False, help='whether use fused window process', )
parser.add_argument('--engine', type=str, help='path to TRT engine')
parser.add_argument('--th-path', type=str, help='path to pytorch library')
parser.add_argument('--batch-size', type=int, default=32, help="batch size for single GPU")
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
parser.add_argument('--int8-mode', type=int, help='int8 mode', choices=[1, 2])
# distributed training
parser.add_argument("--local_rank", type=int, default=0, help='local rank for DistributedDataParallel')
quant_utils.add_arguments(parser)
args, unparsed = parser.parse_known_args()
args = quant_utils.set_args(args)
quant_utils.set_default_quantizers(args)
config = get_config(args)
return args, config
def main(config, args):
model = build_model(config)
model.cuda()
checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
model.load_state_dict(checkpoint['model'] if 'model' in checkpoint.keys() else checkpoint, strict=False)
quant_utils.configure_model(model, args, calib=False)
validate_with_random_data(config, args, model)
@torch.no_grad()
def run_swintransformer_plugin(args, config, model, images):
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
    # Load the Swin Transformer TensorRT plugin library
ctypes.CDLL("../../../build/lib/libswinTransformer_plugin.so", mode=ctypes.RTLD_GLOBAL)
depths = config.MODEL.SWIN.DEPTHS
layer_num = len(depths)
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
embed_dim = config.MODEL.SWIN.EMBED_DIM
with open(args.engine, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, \
runtime.deserialize_cuda_engine(f.read()) as engine, \
engine.create_execution_context() as context:
context.active_optimization_profile = 0
stream = torch.cuda.Stream()
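        # Binding 0 is the dynamic-batch input; fixing its shape lets TensorRT resolve the
        # output shape of binding 1 before the output buffer is allocated.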
context.set_binding_shape(0, (max_batch, in_chans, img_size, img_size))
output_shape = tuple(context.get_binding_shape(1))
print('output_shape binding:', output_shape)
d_inputs = [images]
d_output = torch.empty(output_shape, dtype=torch.float32).cuda()
# warm up
for i in range(warmup_time):
context.execute_async_v2(bindings=[d_inp.data_ptr() for d_inp in d_inputs] + [d_output.data_ptr()], stream_handle=stream.cuda_stream)
#ignore the last fc layer
torch.cuda.synchronize()
op_end = time.time()
for i in range(test_time):
context.execute_async_v2(bindings=[d_inp.data_ptr() for d_inp in d_inputs] + [d_output.data_ptr()], stream_handle=stream.cuda_stream)
stream.synchronize()
torch.cuda.synchronize()
print("plugin time : ", (time.time() - op_end)/test_time*1000.0, "ms")
return d_output.cpu().numpy()
@torch.no_grad()
def run_torch(model, images, mark):
torch_output = model.forward_features(images)
return torch_output.cpu().numpy()
@torch.no_grad()
def validate_with_random_data(config, args, model):
model.eval()
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
image = np.random.rand(1, in_chans, img_size, img_size)
images = np.repeat(image, max_batch, axis=0)
print(images.shape)
images_half = torch.tensor(images, dtype=torch.half)
images_float = torch.tensor(images, dtype=torch.float)
images_half = images_half.cuda(non_blocking=True)
images_float = images_float.cuda(non_blocking=True)
## run pytorch plugin
plugin_output = run_swintransformer_plugin(args, config, model, images_half)
# warm up
model.half()
torch_output = run_torch(model, images_half, "torch")
# torch_output = model.forward_features(images_half)
# torch_output = torch_output.cpu().numpy()
diff = abs(torch_output - plugin_output.reshape(max_batch, -1))
print(diff.shape)
print("torch_output vs plugin_output , avg diff : ", diff.mean((1)), "max diff : ", diff.max((1)))
if __name__ == '__main__':
args, config = parse_option()
seed = config.SEED + int(time.time())
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
main(config, args)
| FasterTransformer-main | examples/tensorrt/swin/infer_swintransformer_plugin_int8.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import ctypes
import tensorrt as trt
import sys
sys.path.insert(0, "../../pytorch/swin/Swin-Transformer-Quantization")
from SwinTransformer.config import get_config
from SwinTransformer.models import build_model
test_time = 100
warmup_time = 10
def maxdiff(A, B):
A = A.flatten()
B = B.flatten()
maxDiff = -1000000
maxDiff_a = 0
maxDiff_b = 0
avgDiff = 0
num = 0
for a, b in zip(A, B):
diff = abs(a-b)
if diff > maxDiff:
maxDiff = diff
maxDiff_a = a
maxDiff_b = b
avgDiff += diff
avgDiff /= len(A)
print("torch_output vs plugin_output , avg diff : ", avgDiff, "maxDiff : ", maxDiff, "at {} vs {}".format(maxDiff_a, maxDiff_b))
def parse_option():
parser = argparse.ArgumentParser('Swin Transformer evaluation script', add_help=False)
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
# easy config modification
parser.add_argument('--version', type=int, default=1, help='version of swin', )
parser.add_argument('--disable_amp', type=bool, default=True, help='disable amp', )
parser.add_argument('--fused_window_process', type=bool, default=False, help='whether use fused window process', )
parser.add_argument('--engine', type=str, help='path to TRT engine')
parser.add_argument('--th-path', type=str, help='path to pytorch library')
parser.add_argument('--batch-size', type=int, default=32, help="batch size for single GPU")
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--pretrained',
help='pretrained weight from checkpoint, could be imagenet22k pretrained weight')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O0', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
# distributed training
parser.add_argument("--local_rank", type=int, default=0, help='local rank for DistributedDataParallel')
parser.add_argument("--use-fp16", action='store_true',
help='Use FP16 if set true, otherwise FP32')
args, unparsed = parser.parse_known_args()
config = get_config(args)
return args, config
def main(config, args):
model = build_model(config)
model.cuda()
checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
model.load_state_dict(checkpoint['model'] if 'model' in checkpoint.keys() else checkpoint, strict=False)
validate_with_random_data(config, args, model)
@torch.no_grad()
def run_swintransformer_plugin(args, config, model, images):
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
    # Load the Swin Transformer TensorRT plugin library
ctypes.CDLL("../../../build/lib/libswinTransformer_plugin.so", mode=ctypes.RTLD_GLOBAL)
depths = config.MODEL.SWIN.DEPTHS
layer_num = len(depths)
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
embed_dim = config.MODEL.SWIN.EMBED_DIM
with open(args.engine, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, \
runtime.deserialize_cuda_engine(f.read()) as engine, \
engine.create_execution_context() as context:
context.active_optimization_profile = 0
context.set_binding_shape(0, (max_batch, in_chans, img_size, img_size))
output_shape = tuple(context.get_binding_shape(1))
print('output_shape binding:', output_shape)
d_inputs = [images]
d_output = torch.empty(output_shape, dtype=torch.float32).cuda()
stream = torch.cuda.Stream()
# warm up
for i in range(warmup_time):
context.execute_async_v2(bindings=[d_inp.data_ptr() for d_inp in d_inputs] + [d_output.data_ptr()], stream_handle=stream.cuda_stream)
#ignore the last fc layer
torch.cuda.synchronize()
op_end = time.time()
for i in range(test_time):
context.execute_async_v2(bindings=[d_inp.data_ptr() for d_inp in d_inputs] + [d_output.data_ptr()], stream_handle=stream.cuda_stream)
stream.synchronize()
torch.cuda.synchronize()
print("plugin time : ", (time.time() - op_end)/test_time*1000.0, "ms")
return d_output.cpu().numpy()
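# Note kept as a reference for the index-based binding calls above: the deserialized
# Swin engine is assumed to expose binding 0 as the input image tensor and binding 1
# as the transformer feature output, which is what gets compared against
# model.forward_features() in validate_with_random_data below.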
@torch.no_grad()
def run_torch(model, images, mark):
# warm up
for i in range(warmup_time):
output = model.forward_features(images)
torch.cuda.synchronize()
torch_start = time.time()
for i in range(test_time):
torch_output = model.forward_features(images)
#_nvtx.rangePop()
torch.cuda.synchronize()
print(mark + " time : ", (time.time() - torch_start)/test_time*1000.0, "ms")
return torch_output.cpu().numpy()
@torch.no_grad()
def validate_with_random_data(config, args, model):
model.eval()
max_batch = config.DATA.BATCH_SIZE
img_size = config.DATA.IMG_SIZE
in_chans = config.MODEL.SWIN.IN_CHANS
image = np.random.rand(1, in_chans, img_size, img_size)
images = np.repeat(image, max_batch, axis=0)
if args.use_fp16:
images = torch.tensor(images, dtype=torch.half)
model.half()
else:
images = torch.tensor(images, dtype=torch.float)
images = images.cuda(non_blocking=True)
    ## run TensorRT plugin and PyTorch model
plugin_output = run_swintransformer_plugin(args, config, model, images)
torch_output = run_torch(model, images, "torch")
diff = abs(torch_output - plugin_output.reshape(max_batch, -1))
    print('plugin_output', plugin_output.mean((1, 2, 3)), 'torch_output', torch_output.mean((1)))
print("torch_output vs plugin_output , avg diff : ", diff.mean((1)), "max diff : ", diff.max((1)))
assert diff.mean() < 0.001, "[ERROR] SWIN PLUGIN TEST FAIL !"
print("[INFO] SWIN TRT PLUGIN TEST PASS !")
if __name__ == '__main__':
args, config = parse_option()
seed = config.SEED + int(time.time())
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
main(config, args)
| FasterTransformer-main | examples/tensorrt/swin/infer_swintransformer_plugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
np.random.seed(97)
data = {}
fpList = [32,16]
bsList = [1,8,32,128]
slList = [32,128,384]
for bs in bsList:
for sl in slList:
for fp in fpList:
name = '-fp'+str(fp)+'-bs'+str(bs)+'-sl'+str(sl)
data['encoder'+name] = np.random.randint(0,32128,[bs,sl]).astype(np.int32)
data['decoding'+name] = np.random.rand(bs,sl,512).astype([np.float32,np.float16][int(fp==16)])*2-1
data['seqLen'+name] = np.full([bs],sl,dtype=np.int32)
np.savez("T5PluginTestIO.npz",**data)
#for k in data.keys():
# print(k,data[k].shape,data[k].dtype,data[k].reshape(-1)[:10])
print("create T5 test data finish!")
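# Illustrative reader (an addition, not part of the original script): one way a
# consumer could pull a single case back out of the .npz written above; the key
# suffix mirrors the names built in the loops.
def load_case(npz_path="T5PluginTestIO.npz", fp=32, bs=8, sl=128):
    loaded = np.load(npz_path)
    name = '-fp' + str(fp) + '-bs' + str(bs) + '-sl' + str(sl)
    # encoder ids [bs, sl], decoding inputs [bs, sl, 512], sequence lengths [bs]
    return loaded['encoder' + name], loaded['decoding' + name], loaded['seqLen' + name]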
| FasterTransformer-main | examples/tensorrt/t5/createT5TestData.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import configparser
import numpy as np
import torch
from transformers import T5ForConditionalGeneration
from pathlib import Path
rename_mapping={"relative_attention_num_buckets":"relative_attention_num_buckets_or_max_pos_seq_len"}
new_configs={"structure":{"t5_with_bias":"false", "use_gated_activation":"false", "position_embedding_type":"relative"}}
def fuse_decoder_qkv(model, factor, saved_dir):
model_dict = {}
for name, param in model.named_parameters():
if name.find("decoder") != -1 and name.find("SelfAttention") != -1:
model_dict[name] = param
for i in range(model.decoder.config.num_layers):
shape = model_dict[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"].transpose(1, 0).shape
qkv = torch.cat([model_dict[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"].transpose(1, 0),
model_dict[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"].transpose(1, 0),
model_dict[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"].transpose(1, 0)], dim=-1)
qkv = qkv.reshape([shape[0], 3, shape[1]])
qkv = qkv.float().cpu().detach().numpy()
split_vals = np.split(qkv, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"decoder.block.{i}.layer.0.SelfAttention.qkv.weight.{j}.bin"
split_vals[j].tofile(saved_path)
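# Small sanity-check helper (an assumption, not part of the original converter):
# every '*.bin' written by this script is a raw dump produced by ndarray.tofile
# with float32 values, so it can be read back flat and reshaped by the caller.
def read_bin(path, shape=None):
    arr = np.fromfile(path, dtype=np.float32)
    return arr if shape is None else arr.reshape(shape)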
def split_and_convert_process(key, val, factor, saved_dir):
if val.dim() == 2:
val = val.transpose(1, 0)
val = val.detach().numpy()
saved_key = key
if key.find("shared.weight") != -1:
# shared weights, only need to convert the weights of rank 0
saved_path = saved_dir / f"{saved_key}.bin"
val.tofile(saved_path)
saved_path = saved_dir / f"{saved_key}_T.bin"
val.transpose(1, 0).tofile(saved_path)
elif key.find("layer_norm.weight") != -1:
# shared weights, only need to convert the weights of rank 0
saved_path = saved_dir / f"{saved_key}.bin"
val.tofile(saved_path)
elif (
key.find("SelfAttention.o.weight") != -1
or key.find("EncDecAttention.o.weight") != -1
or key.find("DenseReluDense.wo.weight") != -1
):
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir / f"{saved_key}.{j:d}.bin"
split_vals[j].tofile(saved_path)
elif (
key.find("DenseReluDense.wi.weight") != -1
or (key.find("encoder") != -1 and (
key.find("SelfAttention.q.weight") != -1
or key.find("SelfAttention.k.weight") != -1
or key.find("SelfAttention.v.weight") != -1
)
)
or key.find("EncDecAttention.q.weight") != -1
or key.find("EncDecAttention.k.weight") != -1
or key.find("EncDecAttention.v.weight") != -1
):
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"{saved_key}.{j:d}.bin"
split_vals[j].tofile(saved_path)
elif key.find("relative_attention_bias") != -1:
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir / f"{saved_key}.{j:d}.bin"
split_vals[j].tofile(saved_path)
elif (
key.find("decoder") != -1 and
(
key.find("SelfAttention.q.weight") != -1
or key.find("SelfAttention.k.weight") != -1
or key.find("SelfAttention.v.weight") != -1
)
):
pass
else:
        print(f"[ERROR] no conversion rule for key '{key}', skipping")
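# Summary of the split convention above (a reading of the branches, kept as a
# reference): after the 2-D transpose, attention output ('o') and FFN 'wo' weights
# are split along axis 0, FFN 'wi' plus the encoder and cross-attention Q/K/V
# weights along the last axis, and relative_attention_bias along its head axis;
# decoder self-attention Q/K/V are skipped because fuse_decoder_qkv writes them as
# a fused qkv tensor instead. With factor=1, as used in __main__ below, each file
# simply holds the whole tensor.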
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-saved_dir", "-o", type=str, help="file name of output file", required=True)
parser.add_argument("-in_file", "-i", type=str, help="file name of input checkpoint file. Using model name like 't5-small' is also ok.", required=True)
args = parser.parse_args()
saved_dir = Path(args.saved_dir) / f"1-gpu"
saved_dir.mkdir(parents=True, exist_ok=True)
t5_model = T5ForConditionalGeneration.from_pretrained(args.in_file)
config = configparser.ConfigParser()
config["encoder"] = {}
for key, val in t5_model.encoder.config.to_dict().items():
config["encoder"][key] = f"{val}"
config["encoder"]["weight_data_type"] = "fp32"
config["decoder"] = {}
for key, val in t5_model.decoder.config.to_dict().items():
config["decoder"][key] = f"{val}"
config["decoder"]["weight_data_type"] = "fp32"
for key, val in rename_mapping.items():
config['encoder'][val] = config['encoder'].pop(key)
config['decoder'][val] = config['decoder'].pop(key)
for key, val in new_configs.items():
config[key] = {}
for val_key, val_val in val.items():
config[key][val_key] = val_val
with open(f"{saved_dir}/config.ini", 'w') as configfile:
config.write(configfile)
for name, param in t5_model.named_parameters():
split_and_convert_process(name, param, 1, saved_dir)
fuse_decoder_qkv(t5_model, 1, saved_dir)
print("extract T5 model weight finish!")
| FasterTransformer-main | examples/tensorrt/t5/extractT5ModelToBIN.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import configparser
import os
import sys
import ctypes
import argparse
import math
import numpy as np
import tensorrt as trt
import torch
from datetime import datetime
from transformers import PreTrainedTokenizerFast
from transformers import T5Tokenizer
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.pytorch.decoding.utils.recover_bpe import recover_bpe
npToTrt = {np.int8: trt.int8, np.float16: trt.float16, np.int32: trt.int32, np.float32: trt.float32}
npToPFT = {np.int8: trt.PluginFieldType.INT8, np.float16: trt.PluginFieldType.FLOAT16, np.int32: trt.PluginFieldType.INT32, np.float32: trt.PluginFieldType.FLOAT32}
npToTorch = {np.dtype('float16'): torch.float16, np.dtype('int32'): torch.int32, np.dtype('float32'): torch.float32}
device = 0
# global variables with default value
globalNMaxBatchSize = 128
globalNMaxSeqLen = 384
globalNBeamSize = 4
globalNUseFP16 = 0
globalNSM = (lambda x: x[0] * 10 + x[1])(torch.cuda.get_device_capability())
globalFBeamDiversity = 0.0
globalFTemperature = 1.0
globalFLenPenalty = 0.0
globalFRepPenalty = 1.0
nMinBatchSize = 1
nOptBatchSize = globalNMaxBatchSize
nMaxBatchSize = globalNMaxBatchSize
nMinSeqLen = 1
nOptSeqLen = globalNMaxSeqLen
nMaxSeqLen = globalNMaxSeqLen
def bleu_score(pred, ref):
from sacrebleu import corpus_bleu
bleu = corpus_bleu(pred, [ref], force=True)
print(" bleu score: {:6.2f}".format(bleu.score))
print(" bleu counts: {}".format(bleu.counts))
print(" bleu totals: {}".format(bleu.totals))
print(" bleu precisions: {}".format(bleu.precisions))
print(" bleu sys_len: {}; ref_len: {}".format(bleu.sys_len, bleu.ref_len))
return bleu
def getT5EncoderPlugin(arg):
nBatchSize = arg['batch_size']
nMaxSeqLen = arg['max_seq_len']
    nBeamSize = arg['beam_width']
nSM = globalNSM
useFP16 = int(arg['data_type'] == 'fp16')
ckpt_path = arg['ckpt_path'].encode()
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == 'T5EncoderPlugin':
pList = [
trt.PluginField('max_batch_size', np.int32(nBatchSize), npToPFT[np.int32]),
trt.PluginField('max_seq_len', np.int32(nMaxSeqLen), npToPFT[np.int32]),
trt.PluginField('beam_width', np.int32(nBeamSize), npToPFT[np.int32]),
trt.PluginField('sm', np.int32(nSM), npToPFT[np.int32]),
trt.PluginField('useFP16', np.int32(useFP16), npToPFT[np.int32]),
trt.PluginField('ckpt_path', ckpt_path, trt.PluginFieldType.CHAR),
]
return c.create_plugin(c.name, trt.PluginFieldCollection(pList))
return None
def getT5DecodingPlugin(arg):
nBatchSize = arg['batch_size']
nMaxSeqLen = arg['max_seq_len']
nMemMaxSeqLen = arg['max_seq_len']
nBeamSize = arg['beam_width']
useFP16 = int(arg['data_type'] == 'fp16')
ckpt_path = arg['ckpt_path'].encode()
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == 'T5DecodingPlugin':
pList = [
trt.PluginField('max_batch_size', np.int32(nBatchSize), npToPFT[np.int32]),
trt.PluginField('max_seq_len', np.int32(nMaxSeqLen), npToPFT[np.int32]),
                trt.PluginField('mem_max_seq_len', np.int32(nMemMaxSeqLen), npToPFT[np.int32]),
trt.PluginField('beam_width', np.int32(nBeamSize), npToPFT[np.int32]),
trt.PluginField('useFP16', np.int32(useFP16), npToPFT[np.int32]),
trt.PluginField('ckpt_path', ckpt_path, trt.PluginFieldType.CHAR),
]
return c.create_plugin(c.name, trt.PluginFieldCollection(pList))
return None
def buildEngine(logger, arg, trtFileName):
builder = trt.Builder(logger)
network = builder.create_network(1)
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30
config.flags = int(arg['data_type'] == 'fp16')
inputT0 = network.add_input('inputId', npToTrt[np.int32], [-1, -1])
inputT1 = network.add_input('inputSeqLen', npToTrt[np.int32], [-1])
inputT2 = network.add_input('inputTopK', npToTrt[np.int32], [-1])
    inputT3 = network.add_input('inputTopP', npToTrt[np.float32], [-1])
inputT4 = network.add_input('inputBeam_search_diversity_rate', npToTrt[np.float32], [-1])
inputT5 = network.add_input('inputTemperature', npToTrt[np.float32], [-1])
inputT6 = network.add_input('inputLen_penalty', npToTrt[np.float32], [-1])
inputT7 = network.add_input('inputRepetition_penalty', npToTrt[np.float32], [-1])
profile.set_shape(inputT0.name, [nMinBatchSize, nMinSeqLen], [nOptBatchSize, nOptSeqLen], [nMaxBatchSize, nMaxSeqLen])
profile.set_shape(inputT1.name, [nMinBatchSize], [nOptBatchSize], [nMaxBatchSize])
profile.set_shape(inputT2.name, [1], [nOptBatchSize], [nMaxBatchSize])
profile.set_shape(inputT3.name, [1], [nOptBatchSize], [nMaxBatchSize])
profile.set_shape(inputT4.name, [1], [nOptBatchSize], [nMaxBatchSize])
profile.set_shape(inputT5.name, [1], [nOptBatchSize], [nMaxBatchSize])
profile.set_shape(inputT6.name, [1], [nOptBatchSize], [nMaxBatchSize])
profile.set_shape(inputT7.name, [1], [nOptBatchSize], [nMaxBatchSize])
config.add_optimization_profile(profile)
model_config = configparser.ConfigParser()
model_config_path = os.path.join(arg["ckpt_path"], 'config.ini')
if os.path.isfile(model_config_path):
model_config.read(model_config_path)
encoderPlugin = getT5EncoderPlugin(arg)
decodingPlugin = getT5DecodingPlugin(arg)
if encoderPlugin == None:
print("Failed making encoder plugin!")
return None
if decodingPlugin == None:
print("Failed making decoding plugin!")
return None
encoderLayer = network.add_plugin_v2([inputT0, inputT1], encoderPlugin)
decodingLayer = network.add_plugin_v2([encoderLayer.get_output(0), inputT1, inputT2, inputT3, inputT4, inputT5, inputT6, inputT7], decodingPlugin)
decodingLayer.get_output(0).name = "decodingOutput0"
decodingLayer.get_output(1).name = "decodingOutput1"
decodingLayer.get_output(0).dtype = npToTrt[np.int32]
decodingLayer.get_output(1).dtype = npToTrt[np.int32]
network.mark_output(decodingLayer.get_output(0))
network.mark_output(decodingLayer.get_output(1))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed getting serialized engine!")
return None
print("Succeeded getting serialized engine!")
with open(trtFileName, "wb") as f:
f.write(engineString)
print("Succeeded saving .plan file!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
return engine
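# Binding layout produced by buildEngine, kept as a reference for the index-based
# set_binding_shape / get_binding_shape calls in testBoth below:
#   0 inputId, 1 inputSeqLen, 2 inputTopK, 3 inputTopP,
#   4 inputBeam_search_diversity_rate, 5 inputTemperature,
#   6 inputLen_penalty, 7 inputRepetition_penalty,
#   8 decodingOutput0 (output token ids), 9 decodingOutput1 (output sequence lengths)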
def testBoth(arg, stream):
useFP16 = int(arg['data_type'] == 'fp16')
nBatchSize = arg['batch_size']
nSeqLen = arg['max_seq_len']
testCase = "<fp%s,bs=%d,sl=%d>" % (['32', '16'][useFP16], nBatchSize, nSeqLen)
print("Test both Encoder and Decoding", testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(arg['lib_path'])
trtFile = 'T5Engine-fp' + ['32', '16'][useFP16] + '.plan'
if os.path.isfile(trtFile):
with open(trtFile, 'rb') as f:
engineString = f.read()
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
engine = buildEngine(logger, arg, trtFile)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(engine.num_bindings):
# print("Bind[%2d]:i[%d]->"%(i,i) if engine.binding_is_input(i) else "Bind[%2d]:o[%d]->"%(i,i-nInput),
# engine.get_binding_dtype(i),engine.get_binding_shape(i),context.get_binding_shape(i),engine.get_binding_name(i))
tokenizer = T5Tokenizer.from_pretrained(arg['model'])
fast_tokenizer = PreTrainedTokenizerFast.from_pretrained(arg['model'])
with open(arg['source'], 'r') as f:
src_text = recover_bpe(f.readlines())
src_text = ["translate English to German: " + line.strip() for line in src_text]
with open(arg['target'], 'r') as f:
tgt_text = recover_bpe(f.readlines())
sys.stdout.flush()
outputId = []
outputSeqLen = []
prev = 0
needWarmUp = True
torch.cuda.synchronize()
start_time = datetime.now()
while prev < len(src_text):
input_texts = src_text[prev:prev + nBatchSize]
prev += nBatchSize
input_token = tokenizer(input_texts, return_tensors='pt', padding=True)
inputId = np.ascontiguousarray(input_token['input_ids'].numpy().astype(np.int32))
inputMask = np.ascontiguousarray(np.sum(input_token['attention_mask'].numpy(), 1).astype(np.int32))
nRealBatchSize, nRealSeqLen = np.shape(inputId)
context.set_binding_shape(0, [nRealBatchSize, nRealSeqLen])
context.set_binding_shape(1, [nRealBatchSize])
context.set_binding_shape(2, [nRealBatchSize])
context.set_binding_shape(3, [nRealBatchSize])
context.set_binding_shape(4, [nRealBatchSize])
context.set_binding_shape(5, [nRealBatchSize])
context.set_binding_shape(6, [nRealBatchSize])
context.set_binding_shape(7, [nRealBatchSize])
inputTopK = np.full([nRealBatchSize], arg['sampling_topk'], dtype=np.int32)
inputTopP = np.full([nRealBatchSize], arg['sampling_topp'], dtype=np.float32)
inputFBeamDiversity = np.full([nRealBatchSize], globalFBeamDiversity, dtype=np.float32)
inputFTemperature = np.full([nRealBatchSize], globalFTemperature, dtype=np.float32)
inputFLenPenalty = np.full([nRealBatchSize], globalFLenPenalty, dtype=np.float32)
inputFRepPenalty = np.full([nRealBatchSize], globalFRepPenalty, dtype=np.float32)
bufferD = []
bufferD.append(torch.from_numpy(inputId).to(device))
bufferD.append(torch.from_numpy(inputMask).to(device))
bufferD.append(torch.from_numpy(inputTopK).to(device))
bufferD.append(torch.from_numpy(inputTopP).to(device))
bufferD.append(torch.from_numpy(inputFBeamDiversity).to(device))
bufferD.append(torch.from_numpy(inputFTemperature).to(device))
bufferD.append(torch.from_numpy(inputFLenPenalty).to(device))
bufferD.append(torch.from_numpy(inputFRepPenalty).to(device))
bufferD.append(torch.empty(tuple(context.get_binding_shape(8)), dtype=torch.int32, device=device))
bufferD.append(torch.empty(tuple(context.get_binding_shape(9)), dtype=torch.int32, device=device))
torch.cuda.synchronize()
if needWarmUp:
for i in range(5):
context.execute_async_v2([b.data_ptr() for b in bufferD], stream)
prev = 0
needWarmUp = False
torch.cuda.synchronize()
start_time = datetime.now()
continue
context.execute_async_v2([b.data_ptr() for b in bufferD], stream)
torch.cuda.synchronize()
outputId.append(bufferD[nInput + 0].cpu().numpy())
outputSeqLen.append(bufferD[nInput + 1].cpu().numpy())
if len(outputId) >= arg["max_iteration"]:
break
stop_time = datetime.now()
execution_time = (stop_time - start_time).total_seconds()
outputText = []
for batch_token, batch_seq_len in zip(outputId, outputSeqLen):
for j in range(len(batch_token)):
outputText.append(fast_tokenizer.decode(batch_token[j][0][:batch_seq_len[j][0]], skip_special_tokens=True))
bleuScore = bleu_score(outputText, tgt_text[:len(outputText)])
with open("output.txt", 'w') as f:
for line in outputText:
f.write(line + '\n')
print("[INFO] FT translates {} batches taking {:.2f} sec to translate {} tokens, BLEU score: {:.2f}, {:.0f} tokens/sec.".format(len(outputText) // nBatchSize, execution_time, bleuScore.sys_len, bleuScore.score, bleuScore.sys_len / execution_time))
if arg["ft_BLEU_threshold"] != None:
assert bleuScore.score >= arg["ft_BLEU_threshold"], f"[ERROR] T5Plugin Test FAIL !"
print(f"[INFO] T5Plugin Test PASS !")
print(f"[INFO] Test both Encoder and Decoding {testCase} finish!")
if __name__ == '__main__':
np.set_printoptions(precision=4, linewidth=200, suppress=True)
torch.cuda.set_device(device)
stream = 0 #torch.cuda.Stream(device).cuda_stream
#os.system('rm -f ./*.plan ./*.in')
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, metavar='NUMBER', default=32, help='batch size (default: 32)')
parser.add_argument('-beam', '--beam_width', type=int, metavar='NUMBER', default=4, help='beam width (default: 4)')
    parser.add_argument('-s', '--max_seq_len', type=int, metavar='NUMBER', default=128, help='max sequence length (default: 128)')
parser.add_argument('--source', type=str, metavar='STRING', default="../examples/pytorch/decoding/utils/translation/test.en", help="Path to the source file.")
parser.add_argument('--target', type=str, metavar='STRING', default="../examples/pytorch/decoding/utils/translation/test.de", help="Path to the target file.")
    parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, metavar='NUMBER', default=0.0, help='diversity rate of beam search. Default is 0.0; a diversity rate of 0 is equivalent to naive beam search.')
    parser.add_argument('-topk', '--sampling_topk', type=int, metavar='NUMBER', default=4, help='Candidate (k) value of top-k sampling in decoding. Default is 4.')
    parser.add_argument('-topp', '--sampling_topp', type=float, metavar='NUMBER', default=0.0, help='Probability (p) value of top-p sampling in decoding. Default is 0.0.')
parser.add_argument('-d', '--data_type', type=str, metavar='STRING', default="fp32", help='data type (default: fp32)', choices=['fp32', 'fp16'])
parser.add_argument('-lib_path', '--lib_path', type=str, metavar='STRING', default="lib/libtrt_t5.so", help='the path of FasterTransformer pytorch t5 op library.')
parser.add_argument('-model', '--model', type=str, metavar='STRING', default="t5-small", help='T5 model size.', choices=["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"])
parser.add_argument( '--ckpt_path', type=str, metavar='STRING', help='path to the checkpoint file.')
    parser.add_argument('-max_ite', '--max_iteration', type=int, metavar='NUMBER', default=100000, help='Maximum iteration for translation, default is 100000 (as large as possible to run the whole test set).')
parser.add_argument('--ft_BLEU_threshold', type=float, help='Threshold of FT BLEU score')
arg = vars(parser.parse_args())
testBoth(arg, stream)
print("Test finish!")
| FasterTransformer-main | examples/tensorrt/t5/testT5Plugin.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import numpy as np
import os
import os.path
import tensorrt as trt
from scipy import ndimage
def load_weights(weight_path:str):
suffix = weight_path.split('.')[-1]
if suffix != 'npz':
print("Unsupported weight file: Unrecognized format %s " % suffix)
exit(-1)
return np.load(weight_path)
class ViTPluginLoader:
def __init__(self, plugin_path) -> None:
handle = ctypes.CDLL(plugin_path, mode=ctypes.RTLD_GLOBAL)
if not handle:
raise RuntimeError("Fail to load plugin library: %s" % plugin_path)
self.logger_ = trt.Logger(trt.Logger.VERBOSE)
trt.init_libnvinfer_plugins(self.logger_, "")
plg_registry = trt.get_plugin_registry()
self.plg_creator = plg_registry.get_plugin_creator("CustomVisionTransformerPlugin", "1", "")
def load_model_config(self, config, args):
self.patch_size_ = config.patches.size[0]
self.num_heads_ = config.transformer.num_heads
self.layer_num_ = config.transformer.num_layers
self.inter_size_ = config.transformer.mlp_dim
self.embed_dim_ = config.hidden_size
self.max_batch_ = args.batch_size
self.img_size_ = args.img_size
self.with_class_token_ = (config.classifier == 'token')
        self.seq_len_ = pow(self.img_size_//self.patch_size_, 2) + (1 if self.with_class_token_ else 0)
self.in_chans_ = 3
self.is_fp16_ = args.fp16
self.serial_name_ = "ViTEngine_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(self.patch_size_,
self.num_heads_ ,
self.layer_num_ ,
self.inter_size_,
self.embed_dim_ ,
self.max_batch_ ,
self.img_size_ ,
self.seq_len_,
int(self.is_fp16_))
self.value_holder = []
def build_plugin_field_collection(self, weights):
field_type = trt.PluginFieldType.FLOAT16 if self.is_fp16_ else trt.PluginFieldType.FLOAT32
arr_type = np.float16 if self.is_fp16_ else np.float32
self.value_holder = [np.array([self.max_batch_ ]).astype(np.int32),
np.array([self.img_size_ ]).astype(np.int32),
np.array([self.patch_size_]).astype(np.int32),
np.array([self.in_chans_ ]).astype(np.int32),
np.array([self.embed_dim_ ]).astype(np.int32),
np.array([self.num_heads_ ]).astype(np.int32),
np.array([self.inter_size_]).astype(np.int32),
np.array([self.layer_num_ ]).astype(np.int32),
np.array([self.with_class_token_]).astype(np.int32)
]
max_batch = trt.PluginField("max_batch", self.value_holder[0], trt.PluginFieldType.INT32)
img_size = trt.PluginField("img_size", self.value_holder[1], trt.PluginFieldType.INT32)
patch_size = trt.PluginField("patch_size", self.value_holder[2], trt.PluginFieldType.INT32)
in_chans = trt.PluginField("in_chans", self.value_holder[3], trt.PluginFieldType.INT32)
embed_dim = trt.PluginField("embed_dim", self.value_holder[4], trt.PluginFieldType.INT32)
num_heads = trt.PluginField("num_heads", self.value_holder[5], trt.PluginFieldType.INT32)
inter_size = trt.PluginField("inter_size", self.value_holder[6], trt.PluginFieldType.INT32)
layer_num = trt.PluginField("layer_num", self.value_holder[7], trt.PluginFieldType.INT32)
with_cls_token = trt.PluginField("with_cls_token", self.value_holder[8], trt.PluginFieldType.INT32)
part_fc = []
for name in weights.files:
if name == 'embedding/kernel': #transpose conv kernel
w = np.ascontiguousarray(weights[name].transpose([3, 2, 0, 1]).copy())
self.value_holder.append(w.astype(arr_type))
elif name == 'Transformer/posembed_input/pos_embedding':
w = weights[name]
if w.shape[1] != self.seq_len_:
print("load_pretrained: resized variant: %s to %s" % (w.shape[1], self.seq_len_))
ntok_new = self.seq_len_
if self.with_class_token_:
posemb_tok, posemb_grid = w[:, :1], w[0, 1:]
ntok_new -= 1
else:
posemb_tok, posemb_grid = w[:, :0], w[0]
gs_old = int(np.sqrt(len(posemb_grid)))
gs_new = int(np.sqrt(ntok_new))
print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))
posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
zoom = (gs_new / gs_old, gs_new / gs_old, 1)
posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)
posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
w = np.concatenate([posemb_tok, posemb_grid], axis=1)
self.value_holder.append(w.astype(arr_type))
elif name == 'cls' and (not self.with_class_token_):
continue
else:
self.value_holder.append(weights[name].astype(arr_type))
part_fc.append(trt.PluginField(name, self.value_holder[-1], field_type))
return trt.PluginFieldCollection([max_batch, img_size, patch_size, in_chans, embed_dim, num_heads, inter_size, layer_num, with_cls_token] + part_fc)
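    # Note on the 'Transformer/posembed_input/pos_embedding' branch above: when the
    # checkpoint was trained at a different resolution, the grid part of the position
    # embedding is reshaped back to its old 2-D grid, resized with ndimage.zoom
    # (order=1, i.e. linear interpolation) to the new grid, re-flattened, and the
    # class-token embedding (if any) is concatenated back in front.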
def build_network(self, weights_path):
trt_dtype = trt.float16 if self.is_fp16_ else trt.float32
explicit_batch_flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
weights = load_weights(weights_path)
with trt.Builder(self.logger_) as builder, builder.create_network(explicit_batch_flag) as network, builder.create_builder_config() as builder_config:
builder_config.max_workspace_size = 8 << 30
if self.is_fp16_:
builder_config.set_flag(trt.BuilderFlag.FP16)
builder_config.set_flag(trt.BuilderFlag.STRICT_TYPES)
# Create the network
input_tensor = network.add_input(name="input_img", dtype=trt_dtype, shape=(-1, self.in_chans_, self.img_size_, self.img_size_))
# Specify profiles
profile = builder.create_optimization_profile()
min_shape = (1, self.in_chans_, self.img_size_, self.img_size_)
##TODO: There is a bug in TRT when opt batch is large
max_shape = (self.max_batch_, self.in_chans_, self.img_size_, self.img_size_)
profile.set_shape("input_img", min=min_shape, opt=min_shape, max=max_shape)
builder_config.add_optimization_profile(profile)
#import pdb;pdb.set_trace()
print("Generate plugin field collection...")
pfc = self.build_plugin_field_collection(weights)
fn = self.plg_creator.create_plugin("vision_transformer", pfc)
inputs = [input_tensor]
vit = network.add_plugin_v2(inputs, fn)
output_tensor = vit.get_output(0)
            output_tensor.name = "vision_transformer_output"
if self.is_fp16_:
vit.precision = trt.float16
vit.set_output_type(0, trt.float16)
network.mark_output(output_tensor)
print("Building TRT engine....")
engine = builder.build_engine(network, builder_config)
return engine
def serialize_engine(self, engine, file_folder='./'):
if not os.path.isdir(file_folder):
self.logger_.log(self.logger_.VERBOSE, "%s is not a folder." % file_folder)
exit(-1)
file_path =os.path.join(file_folder, self.serial_name_)
self.logger_.log(self.logger_.VERBOSE, "Serializing Engine...")
serialized_engine = engine.serialize()
self.logger_.log(self.logger_.INFO, "Saving Engine to {:}".format(file_path))
with open(file_path, "wb") as fout:
fout.write(serialized_engine)
self.logger_.log(self.logger_.INFO, "Done.")
def deserialize_engine(self, file_folder='./'):
if not os.path.isdir(file_folder):
self.logger_.log(self.logger_.VERBOSE, "%s is not a folder." % file_folder)
exit(-1)
file_path =os.path.join(file_folder, self.serial_name_)
if not os.path.isfile(file_path):
self.logger_.log(self.logger_.VERBOSE, "%s not exists. " % file_path)
return None
filename = os.path.basename(file_path)
info = filename.split('_')
self.patch_size_ = int(info[1])
self.num_heads_ = int(info[2])
self.layer_num_ = int(info[3])
self.inter_size_ = int(info[4])
self.embed_dim_ = int(info[5])
self.max_batch_ = int(info[6])
self.img_size_ = int(info[7])
self.seq_len_ = int(info[8])
        self.is_fp16_ = bool(int(info[9]))
self.in_chans_ = 3
with open(file_path, 'rb') as f:
runtime = trt.Runtime(self.logger_)
return runtime.deserialize_cuda_engine(f.read())
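# Illustrative helper (an addition, not part of the original loader): a typical
# build-or-reuse flow around ViTPluginLoader, mirroring how the infer scripts
# drive it; 'weights_path' is the .npz checkpoint passed to build_network.
def build_or_load_engine(plugin_path, config, args, weights_path, engine_folder='./'):
    loader = ViTPluginLoader(plugin_path)
    loader.load_model_config(config, args)
    engine = loader.deserialize_engine(engine_folder)
    if engine is None:
        engine = loader.build_network(weights_path)
        loader.serialize_engine(engine, engine_folder)
    return engine, loader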
| FasterTransformer-main | examples/tensorrt/vit/plugin_loader.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import tensorrt as trt
import ctypes
import sys
sys.path.insert(0, "../../pytorch/vit/ViT-quantization")
sys.path.insert(0, "../../pytorch/vit/ViT-quantization/ViT-pytorch")
from vit_int8 import VisionTransformerINT8
import quant_utils
from models.modeling import CONFIGS
from plugin_loader_int8 import ViTINT8PluginLoader
test_time = 100
warmup_time = 10
def setup_torch(args):
# Prepare model
config = CONFIGS[args.model_type]
print(config)
model = VisionTransformerINT8(config, args.img_size, zero_head=False, num_classes=1000)
model.load_state_dict(torch.load(args.pretrained_dir))
quant_utils.configure_model(model, args, calib=False)
model.to(args.device)
return config, model
def setup_trt(args, config, model):
p_loader = ViTINT8PluginLoader(args.plugin_path)
p_loader.load_model_config(config, args)
engine = p_loader.build_network(model.state_dict())
p_loader.serialize_engine(engine)
return engine, p_loader
def parse_option():
parser = argparse.ArgumentParser('ViT evaluation script', add_help=False)
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
"ViT-L_32", "ViT-H_14"],
default="ViT-B_16",
help="Which variant to use.")
parser.add_argument("--img_size", default=384, type=int,
help="Resolution size")
parser.add_argument("--pretrained_dir", type=str, default="checkpoint/ViT-B_16.npz",
help="Where to search for pretrained ViT models.")
# easy config modification
parser.add_argument('--plugin_path', type=str, default="../../../build/lib/libvit_plugin.so", help='path to plugin lib')
parser.add_argument('--batch-size', type=int, default=32, help="batch size for single GPU")
parser.add_argument('--int8-mode', type=int, default=2, choices=[1, 2],
help="Which int8 mode to use, choose from [1, 2]")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1, help='local rank for DistributedDataParallel')
quant_utils.add_arguments(parser)
args, unparsed = parser.parse_known_args()
if args.quant_mode is not None:
args = quant_utils.set_args(args)
quant_utils.set_default_quantizers(args)
if args.quant_mode == 'ft1':
args.int8_mode = 1
elif args.quant_mode == 'ft2':
args.int8_mode = 2
else:
raise NotImplementedError("For ViT-INT8, we only support ft1/ft2 as quant_mode")
return args
def main(args):
config, model = setup_torch(args)
engine, p_loader = setup_trt(args, config, model)
validate_with_random_data(p_loader, model, engine)
@torch.no_grad()
def run_trt_plugin(plugin_loader:ViTINT8PluginLoader, images, engine):
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
seq_len = plugin_loader.seq_len_
embed_dim = plugin_loader.embed_dim_
max_batch = plugin_loader.max_batch_
img_size = plugin_loader.img_size_
in_chans = plugin_loader.in_chans_
with engine.create_execution_context() as context:
context.active_optimization_profile = 0
stream = torch.cuda.Stream()
context.set_binding_shape(0, (max_batch, in_chans, img_size, img_size))
output_shape = tuple(context.get_binding_shape(1))
print(output_shape)
# Copy input h2d
d_inputs = [images]
d_output = torch.empty(output_shape, dtype=torch.float32).cuda()
# warm up
for i in range(warmup_time):
context.execute_async_v2([d_inp.data_ptr() for d_inp in d_inputs] + [d_output.data_ptr()], stream.cuda_stream)
#ignore the last fc layer
torch.cuda.synchronize()
op_end = time.time()
for i in range(test_time):
context.execute_async_v2([d_inp.data_ptr() for d_inp in d_inputs] + [d_output.data_ptr()], stream.cuda_stream)
stream.synchronize()
torch.cuda.synchronize()
print("plugin time : ", (time.time() - op_end)/test_time*1000.0, "ms")
return d_output.cpu().numpy()
@torch.no_grad()
def run_torch(model, images, mark):
torch_output = model.transformer(images)
torch_output = torch_output[0].cpu().numpy()
return torch_output
@torch.no_grad()
def validate_with_random_data(plugin_loader:ViTINT8PluginLoader, model, engine):
model.eval()
model.half()
dtype_torch = torch.float16
max_batch = plugin_loader.max_batch_
img_size = plugin_loader.img_size_
in_chans = plugin_loader.in_chans_
image = np.random.rand(1, in_chans, img_size, img_size)
images = np.repeat(image, max_batch, axis=0)
images_tensor = torch.tensor(images, dtype=dtype_torch)
images_tensor = images_tensor.cuda(non_blocking=True)
plugin_output = run_trt_plugin(plugin_loader, images_tensor, engine)
torch_output = run_torch(model, images_tensor, "torch")
print(torch_output.shape)
print(plugin_output.shape)
diff = abs(torch_output - plugin_output.reshape(torch_output.shape))
print("torch_output vs plugin_output , avg diff : ", diff.mean(), "max diff : ", diff.max())
if __name__ == '__main__':
args = parse_option()
seed = args.seed + int(time.time())
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# Setup CUDA, GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
main(args)
| FasterTransformer-main | examples/tensorrt/vit/infer_visiontransformer_int8_plugin.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import tensorrt as trt
import ctypes
import sys
sys.path.insert(0, "../../pytorch/vit/ViT-quantization/ViT-pytorch")
from models.modeling import VisionTransformer, CONFIGS
from plugin_loader import ViTPluginLoader
test_time = 100
warmup_time = 10
def setup_torch(args):
# Prepare model
config = CONFIGS[args.model_type]
print(config)
model = VisionTransformer(config, args.img_size, zero_head=False, num_classes=1000)
model.load_from(np.load(args.pretrained_dir))
model.to(args.device)
return config, model
def setup_trt(args, config):
p_loader = ViTPluginLoader(args.plugin_path)
p_loader.load_model_config(config, args)
engine = p_loader.build_network(args.pretrained_dir)
return engine, p_loader
def parse_option():
parser = argparse.ArgumentParser('ViT evaluation script', add_help=False)
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
"ViT-L_32", "ViT-H_14"],
default="ViT-B_16",
help="Which variant to use.")
parser.add_argument("--img_size", default=384, type=int,
help="Resolution size")
parser.add_argument("--pretrained_dir", type=str, default="checkpoint/ViT-B_16.npz",
help="Where to search for pretrained ViT models.")
# easy config modification
parser.add_argument('--plugin_path', type=str, default="../../../build/lib/libvit_plugin.so", help='path to plugin lib')
parser.add_argument('--batch-size', type=int, default=32, help="batch size for single GPU")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
args, unparsed = parser.parse_known_args()
return args
def main(args):
config, model = setup_torch(args)
engine, p_loader = setup_trt(args, config)
validate_with_random_data(p_loader, model, engine)
@torch.no_grad()
def run_trt_plugin(plugin_loader:ViTPluginLoader, images, engine):
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
seq_len = plugin_loader.seq_len_
embed_dim = plugin_loader.embed_dim_
max_batch = plugin_loader.max_batch_
img_size = plugin_loader.img_size_
in_chans = plugin_loader.in_chans_
is_fp16 = plugin_loader.is_fp16_
dtype_trt = trt.float16 if is_fp16 else trt.float32
dtype_np = np.float16 if is_fp16 else np.float32
dtype_torch = torch.float16 if is_fp16 else torch.float32
with engine.create_execution_context() as context:
context.active_optimization_profile = 0
stream = torch.cuda.Stream()
context.set_binding_shape(0, (max_batch, in_chans, img_size, img_size))
output_shape = tuple(context.get_binding_shape(1))
print(output_shape)
# Copy input h2d
d_inputs = [images]
d_output = torch.empty(output_shape, dtype=torch.float32).cuda()
# warm up
for i in range(warmup_time):
context.execute_async_v2([d_inp.data_ptr() for d_inp in d_inputs] + [d_output.data_ptr()], stream.cuda_stream)
#ignore the last fc layer
torch.cuda.synchronize()
op_end = time.time()
for i in range(test_time):
context.execute_async_v2([d_inp.data_ptr() for d_inp in d_inputs] + [d_output.data_ptr()], stream.cuda_stream)
stream.synchronize()
torch.cuda.synchronize()
print("plugin time : ", (time.time() - op_end)/test_time*1000.0, "ms")
return d_output.cpu().numpy()
@torch.no_grad()
def run_torch(model, images, mark):
# warm up
for i in range(warmup_time):
output = model(images)
torch.cuda.synchronize()
torch_start = time.time()
for i in range(test_time):
torch_output = model.transformer(images)
torch.cuda.synchronize()
torch_end = time.time()
embed = model.transformer.embeddings(images)
np.save('embed_torch.npy',embed.cpu().numpy())
torch_output = torch_output[0].cpu().numpy()
np.save('torch_out.npy', torch_output)
print(mark + " time : ", (torch_end - torch_start)/test_time*1000.0, "ms")
return torch_output
@torch.no_grad()
def validate_with_random_data(plugin_loader:ViTPluginLoader, model, engine):
model.eval()
if plugin_loader.is_fp16_:
model.half()
dtype_torch = torch.float16 if plugin_loader.is_fp16_ else torch.float
max_batch = plugin_loader.max_batch_
img_size = plugin_loader.img_size_
in_chans = plugin_loader.in_chans_
image = np.random.rand(1, in_chans, img_size, img_size)
images = np.repeat(image, max_batch, axis=0)
images_tensor = torch.tensor(images, dtype=dtype_torch)
images_tensor = images_tensor.cuda(non_blocking=True)
plugin_output = run_trt_plugin(plugin_loader, images_tensor, engine)
torch_output = run_torch(model, images_tensor, "torch")
print(torch_output.shape)
print(plugin_output.shape)
diff = abs(torch_output - plugin_output.reshape(torch_output.shape))
print("torch_output vs plugin_output , avg diff : ", diff.mean(), "max diff : ", diff.max())
assert diff.mean() < 0.006, "[ERROR] VIT TRT PLUGIN TEST FAIL !"
print("[INFO] VIT TRT PLUGIN TEST PASS !")
if __name__ == '__main__':
args = parse_option()
seed = args.seed + int(time.time())
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# Setup CUDA, GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
main(args)
| FasterTransformer-main | examples/tensorrt/vit/infer_visiontransformer_plugin.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import sys
sys.path.insert(0, "../../pytorch/vit")
from VisionTransformerINT8WeightLoader import ViTINT8WeightLoader
import numpy as np
import os
import os.path
import tensorrt as trt
import torch
def load_weights(weight_path:str):
suffix = weight_path.split('.')[-1]
if suffix != 'pth':
print("Unsupported weight file: Unrecognized format %s " % suffix)
exit(-1)
return torch.load(weight_path, map_location="cpu")
class ViTINT8PluginLoader:
def __init__(self, plugin_path) -> None:
handle = ctypes.CDLL(plugin_path, mode=ctypes.RTLD_GLOBAL)
if not handle:
raise RuntimeError("Fail to load plugin library: %s" % plugin_path)
self.logger_ = trt.Logger(trt.Logger.INFO)
trt.init_libnvinfer_plugins(self.logger_, "")
plg_registry = trt.get_plugin_registry()
self.plg_creator = plg_registry.get_plugin_creator("CustomVisionTransformerINT8Plugin", "1", "")
def load_model_config(self, config, args):
self.patch_size_ = config.patches.size[0]
self.num_heads_ = config.transformer.num_heads
self.layer_num_ = config.transformer.num_layers
self.inter_size_ = config.transformer.mlp_dim
self.embed_dim_ = config.hidden_size
self.max_batch_ = args.batch_size
self.img_size_ = args.img_size
self.with_class_token_ = (config.classifier == 'token')
        self.seq_len_ = pow(self.img_size_//self.patch_size_, 2) + (1 if self.with_class_token_ else 0)
self.in_chans_ = 3
self.int8_mode_ = args.int8_mode
self.serial_name_ = "ViTINT8Engine_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(self.patch_size_,
self.num_heads_ ,
self.layer_num_ ,
self.inter_size_,
self.embed_dim_ ,
self.max_batch_ ,
self.img_size_ ,
self.seq_len_,
self.int8_mode_)
self.value_holder = []
def build_plugin_field_collection(self, weights):
field_type = trt.PluginFieldType.FLOAT16
arr_type = np.float16
self.value_holder = [np.array([self.max_batch_ ]).astype(np.int32),
np.array([self.img_size_ ]).astype(np.int32),
np.array([self.patch_size_]).astype(np.int32),
np.array([self.in_chans_ ]).astype(np.int32),
np.array([self.embed_dim_ ]).astype(np.int32),
np.array([self.num_heads_ ]).astype(np.int32),
np.array([self.inter_size_]).astype(np.int32),
np.array([self.layer_num_ ]).astype(np.int32),
np.array([self.int8_mode_ ]).astype(np.int32),
np.array([self.with_class_token_]).astype(np.int32)
]
max_batch = trt.PluginField("max_batch", self.value_holder[0], trt.PluginFieldType.INT32)
img_size = trt.PluginField("img_size", self.value_holder[1], trt.PluginFieldType.INT32)
patch_size = trt.PluginField("patch_size", self.value_holder[2], trt.PluginFieldType.INT32)
in_chans = trt.PluginField("in_chans", self.value_holder[3], trt.PluginFieldType.INT32)
embed_dim = trt.PluginField("embed_dim", self.value_holder[4], trt.PluginFieldType.INT32)
num_heads = trt.PluginField("num_heads", self.value_holder[5], trt.PluginFieldType.INT32)
inter_size = trt.PluginField("inter_size", self.value_holder[6], trt.PluginFieldType.INT32)
layer_num = trt.PluginField("layer_num", self.value_holder[7], trt.PluginFieldType.INT32)
int8_mode = trt.PluginField("int8_mode", self.value_holder[8], trt.PluginFieldType.INT32)
with_cls_token = trt.PluginField("with_cls_token", self.value_holder[9], trt.PluginFieldType.INT32)
vit_weights = ViTINT8WeightLoader(self.layer_num_, self.img_size_, self.patch_size_, weights,
classifier='token' if self.with_class_token_ else '' )
vit_weights.to_int8(ths_path='../../../build/lib/libpyt_vit.so')
vit_weights.to_cuda()
weights = vit_weights.listed_weight_to_dict()
part_fc = []
for name in weights.keys():
if name == 'transformer.embeddings.cls_token' and (not self.with_class_token_):
continue
elif name.split('.')[-1] == 'amaxList' or name.split('.')[-1] == 'h_amaxList':
self.value_holder.append(weights[name].cpu().numpy().astype(np.float32))
part_fc.append(trt.PluginField(name, self.value_holder[-1], trt.PluginFieldType.FLOAT32))
else:
self.value_holder.append(weights[name].cpu().numpy().astype(np.float16))
part_fc.append(trt.PluginField(name, self.value_holder[-1], trt.PluginFieldType.FLOAT16))
return trt.PluginFieldCollection([max_batch, img_size, patch_size, in_chans, embed_dim, num_heads, inter_size, layer_num, int8_mode, with_cls_token] + part_fc)
def build_network(self, weights):
explicit_batch_flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
# weights = load_weights(weights_path)
with trt.Builder(self.logger_) as builder, builder.create_network(explicit_batch_flag) as network, builder.create_builder_config() as builder_config:
builder_config.max_workspace_size = 8 << 30
builder_config.set_flag(trt.BuilderFlag.FP16)
builder_config.set_flag(trt.BuilderFlag.STRICT_TYPES)
# Create the network
input_tensor = network.add_input(name="input_img", dtype=trt.float16, shape=(-1, self.in_chans_, self.img_size_, self.img_size_))
# Specify profiles
profile = builder.create_optimization_profile()
min_shape = (1, self.in_chans_, self.img_size_, self.img_size_)
##TODO: There is a bug in TRT when opt batch is large
max_shape = (self.max_batch_, self.in_chans_, self.img_size_, self.img_size_)
profile.set_shape("input_img", min=min_shape, opt=min_shape, max=max_shape)
builder_config.add_optimization_profile(profile)
#import pdb;pdb.set_trace()
print("Generate plugin field collection...")
pfc = self.build_plugin_field_collection(weights)
fn = self.plg_creator.create_plugin("vision_transformer", pfc)
inputs = [input_tensor]
vit = network.add_plugin_v2(inputs, fn)
output_tensor = vit.get_output(0)
            output_tensor.name = "vision_transformer_output"
vit.precision = trt.float16
vit.set_output_type(0, trt.float16)
network.mark_output(output_tensor)
print("Building TRT engine....")
engine = builder.build_engine(network, builder_config)
return engine
def serialize_engine(self, engine, file_folder='./'):
if not os.path.isdir(file_folder):
self.logger_.log(self.logger_.VERBOSE, "%s is not a folder." % file_folder)
exit(-1)
file_path =os.path.join(file_folder, self.serial_name_)
self.logger_.log(self.logger_.VERBOSE, "Serializing Engine...")
serialized_engine = engine.serialize()
self.logger_.log(self.logger_.INFO, "Saving Engine to {:}".format(file_path))
with open(file_path, "wb") as fout:
fout.write(serialized_engine)
self.logger_.log(self.logger_.INFO, "Done.")
def deserialize_engine(self, file_folder='./'):
if not os.path.isdir(file_folder):
self.logger_.log(self.logger_.VERBOSE, "%s is not a folder." % file_folder)
exit(-1)
file_path =os.path.join(file_folder, self.serial_name_)
if not os.path.isfile(file_path):
self.logger_.log(self.logger_.VERBOSE, "%s not exists. " % file_path)
return None
filename = os.path.basename(file_path)
info = filename.split('_')
self.patch_size_ = int(info[1])
self.num_heads_ = int(info[2])
self.layer_num_ = int(info[3])
self.inter_size_ = int(info[4])
self.embed_dim_ = int(info[5])
self.max_batch_ = int(info[6])
self.img_size_ = int(info[7])
self.seq_len_ = int(info[8])
self.int8_mode_ = int(info[9])
self.in_chans_ = 3
with open(file_path, 'rb') as f:
runtime = trt.Runtime(self.logger_)
return runtime.deserialize_cuda_engine(f.read())
| FasterTransformer-main | examples/tensorrt/vit/plugin_loader_int8.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage example
# python ckpt_type_convert.py --init_checkpoint=mrpc_output/model.ckpt-343 --fp16_checkpoint=mrpc_output/fp16_model.ckpt
import numpy as np
import os
import tensorflow as tf
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.ops import io_ops
from tensorflow.python.training.saver import BaseSaverBuilder
def checkpoint_dtype_cast(in_checkpoint_file, out_checkpoint_file):
var_list = checkpoint_utils.list_variables(in_checkpoint_file)
def init_graph():
for name, shape in var_list:
var = checkpoint_utils.load_variable(in_checkpoint_file, name)
if "quant" in name or "amaxList" in name:
recon_dtype = var.dtype
else:
recon_dtype = tf.float16 if var.dtype == np.float32 else var.dtype
tf.get_variable(name, shape=shape, dtype=recon_dtype)
init_graph()
saver = tf.train.Saver(builder=CastFromFloat32SaverBuilder())
with tf.Session() as sess:
saver.restore(sess, in_checkpoint_file)
saver.save(sess, 'tmp-ckpt/tmp.ckpt')
tf.reset_default_graph()
init_graph()
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, 'tmp-ckpt/tmp.ckpt')
saver.save(sess, out_checkpoint_file)
os.system("rm tmp-ckpt -r")
class CastFromFloat32SaverBuilder(BaseSaverBuilder):
# Based on tensorflow.python.training.saver.BulkSaverBuilder.bulk_restore
def bulk_restore(self, filename_tensor, saveables, preferred_shard,
restore_sequentially):
restore_specs = []
for saveable in saveables:
for spec in saveable.specs:
restore_specs.append((spec.name, spec.slice_spec, spec.dtype))
names, slices, dtypes = zip(*restore_specs)
restore_dtypes = [tf.float32 if dtype.base_dtype==tf.float16 else dtype for dtype in dtypes]
# print info
for i in range(len(restore_specs)):
print(names[i], 'from', restore_dtypes[i], 'to', dtypes[i].base_dtype)
with tf.device("cpu:0"):
restored = io_ops.restore_v2(
filename_tensor, names, slices, restore_dtypes)
return [tf.cast(r, dt.base_dtype) for r, dt in zip(restored, dtypes)]
if __name__ == '__main__':
tf.flags.DEFINE_string("fp16_checkpoint", None, "fp16 checkpoint file")
tf.flags.DEFINE_string("init_checkpoint", None, "initial checkpoint file")
checkpoint_dtype_cast(tf.flags.FLAGS.init_checkpoint, tf.flags.FLAGS.fp16_checkpoint)
| FasterTransformer-main | examples/tensorflow/ckpt_type_convert.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import tensorflow as tf
def pad_in_time(x, padding_length):
"""Helper function to pad a tensor in the time dimension and retain the static depth dimension."""
return tf.pad(x, [[0, 0], [0, padding_length], [0, 0]])
def align_in_time(x, length):
"""Aligns the time dimension of :obj:`x` with :obj:`length`."""
time_dim = tf.shape(x)[1]
return tf.cond(
tf.less(time_dim, length),
true_fn=lambda: pad_in_time(x, length - time_dim),
false_fn=lambda: x[:, :length])
def pad_with_identity(x, sequence_length, max_sequence_length, identity_values=0, maxlen=None):
"""Pads a tensor with identity values up to :obj:`max_sequence_length`.
Args:
x: A ``tf.Tensor`` of shape ``[batch_size, time, depth]``.
sequence_length: The true sequence length of :obj:`x`.
max_sequence_length: The sequence length up to which the tensor must contain
:obj:`identity values`.
identity_values: The identity value.
maxlen: Size of the output time dimension. Default is the maximum value in
obj:`max_sequence_length`.
Returns:
A ``tf.Tensor`` of shape ``[batch_size, maxlen, depth]``.
"""
if maxlen is None:
maxlen = tf.reduce_max(max_sequence_length)
mask = tf.sequence_mask(sequence_length, maxlen=maxlen, dtype=x.dtype)
mask = tf.expand_dims(mask, axis=-1)
mask_combined = tf.sequence_mask(
max_sequence_length, maxlen=maxlen, dtype=x.dtype)
mask_combined = tf.expand_dims(mask_combined, axis=-1)
identity_mask = mask_combined * (1.0 - mask)
x = pad_in_time(x, maxlen - tf.shape(x)[1])
x = x * mask + (identity_mask * identity_values)
return x
def pad_n_with_identity(inputs, sequence_lengths, identity_values=0):
"""Pads each input tensors with identity values up to
``max(sequence_lengths)`` for each batch.
Args:
inputs: A list of ``tf.Tensor``.
sequence_lengths: A list of sequence length.
identity_values: The identity value.
Returns:
      A tuple ``(padded, max_sequence_length)``: respectively, a list of ``tf.Tensor``
      where each tensor is padded with identity values, and the combined sequence length.
"""
max_sequence_length = tf.reduce_max(sequence_lengths, axis=0)
maxlen = tf.reduce_max([tf.shape(x)[1] for x in inputs])
padded = [
pad_with_identity(
x, length, max_sequence_length, identity_values=identity_values, maxlen=maxlen)
for x, length in zip(inputs, sequence_lengths)]
return padded, max_sequence_length
class Reducer():
"""Base class for reducers."""
def zip_and_reduce(self, x, y):
"""Zips the :obj:`x` with :obj:`y` structures together and reduces all
elements. If the structures are nested, they will be flattened first.
Args:
x: The first structure.
y: The second structure.
Returns:
The same structure as :obj:`x` and :obj:`y` where each element from
        :obj:`x` is reduced with the corresponding element from :obj:`y`.
Raises:
ValueError: if the two structures are not the same.
"""
tf.nest.assert_same_structure(x, y)
x_flat = tf.nest.flatten(x)
y_flat = tf.nest.flatten(y)
reduced = list(map(self, zip(x_flat, y_flat)))
return tf.nest.pack_sequence_as(x, reduced)
def __call__(self, inputs, sequence_length=None):
"""Reduces all input elements.
Args:
inputs: A list of ``tf.Tensor``.
sequence_length: The length of each input, if reducing sequences.
Returns:
If :obj:`sequence_length` is set, a tuple
``(reduced_input, reduced_length)``, otherwise a reduced ``tf.Tensor``
only.
"""
if sequence_length is None:
return self.reduce(inputs)
else:
return self.reduce_sequence(inputs, sequence_lengths=sequence_length)
@abc.abstractmethod
def reduce(self, inputs):
"""See :meth:`opennmt.layers.Reducer.__call__`."""
raise NotImplementedError()
@abc.abstractmethod
def reduce_sequence(self, inputs, sequence_lengths):
"""See :meth:`opennmt.layers.Reducer.__call__`."""
raise NotImplementedError()
class SumReducer(Reducer):
"""A reducer that sums the inputs."""
def reduce(self, inputs):
return tf.add_n(inputs)
def reduce_sequence(self, inputs, sequence_lengths):
padded, combined_length = pad_n_with_identity(
inputs, sequence_lengths, identity_values=0)
return self.reduce(padded), combined_length
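# Illustrative sketch (not part of the original API docs): combining two hypothetical
# [batch, time, depth] memories of different lengths with SumReducer, which relies on
# pad_n_with_identity above to zero-pad (the identity of addition) before tf.add_n:
#   reducer = SumReducer()
#   summed, combined_length = reducer([memory_a, memory_b],
#                                     sequence_length=[length_a, length_b])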
| FasterTransformer-main | examples/tensorflow/common_utils/reducer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import tensorflow as tf
import numpy as np
import ctypes
class TransformerArgument:
def __init__( self,
beam_width,
head_num,
size_per_head,
inter_size,
num_layer,
dtype=tf.float32,
kernel_init_range=0.02,
bias_init_range=0.02,
fuse_qkv=True,
remove_padding=False,
int8_mode=0,
allow_gemm_test=False,
memory_hidden_dim=-1):
'''
The arguments of Transformer layer (for both encoder and decoder).
Args:
beam_width: The beam_width size for beam search. This argument is always one for encoder.
head_num: The head number of self attention in transformer layer.
size_per_head: The size of hidden dimension for each head of self attention in transformer layer.
inter_size: The size of intermediate dimension of FFN layer.
num_layer: The number of transformer layer. For example, BERT-base uses 12 layers.
dtype: The data type of weights initializer and inputs.
            kernel_init_range: The initializer range of kernel for all convolution layers and fully-connected layers.
            bias_init_range: The initializer range of bias for all convolution layers and fully-connected layers.
fuse_qkv: bool. Whether fuse the q, k, v gemm or not.
remove_padding: bool. Remove the padding of sentences of encoder.
int8_mode: Mode of int8 quantization. 0 means not using int8 quantization, 1 means using int8 quantization without quantizing residuals,
2 means using int8 quantization with quantizing residuals.
allow_gemm_test: whether allow gemm test inside FT.
'''
self.beam_width = beam_width
self.head_num = head_num
self.size_per_head = size_per_head
self.inter_size = inter_size
self.num_layer = num_layer
self.dtype = dtype
self.hidden_dim = self.head_num * self.size_per_head
self.kernel_init_range = kernel_init_range
self.bias_init_range = bias_init_range
self.int8_mode = int8_mode
self.allow_gemm_test = allow_gemm_test
if self.dtype == tf.float32:
self.check_threshold = 2e-5
elif self.dtype == tf.float16:
self.check_threshold = 2e-2
self.fuse_qkv = fuse_qkv
self.remove_padding = remove_padding
self.memory_hidden_dim = memory_hidden_dim
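# Example (illustration only): a BERT-base-like encoder configuration for the custom op
# would look roughly like
#   args = TransformerArgument(beam_width=1, head_num=12, size_per_head=64,
#                              inter_size=3072, num_layer=12, dtype=tf.float16)
# where inter_size is typically 4 * head_num * size_per_head.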
def create_initializer(initializer_range=0.02, data_type=tf.float32):
return tf.truncated_normal_initializer(stddev=initializer_range, dtype=data_type)
def time_test(sess, tensor, iterations=100, warmup=True):
# return in ms
# warmup
if warmup == True:
for i in range(iterations):
sess.run(tensor)
t1 = datetime.now()
for i in range(iterations):
sess.run(tensor)
t2 = datetime.now()
time_sum = (t2 - t1).total_seconds()
return time_sum * 1000 / iterations
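# Usage sketch (assumed): avg_ms = time_test(sess, output_op, iterations=100) returns the
# average latency in milliseconds after an equal number of warmup runs.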
def cross_check(name, tf_val, op_val, atol_threshold):
abs_diff = np.fabs(tf_val - op_val)
print("[INFO] {} Cross check {}".format(name, np.allclose(tf_val, op_val, atol=atol_threshold)))
print("[INFO] Max diff {}".format(abs_diff.max()))
print("[INFO] min diff {}".format(abs_diff.min()))
def int_result_cross_check(name, tf_result, op_result, shape):
print(" ")
is_same = (tf_result.flatten() == op_result.flatten()).all()
print(" {} cross-check: {}".format(name, is_same))
if is_same == False:
tf_reshaped_result = np.reshape(tf_result, shape)
op_reshaped_result = np.reshape(op_result, shape)
for i in range(tf_reshaped_result.shape[0]):
is_true = (tf_reshaped_result[i] == op_reshaped_result[i]).all()
print(" Cross-Check on batch-{} {}".format(i, is_true))
if is_true == False:
print("TF result: {}".format(tf_reshaped_result[i]))
print("OP result: {}".format(op_reshaped_result[i]))
class cudaProfiler:
def __init__(self):
self.profiler = ctypes.CDLL("libcudart.so")
def start(self):
ret = self.profiler.cudaProfilerStart()
if ret != 0:
raise Exception("cudaProfilerStart() return %d " %ret)
def stop(self):
ret = self.profiler.cudaProfilerStop()
if ret != 0:
raise Exception("cudaProfilerStop() return %d " %ret)
def print_abs_mean(node, tensor, info = ""):
return tf.Print(node, ["[INFO] {} abs mean".format(info),
tf.shape(tensor),
tf.reduce_mean(tf.abs(tensor)),
tf.reduce_sum(tf.abs(tensor))]) | FasterTransformer-main | examples/tensorflow/common_utils/common.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import abc
import tensorflow as tf
from examples.tensorflow.common_utils.reducer import SumReducer
class PositionEncoder(tf.keras.layers.Layer):
"""Base class for position encoders."""
def __init__(self, reducer=SumReducer(), **kwargs):
"""Initializes the position encoder.
Args:
reducer: A :class:`opennmt.layers.Reducer` to merge inputs and position
encodings.
**kwargs: Additional layer keyword arguments.
"""
super(PositionEncoder, self).__init__(**kwargs)
self.reducer = reducer
def call(self, inputs, position=None): # pylint: disable=arguments-differ
"""Add position encodings to :obj:`inputs`.
Args:
inputs: The inputs to encode.
position: The single position to encode, to use when this layer is called
step by step.
Returns:
A ``tf.Tensor`` whose shape depends on the configured ``reducer``.
"""
batch_size = tf.shape(inputs)[0]
timesteps = tf.shape(inputs)[1]
input_dim = inputs.get_shape().as_list()[-1] # return int
positions = tf.range(timesteps) + 1 if position is None else position
position_encoding = self._encode([positions], input_dim, dtype=inputs.dtype)
position_encoding = tf.tile(position_encoding, [batch_size, 1, 1])
return self.reducer([inputs, position_encoding])
@abc.abstractmethod
def _encode(self, positions, depth, dtype):
"""Creates position encodings.
Args:
positions: The positions to encode of shape :math:`[B, ...]`.
depth: The encoding depth :math:`D`.
Returns:
A ``tf.Tensor`` of shape :math:`[B, ..., D]`.
"""
raise NotImplementedError()
def _create_position_encoding_table(self, max_seq_len, input_dim, dtype):
positions = tf.range(max_seq_len) + 1
self.position_encoding_table = self._encode([positions], input_dim, dtype=dtype)
self.position_encoding_table = tf.squeeze(self.position_encoding_table)
return self.position_encoding_table
class SinusoidalPositionEncoder(PositionEncoder):
"""Encodes positions with sine waves as described in
https://arxiv.org/abs/1706.03762.
"""
def _encode(self, positions, depth, dtype):
if depth % 2 != 0:
raise ValueError("SinusoidalPositionEncoder expects the depth to be divisble "
"by 2 but got %d" % depth)
batch_size = tf.shape(positions)[0]
positions = tf.cast(positions, tf.float32)
log_timescale_increment = math.log(10000) / (depth / 2 - 1)
inv_timescales = tf.exp(
tf.cast(tf.range(depth / 2), dtype=tf.float32) * -log_timescale_increment)
inv_timescales = tf.reshape(
tf.tile(inv_timescales, [batch_size]), [batch_size, -1])
scaled_time = tf.expand_dims(
positions, -1) * tf.expand_dims(inv_timescales, 1)
encoding = tf.concat(
[tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
return tf.cast(encoding, dtype)
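# Sketch of the math as implemented above: for position p and channel index i in [0, depth/2),
#   PE[p, i]            = sin(p * 10000^(-i / (depth/2 - 1)))
#   PE[p, i + depth/2]  = cos(p * 10000^(-i / (depth/2 - 1)))
# i.e. sines fill the first half of the channels and cosines the second half.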
| FasterTransformer-main | examples/tensorflow/common_utils/position.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This is a sample code to demonstrate how to use the TensorFlow custom op with the
FasterTransformer library.
This sample code builds a DeBERTa transformer model with TensorFlow and with FasterTransformer's
TensorFlow custom op, then compares the results on random inputs to verify the correctness of the
FasterTransformer implementation.
Note that the DeBERTa FasterTransformer implementation does not include the pooling layer or
downstream task heads. Therefore the comparison is made on the raw hidden states from the DeBERTa
encoder model.
Users are also able to use this sample code to test the average forward time of
TensorFlow and FasterTransformer.
'''
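# Example invocation (sketch; assumes ./build/lib/libtf_deberta.so has been built and the
# HuggingFace checkpoint can be downloaded):
#   python deberta_example.py -batch 8 -model microsoft/deberta-v3-base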
import os
import sys
import time
import argparse
import numpy as np
import tensorflow as tf
from transformers import DebertaV2Tokenizer, TFDebertaV2ForSequenceClassification
dir_path = os.path.dirname(os.path.realpath(__file__))
ROOT_DIR = dir_path + "/../../.."
sys.path.append(ROOT_DIR)
from examples.tensorflow.deberta.utils.ft_deberta import FTDebertaWeights, FTDebertaModel, FTHFDebertaModel
from examples.tensorflow.bert.utils.common import cross_check
def main(args):
model_name = args['model']
batch_size = args['batch_size']
tokenizer = DebertaV2Tokenizer.from_pretrained(model_name)
# Model setup - Huggingface TensorFlow
model_tf = TFDebertaV2ForSequenceClassification.from_pretrained(model_name)
# Model setup - FasterTransformer
lib_path = os.path.join(ROOT_DIR, './build/lib/libtf_deberta.so')
ft_deberta_weight = FTDebertaWeights(model_tf.config, tensor_para_size=1, pipeline_para_size=1)
ft_deberta_weight.load_from_model(model_tf)
ft_deberta = FTDebertaModel(lib_path, ft_deberta_weight)
# Random input
random_sentences = tokenizer.batch_decode([np.random.randint(1, model_tf.config.vocab_size, size=np.random.randint(
1, model_tf.config.max_position_embeddings)) for _ in range(batch_size)])
inputs = tokenizer(random_sentences, padding=True, return_tensors="tf")
# Inference and simple timing
measurement_iters = 10
tf_latencies = []
ft_latencies = []
# TF E2E
for _ in range(measurement_iters):
start_time = time.time()
output_tf = model_tf(**inputs)
end_time = time.time()
tf_latencies.append(end_time - start_time)
tf_p50 = np.percentile(tf_latencies, 50)
tf_p99 = np.percentile(tf_latencies, 99)
logits_tf = output_tf.logits
# print("TF results: ", logits_tf)
# predicted_class_id = int(tf.math.argmax(logits_tf, axis=-1)[0])
# print(model.config.id2label[predicted_class_id])
# FT E2E
# trick to wrap FT inside HF by replacing TF layer, see ft_deberta.py
model_tf.deberta = FTHFDebertaModel(ft_deberta, remove_padding=True)
# w/ padding removal by default i.e., Effective Transformer
for _ in range(measurement_iters):
start_time = time.time()
output_ft = model_tf(**inputs)
end_time = time.time()
ft_latencies.append(end_time - start_time)
ft_p50 = np.percentile(ft_latencies, 50)
ft_p99 = np.percentile(ft_latencies, 99)
logits_ft = output_ft.logits
# print("FT results: ", logits_ft)
print(f"TF p50: {tf_p50*1000:.2f} ms, p99: {tf_p99*1000:.2f} ms ")
print(f"FT p50: {ft_p50*1000:.2f} ms, p99: {ft_p99*1000:.2f} ms ")
# Correctness check
atol_threshold = 3e-3
cross_check("TF v.s. FT", logits_tf, logits_ft, atol_threshold)
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-model', '--model', type=str, default="microsoft/deberta-v3-base", metavar='STRING',
help='DeBERTa-V3 model variants. Note DeBERTa-V2 and -V1 variants are both slightly different from V3, thus not supported in the current example yet')
# not tested for the moment and not supported
parser.add_argument('-tensor_para_size', '--tensor_para_size', type=int, default=1, metavar='NUMBER',
help='size of tensor parallelism (default: 1). This feature hasn\'t been tested.')
parser.add_argument('-pipeline_para_size', '--pipeline_para_size', type=int, default=1, metavar='NUMBER',
help='size of pipeline parallelism (default: 1). This feature hasn\'t been tested.')
args = parser.parse_args()
main(vars(args)) | FasterTransformer-main | examples/tensorflow/deberta/deberta_example.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import math
import os
from collections import namedtuple
class FTDebertaWeights(object):
def __init__(
self,
config,
tensor_para_size=1,
pipeline_para_size=1
):
self.num_heads = config.num_attention_heads
self.head_size = config.hidden_size / config.num_attention_heads
self.max_relative_positions = config.max_position_embeddings
self.relative_position_buckets = config.position_buckets
self.inter_size = config.intermediate_size
self.num_layer = config.num_hidden_layers
self.config = config
self.tensor_para_size = tensor_para_size
self.pipeline_para_rank = 0 # no mpi for the moment
self.pipeline_para_size = pipeline_para_size
self.activation_type = config.hidden_act
self.weights = None
self.q_scaling = np.sqrt(3)
assert tensor_para_size == 1, "This op only supports TP = 1 now."
assert pipeline_para_size == 1, "This op only supports PP = 1 now."
def load_from_model(self, model):
"""
        Routine to load DeBERTa weights from a HuggingFace model. This assumes the latest DeBERTa-v2 architecture and DeBERTa-v3 weights.
"""
start_layer = self.pipeline_para_rank * self.num_layer // self.pipeline_para_size
end_layer = (self.pipeline_para_rank + 1) * self.num_layer // self.pipeline_para_size
weight_data_type = {'float32': tf.float32, 'float16': tf.float16}[model.dtype]
variables_dict = {}
for var in model.variables:
variables_dict[var.name] = var.numpy()
var_prefix_model = model.name + '/deberta/' # model-level weights
var_prefix_layer = model.name + '/deberta/encoder/' # layer-level weights
# model-level weight loading
word_embedding_table = variables_dict.get(var_prefix_model + "embeddings/word_embeddings/weight:0")
word_embedding_layernorm_gamma = variables_dict.get(var_prefix_model + "embeddings/LayerNorm/gamma:0")
word_embedding_layernorm_beta = variables_dict.get(var_prefix_model + "embeddings/LayerNorm/beta:0")
relative_embedding_table = variables_dict.get(var_prefix_model + "encoder/rel_embeddings.weight:0")
relative_embedding_layernorm_gamma = variables_dict.get(var_prefix_model + "encoder/LayerNorm/gamma:0")
relative_embedding_layernorm_beta = variables_dict.get(var_prefix_model + "encoder/LayerNorm/beta:0")
# layer-level weight loading
attn_q_kernel = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/attention/self/query_proj/kernel:0") for i in range(start_layer, end_layer)]
attn_q_bias = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/attention/self/query_proj/bias:0") for i in range(start_layer, end_layer)]
attn_k_kernel = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/attention/self/key_proj/kernel:0") for i in range(start_layer, end_layer)]
attn_k_bias = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/attention/self/key_proj/bias:0") for i in range(start_layer, end_layer)]
attn_v_kernel = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/attention/self/value_proj/kernel:0") for i in range(start_layer, end_layer)]
attn_v_bias = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/attention/self/value_proj/bias:0") for i in range(start_layer, end_layer)]
attn_output_kernel = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/attention/output/dense/kernel:0") for i in range(start_layer, end_layer)]
attn_output_bias = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/attention/output/dense/bias:0") for i in range(start_layer, end_layer)]
attn_output_layernorm_gamma = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/attention/output/LayerNorm/gamma:0") for i in range(start_layer, end_layer)]
attn_output_layernorm_beta = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/attention/output/LayerNorm/beta:0") for i in range(start_layer, end_layer)]
inter_kernel = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/intermediate/dense/kernel:0") for i in range(start_layer, end_layer)]
inter_bias = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/intermediate/dense/bias:0") for i in range(start_layer, end_layer)]
output_kernel = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/output/dense/kernel:0") for i in range(start_layer, end_layer)]
output_bias = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/output/dense/bias:0") for i in range(start_layer, end_layer)]
output_layernorm_gamma = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/output/LayerNorm/gamma:0") for i in range(start_layer, end_layer)]
output_layernorm_beta = [variables_dict.get(
var_prefix_layer + f"layer_._{i}/output/LayerNorm/beta:0") for i in range(start_layer, end_layer)]
# pack the arguments into a tuple that mirrors the TF custom OP input
weights = [
word_embedding_table,
word_embedding_layernorm_gamma,
word_embedding_layernorm_beta,
relative_embedding_table,
relative_embedding_layernorm_gamma,
relative_embedding_layernorm_beta,
attn_q_kernel,
attn_q_bias,
attn_k_kernel,
attn_k_bias,
attn_v_kernel,
attn_v_bias,
attn_output_kernel,
attn_output_bias,
attn_output_layernorm_gamma,
attn_output_layernorm_beta,
inter_kernel,
inter_bias,
output_kernel,
output_bias,
output_layernorm_gamma,
output_layernorm_beta
]
# clean up if there is None. Note - we cannot use np.array([0]) as TF won't accept empty tensors
for i in range(0, len(weights)):
if weights[i] is None:
weights[i] = tf.constant([0], dtype=weight_data_type)
elif type(weights[i]) is list:
weights[i] = [tf.constant([0], dtype=weight_data_type) if w is None else tf.convert_to_tensor(
w, dtype=weight_data_type) for w in weights[i]]
else:
weights[i] = tf.convert_to_tensor(weights[i], dtype=weight_data_type)
self.weights = tuple(weights)
class FTDebertaModel():
def __init__(self, lib_path, params):
self.transformer_op_module = tf.load_op_library(lib_path)
self.params = params
def __call__(self, input_ids, seq_len, remove_padding=True):
return self.forward(input_ids, seq_len, remove_padding=remove_padding)
def forward(self, input_ids, seq_len, remove_padding=True):
outputs = self.transformer_op_module.deberta(input_ids,
seq_len,
*self.params.weights,
head_num=self.params.num_heads,
size_per_head=self.params.head_size,
max_relative_positions=self.params.max_relative_positions,
relative_position_buckets=self.params.relative_position_buckets,
inter_size=self.params.inter_size,
num_layer=self.params.num_layer,
remove_padding=remove_padding,
q_scaling=self.params.q_scaling)
return outputs
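    # Usage sketch (assumed shapes): input_ids is an int tensor of shape [batch, max_seq_len]
    # and seq_len holds the valid length of each sentence, e.g.
    #   hidden_states = ft_model(input_ids, seq_len, remove_padding=True)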
class FTHFDebertaModel():
def __init__(self, ft_model, remove_padding=True):
self.model = ft_model
self.remove_padding = remove_padding
def __call__(self, input_ids, attention_mask, **kwargs):
seq_len = tf.reduce_sum(attention_mask, axis=1)
outputs = self.model.forward(input_ids, seq_len, remove_padding=self.remove_padding)
# to match HF structure
FTOutput = namedtuple("FTOutput", ["output", "hidden_states", "attentions"])
o = FTOutput(outputs, None, None)
return o
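# Usage sketch (mirrors deberta_example.py): swap the HuggingFace TF backbone for the FT wrapper
# so that the downstream task head can be reused unchanged:
#   model_tf.deberta = FTHFDebertaModel(ft_deberta, remove_padding=True)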
| FasterTransformer-main | examples/tensorflow/deberta/utils/ft_deberta.py |
#!/usr/bin/env python3
# Modified MIT License
# Software Copyright (c) 2019 OpenAI
# We don’t claim ownership of the content you create with GPT-2, so it is yours to do with as you please.
# We only ask that you use GPT-2 responsibly and clearly indicate your content was created using GPT-2.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# The above copyright notice and this permission notice need not be included
# with content created by the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fire
import json
import os
import numpy as np
import sys
import tensorflow as tf
from tensorflow.contrib.training import HParams
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
import examples.tensorflow.gpt.utils.gpt_token_encoder as encoder
from examples.tensorflow.decoder.utils.common import TransformerArgument
from examples.tensorflow.decoder.utils.common import DecodingArgumentNew
from examples.tensorflow.decoder.utils.common import time_test
def sample_model(
vocab_file="../models/gpt2-vocab.json",
bpe_file="../models/gpt2-merges.txt",
model_name='124M',
nsamples=1,
batch_size=2,
length=32,
temperature=1,
top_k=4,
top_p=0.0,
models_dir='../models/openai_gpt_model',
data_type='fp32',
beam_width=1
):
"""Run the sample_model.
:model_name=124M : String, which model to use
:nsamples=0 : Number of samples to return, if 0, continues to
generate samples indefinitely.
:batch_size=1 : Number of batches (only affects speed/memory).
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=4 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:models_dir : path to parent folder containing model subfolders
(i.e. contains the <model_name> folder)
"""
np.random.seed(1)
tf.set_random_seed(1)
if data_type == 'fp32':
tf_data_type = tf.float32
elif data_type == 'fp16':
tf_data_type = tf.float16
else:
assert(False)
models_dir = os.path.expanduser(os.path.expandvars(models_dir))
vocab_file=os.path.join(models_dir, model_name, 'encoder.json')
bpe_file=os.path.join(models_dir, model_name, 'vocab.bpe')
enc = encoder.get_encoder(vocab_file, bpe_file)
hparams = HParams(n_vocab=0,
n_ctx=1024,
n_embd=768,
n_head=12,
n_layer=12)
with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx
elif length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(graph=tf.Graph(), config=config) as sess:
saver = tf.train.import_meta_graph("{}/{}/model.ckpt.meta".format(models_dir, model_name))
# lengths = np.random.randint(low=1, high=4, size=batch_size)
lengths = np.ones([batch_size], dtype=np.int32) * 8 # TODO support various input lengths
# lengths = np.zeros([batch_size], dtype=np.int32) # unconditional case
max_start_length = lengths.max()
start_ids = np.ones([batch_size, max_start_length]) * enc.encoder['<|endoftext|>']
# for i in range(batch_size):
# start_ids[i][0:lengths[i]] = 198
# User can put some real start ids here, we use '\n' (198) here.
sess.run(tf.global_variables_initializer())
print("[INFO] restore the model {}/{}".format(models_dir, model_name))
saver.restore(sess, ("{}/{}/model.ckpt".format(models_dir, model_name)))
decoder_args = TransformerArgument(beam_width=beam_width,
head_num=hparams.n_head,
size_per_head=hparams.n_embd // hparams.n_head,
inter_size=hparams.n_embd * 4,
num_layer=hparams.n_layer,
dtype=tf_data_type,
kernel_init_range=0.00,
bias_init_range=0.00)
decoding_args = DecodingArgumentNew(hparams.n_vocab,
enc.encoder['<|endoftext|>'],
enc.encoder['<|endoftext|>'],
length,
0.0,
top_k,
top_p,
decoder_args)
ckpt_dict = {}
for var in tf.trainable_variables():
ckpt_dict[var.name] = var
op_output, sequence_length = ft_gpt_op(ckpt_dict,
decoding_args,
batch_size,
start_ids,
lengths)
generated = 0
while nsamples == 0 or generated < nsamples:
op_out, seq_len = sess.run([op_output, sequence_length])
for i in range(batch_size):
generated += 1
for j in range(beam_width):
print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
print(enc.decode(op_out[i][j][:seq_len[i][j]]))
def finalize(input_ids, beam_width, parent_ids, sequence_lengths, outputs, end_id, max_seq_len=None):
maximum_lengths = tf.reduce_max(tf.reshape(
sequence_lengths, [-1, beam_width]), axis=-1)
if max_seq_len != None:
array_shape = [max_seq_len, -1, beam_width]
else:
array_shape = [tf.reduce_max(maximum_lengths), -1, beam_width]
step_ids = tf.reshape(outputs, array_shape)
parent_ids = tf.reshape(parent_ids, array_shape)
# ids = tf.contrib.seq2seq.gather_tree(
# step_ids, parent_ids, maximum_lengths, end_id)
# Since we use end_id to padding, we cannot use end_id in the gather_tree
ids = tf.contrib.seq2seq.gather_tree(
step_ids, parent_ids, maximum_lengths, -1)
ids = tf.transpose(ids, perm=[1, 2, 0])
lengths = tf.not_equal(ids, end_id)
lengths = tf.cast(lengths, tf.int32)
max_input_length = tf.shape(input_ids)[-1]
input_ids = tf.reshape(input_ids, [-1, beam_width, max_input_length])
padding_lengths = tf.cast(tf.equal(input_ids, end_id), tf.int32)
padding_lengths = tf.reduce_sum(padding_lengths, axis=-1)
lengths = tf.reduce_sum(lengths, axis=-1)
lengths = lengths + padding_lengths
return ids, lengths
def ft_gpt_op(var_dict,
decoding_args,
batch_size,
input_ids,
input_lengths):
"""Run the decoding with sampling by FasterTransformer.
    Args:
        var_dict: A dict mapping TensorFlow variable names to tf.Tensor. The model variables of the GPT model.
        decoding_args: The arguments for decoding. The details are in the class "DecodingArgumentNew" of common.py.
        batch_size: The batch size of the inputs.
        input_ids: A tf.Tensor of input (context) token ids with shape [batch_size, max_start_length].
        input_lengths: The valid length of each row of input_ids.
    Outputs:
        output_ids: A tf.Tensor with shape [batch_size, beam_width, max(sequence_lengths)], with int type.
            The results of decoding. It contains the token ids of the vocabulary.
        sequence_lengths: A tf.Tensor with shape [batch_size, beam_width], with int type.
"""
decoder_args = decoding_args.decoder_args
gpt_op_module = tf.load_op_library(os.path.join('./lib/libtf_gpt.so'))
data_type = decoder_args.dtype
output_ids, sequence_length, cum_log_probs = gpt_op_module.gpt(
input_ids, # 0
input_lengths, # 1
[tf.cast(var_dict["model/h%d/ln_1/b:0" % l], data_type) for l in range(decoder_args.num_layer)], # 2
[tf.cast(var_dict["model/h%d/ln_1/g:0" % l], data_type) for l in range(decoder_args.num_layer)], # 3
[tf.cast(var_dict["model/h%d/attn/c_attn/w:0" % l], data_type) for l in range(decoder_args.num_layer)], # 4
[tf.cast(var_dict["model/h%d/attn/c_attn/b:0" % l], data_type) for l in range(decoder_args.num_layer)], # 5
[tf.cast(var_dict["model/h%d/attn/c_proj/w:0" % l], data_type) for l in range(decoder_args.num_layer)], # 6
[tf.cast(var_dict["model/h%d/attn/c_proj/b:0" % l], data_type) for l in range(decoder_args.num_layer)], # 7
[tf.cast(var_dict["model/h%d/ln_2/b:0" % l], data_type) for l in range(decoder_args.num_layer)], # 8
[tf.cast(var_dict["model/h%d/ln_2/g:0" % l], data_type) for l in range(decoder_args.num_layer)], # 9
[tf.cast(var_dict["model/h%d/mlp/c_fc/w:0" % l], data_type) for l in range(decoder_args.num_layer)], # 10
[tf.cast(var_dict["model/h%d/mlp/c_fc/b:0" % l], data_type)for l in range(decoder_args.num_layer)], # 11
[tf.cast(var_dict["model/h%d/mlp/c_proj/w:0" % l], data_type) for l in range(decoder_args.num_layer)], # 12
[tf.cast(var_dict["model/h%d/mlp/c_proj/b:0" % l], data_type) for l in range(decoder_args.num_layer)], # 13
tf.cast(var_dict['model/ln_f/b:0'], data_type), # 14
tf.cast(var_dict['model/ln_f/g:0'], data_type), # 15
tf.cast(var_dict['model/wpe:0'], data_type), # 16
tf.cast(var_dict['model/wte:0'], data_type), # 17
tf.cast(var_dict['model/wte:0'], data_type), # 18
max_batch_size=batch_size,
max_seq_len=decoding_args.max_seq_len,
beam_width=decoder_args.beam_width,
head_num=decoder_args.head_num,
size_per_head=decoder_args.size_per_head,
inter_size=decoder_args.inter_size,
num_layer=decoder_args.num_layer,
start_id=decoding_args.start_id,
end_id=decoding_args.end_id,
beam_search_diversity_rate=decoding_args.beam_search_diversity_rate,
top_k=decoding_args.top_k,
top_p=decoding_args.top_p,
temperature=1.0,
len_penalty=0.0,
repetition_penalty=1.0,
output_log_probs=True,
request_output_length=decoding_args.max_seq_len - input_lengths.max())
return output_ids, sequence_length
if __name__ == '__main__':
fire.Fire(sample_model)
| FasterTransformer-main | examples/tensorflow/gpt/gpt_example.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../../..")
from examples.tensorflow.gpt.utils import gpt_token_encoder as encoder
import fire
import numpy as np
def convert_token(
vocab_file="../models/gpt2-vocab.json",
bpe_file="../models/gpt2-merges.txt",
out_file="out",
max_input_length=-1
):
enc = encoder.get_encoder(vocab_file, bpe_file)
tokens_batch = np.loadtxt(out_file, dtype=np.int32)
end_id = 50256
if(tokens_batch.ndim == 1):
tokens_batch = tokens_batch.reshape([1, -1])
for batch_num, tokens in enumerate(tokens_batch):
if max_input_length > -1:
end_index = np.where(tokens[max_input_length:] == end_id)[0]
else:
end_index = []
        # Truncate at the first end-of-text token that appears after the input prompt;
        # note that end_index is relative to the slice starting at max_input_length.
        end_pos = len(tokens)
        if len(end_index) > 0:
            end_pos = max_input_length + end_index[0]
print("[INFO] batch {}: {}".format(batch_num, enc.decode(tokens[:end_pos])))
return tokens_batch
if __name__ == "__main__":
fire.Fire(convert_token) | FasterTransformer-main | examples/tensorflow/gpt/utils/gpt_token_converter.py |
# Modified MIT License
# Software Copyright (c) 2019 OpenAI
# We don’t claim ownership of the content you create with GPT-2, so it is yours to do with as you please.
# We only ask that you use GPT-2 responsibly and clearly indicate your content was created using GPT-2.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# The above copyright notice and this permission notice need not be included
# with content created by the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import requests
from tqdm import tqdm
if len(sys.argv) != 2:
print('You must enter the model name as a parameter, e.g.: download_model.py 124M')
sys.exit(1)
model = sys.argv[1]
subdir = os.path.join('models', model)
if not os.path.exists(subdir):
os.makedirs(subdir)
subdir = subdir.replace('\\','/') # needed for Windows
for filename in ['checkpoint','encoder.json','hparams.json','model.ckpt.data-00000-of-00001', 'model.ckpt.index', 'model.ckpt.meta', 'vocab.bpe']:
r = requests.get("https://openaipublic.blob.core.windows.net/gpt-2/" + subdir + "/" + filename, stream=True)
with open(os.path.join(subdir, filename), 'wb') as f:
file_size = int(r.headers["content-length"])
chunk_size = 1000
with tqdm(ncols=100, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
# 1k for chunk_size, since Ethernet packet size is around 1500 bytes
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
pbar.update(chunk_size)
| FasterTransformer-main | examples/tensorflow/gpt/utils/download_gpt2_model.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../../..")
import argparse
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
"""
This file converts a TensorFlow checkpoint to numpy arrays and stores the arrays
in <.bin> files. It also slightly modifies the variable names.
The following are examples:
For example, the original name of variable of the 3rd transformer layers are:
model.h<layer>.ln_1.b
model.h<layer>.ln_1.g
model.h<layer>.attn.c_attn.b
model.h<layer>.attn.c_attn.w
model.h<layer>.attn.c_proj.b
model.h<layer>.attn.c_proj.w
model.h<layer>.ln_2.b
model.h<layer>.ln_2.g
model.h<layer>.mlp.c_fc.b
model.h<layer>.mlp.c_fc.w
model.h<layer>.mlp.c_proj.b
model.h<layer>.mlp.c_proj.w
and we convert them to
model.layers.3.input_layernorm.weight
model.layers.3.input_layernorm.bias
model.layers.3.attention.query_key_value.weight
model.layers.3.attention.query_key_value.bias
model.layers.3.attention.dense.weight
model.layers.3.attention.dense.bias
model.layers.3.post_attention_layernorm.weight
model.layers.3.post_attention_layernorm.bias
model.layers.3.mlp.dense_h_to_4h.weight
model.layers.3.mlp.dense_h_to_4h.bias
model.layers.3.mlp.dense_4h_to_h.weight
model.layers.3.mlp.dense_4h_to_h.bias
For other variables:
model.wpe
model.wte
model.ln_f.b
model.ln_f.g
we convert them to
model.wpe (no change)
model.wte (no change)
model.final_layernorm.weight
model.final_layernorm.bias
Note that we convert the "gamma" and "beta" of layernorm to "weight" and
"bias".
This converter would skip the variables about training. For example,
the weights come from Adam optimizers.
For multi-gpu weights, we need to split the following weights:
1. attn/c_attn/w: we need to reshape to [hidden_dim, 3, hidden_dim], split
at last axis, and then reshape to [hidden_dim, 3 * hidden_dim / gpu_num].
Namely, we split by W = [W_1, W_2, ...]
If we do not fuse QKV, we will convert from [h, 3 * h] to [3, h, h]
2. attn/c_attn/b: it is similar to attn/c_attn/w
3. attn/c_proj/w: we need to split at axis 1. Namely, we split by W = [ [W_1], [W_2] ]
4. mlp/c_fc/w: we need to split at axis 0. Namely, we split by W = [W1, W2]
5. mlp/c_fc/b: it is similar to mlp/c_fc/w
6. mlp/c_proj/w: we need to split at axis 1. Namely, we split by W = [ [W_1], [W_2] ]
Note that we do not need to split following variables:
attn/c_proj/b
mlp/c_proj/b
ln_1/g, ln_1/b
ln_2/g, ln_2/b
wte, wpe
"""
# Split the checkpoint from 1 GPU to `gpu_num` GPUs and convert the weights to <.bin> files.
def split_and_convert(args):
if args.fused_qkv == 1:
saved_dir = args.saved_dir + "/%d-gpu/" % args.gpu_num
else:
saved_dir = args.saved_dir + "/unfusedQKV-%d-gpu/" % args.gpu_num
if(os.path.exists(saved_dir) == False):
os.makedirs(saved_dir)
ckpt_name = args.in_file
gpu_num = args.gpu_num
with tf.Session() as sess:
saver = tf.train.import_meta_graph(ckpt_name + ".meta")
saver.restore(sess, (ckpt_name))
all_variables = tf.trainable_variables()
ckpt = {}
all_val = sess.run(all_variables)
for var, val in zip(all_variables, all_val):
if var.name.find("Adam") == -1:
print(var.name, var.shape)
val = np.squeeze(val)
# spilt the kernel for multi-gpu inference
saved_name = var.name.replace("model/h", "model.layers.").replace("/", ".")
if saved_name.find(".w:0") != -1:
saved_name = saved_name.replace(".w:0", ".weight")
elif saved_name.find(".b:0") != -1:
saved_name = saved_name.replace(".b:0", ".bias")
elif saved_name.find(".g:0") != -1:
saved_name = saved_name.replace(".g:0", ".weight")
elif saved_name.find(".wpe:0") != -1:
saved_name = saved_name.replace(".wpe:0", ".wpe")
elif saved_name.find(".wte:0") != -1:
saved_name = saved_name.replace(".wte:0", ".wte")
if saved_name.find("ln_1") != -1:
saved_name = saved_name.replace("ln_1", "input_layernorm")
elif saved_name.find("attn.c_attn") != -1:
saved_name = saved_name.replace("attn.c_attn", "attention.query_key_value")
elif saved_name.find("attn.c_proj") != -1:
saved_name = saved_name.replace("attn.c_proj", "attention.dense")
elif saved_name.find("ln_2") != -1:
saved_name = saved_name.replace("ln_2", "post_attention_layernorm")
elif saved_name.find("mlp.c_fc") != -1:
saved_name = saved_name.replace("mlp.c_fc", "mlp.dense_h_to_4h")
elif saved_name.find("mlp.c_proj") != -1:
saved_name = saved_name.replace("mlp.c_proj", "mlp.dense_4h_to_h")
elif saved_name.find("ln_f") != -1:
saved_name = saved_name.replace("ln_f", "final_layernorm")
if var.name.find("attn/c_attn") != -1:
val = val.reshape([-1, 3, (int)(val.shape[-1] / 3)])
if args.fused_qkv == 0:
val = val.transpose([1, 0, 2])
split_vals = np.split(val, gpu_num, axis=-1)
for i in range(gpu_num):
saved_path = saved_dir + saved_name + ".%d.bin" % i
split_vals[i].astype(np.float32).tofile(saved_path)
elif var.name.find("attn/c_proj/w") != -1:
split_vals = np.split(val, gpu_num, axis=0)
for i in range(gpu_num):
saved_path = saved_dir + saved_name + ".%d.bin" % i
split_vals[i].astype(np.float32).tofile(saved_path)
elif var.name.find("mlp/c_fc") != -1:
split_vals = np.split(val, gpu_num, axis=-1)
for i in range(gpu_num):
saved_path = saved_dir + saved_name + ".%d.bin" % i
split_vals[i].astype(np.float32).tofile(saved_path)
elif var.name.find("mlp/c_proj/w") != -1:
split_vals = np.split(val, gpu_num, axis=0)
for i in range(gpu_num):
saved_path = saved_dir + saved_name + ".%d.bin" % i
split_vals[i].astype(np.float32).tofile(saved_path)
else:
saved_path = saved_dir + saved_name + ".bin"
val.astype(np.float32).tofile(saved_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-saved_dir', '-o', type=str, help='file name of output file', required=True)
parser.add_argument('-in_file', '-i', type=str, help='file name of input checkpoint file', required=True)
parser.add_argument('-gpu_num', '-g', type=int, default=1, help='How many gpus for inference')
parser.add_argument('-fused_qkv', '-fused_qkv', type=int, default=1, help='Fuse the qkv weights or not. Default is true (1)', choices=[0, 1])
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
split_and_convert(args) | FasterTransformer-main | examples/tensorflow/gpt/utils/openai_gpt_ckpt_converter.py |
"""Byte pair encoding utilities"""
# Modified MIT License
# Software Copyright (c) 2019 OpenAI
# We don’t claim ownership of the content you create with GPT-2, so it is yours to do with as you please.
# We only ask that you use GPT-2 responsibly and clearly indicate your content was created using GPT-2.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# The above copyright notice and this permission notice need not be included
# with content created by the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import regex as re
from functools import lru_cache
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
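# For example (sketch): the space byte 0x20 is outside the printable ranges above, so it is
# remapped to chr(256 + 32) = 'Ġ', which is why GPT-2 BPE tokens that follow a space start with 'Ġ'.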
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class Encoder:
def __init__(self, encoder, bpe_merges, errors='replace'):
self.encoder = encoder
self.decoder = {v:k for k,v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def get_encoder(vocab_file, bpe_file):
with open(vocab_file, 'r') as f:
encoder = json.load(f)
with open(bpe_file, 'r', encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
)
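# Round-trip sketch (file names are placeholders):
#   enc = get_encoder("gpt2-vocab.json", "gpt2-merges.txt")
#   ids = enc.encode("Hello world")   # list of BPE token ids
#   text = enc.decode(ids)            # recovers "Hello world"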
| FasterTransformer-main | examples/tensorflow/gpt/utils/gpt_token_encoder.py |
# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This is a sample code to demonstrate how to use the TensorFlow custom op with the
FasterTransformer library in the encoder.
This sample code builds a BERT transformer model with TensorFlow and with the TensorFlow
custom op, then compares the maximum difference between them to verify the correctness
of FasterTransformer.
Users are also able to use this sample code to test the average forward time of
TensorFlow and FasterTransformer.
'''
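# Example run (sketch; the flags map to the argparse options defined at the bottom of this file):
#   python encoder_example.py -batch 4 -l 12 -s 32 -n 12 -size 64 -d fp16 -time 1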
import argparse
import copy
import numpy as np
import tensorflow as tf
import threading
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.tensorflow.encoder.utils.encoder import build_sequence_mask
from examples.tensorflow.encoder.utils.encoder import ft_encoder_opennmt
from examples.tensorflow.encoder.utils.encoder import tf_encoder_opennmt
from examples.tensorflow.common_utils.common import cross_check
from examples.tensorflow.common_utils.common import time_test
from examples.tensorflow.common_utils.common import TransformerArgument
def encoder_example(args_dict):
print("\n=============== Argument ===============")
for key in args_dict:
print("{}: {}".format(key, args_dict[key]))
print("========================================")
np.random.seed(1)
tf.set_random_seed(1)
batch_size = args_dict['batch_size']
num_layer = args_dict['num_layer']
max_seq_len = args_dict['max_seq_len']
avg_seq_len = args_dict['avg_seq_len']
head_num = args_dict['head_number']
size_per_head = args_dict['size_per_head']
inter_size = args_dict['inter_size']
if inter_size == 0:
inter_size = head_num * size_per_head * 4
tf_datatype = tf.float32
np_datatype = np.float32
atol_threshold = 3e-5
allow_gemm_test = True if args_dict['allow_gemm_test'].lower() == "true" else False
if args_dict['data_type'] == "fp16":
tf_datatype = tf.float16
np_datatype = np.float16
atol_threshold = 3e-2
hidden_dim = head_num * size_per_head
sequence_length = np.random.randint(1, max_seq_len + 1, size=batch_size)
if avg_seq_len != -1:
# This means we use "remove_padding" and set other average sequence length
sequence_length = np.ones(batch_size) * avg_seq_len
else:
sequence_length = np.ones(batch_size) * (max_seq_len / 2)
sequence_length = sequence_length.astype(np.int32)
from_data = np.random.randn(batch_size, max_seq_len, hidden_dim)
from_tensor = tf.convert_to_tensor(from_data, dtype=tf_datatype)
attention_mask = build_sequence_mask(sequence_length, num_heads=head_num,
maximum_length=max_seq_len, dtype=tf_datatype)
encoder_args = TransformerArgument(beam_width=1,
head_num=head_num,
size_per_head=size_per_head,
inter_size=inter_size,
num_layer=num_layer,
dtype=tf_datatype,
remove_padding=False,
allow_gemm_test=allow_gemm_test)
eff_encoder_args = copy.deepcopy(encoder_args)
eff_encoder_args.remove_padding = True
with tf.variable_scope("transformer/encoder", reuse=tf.AUTO_REUSE):
tf_encoder_result = tf_encoder_opennmt(input_tensor=from_tensor,
encoder_args=encoder_args,
sequence_length=sequence_length)
encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
encoder_variables_dict = {}
for v in encoder_vars:
encoder_variables_dict[v.name] = v
op_encoder_result = ft_encoder_opennmt(inputs=from_tensor,
encoder_args=encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
eff_encoder_result = ft_encoder_opennmt(inputs=from_tensor,
encoder_args=eff_encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
'''
Because FasterTransformer skip some computation for the padding parts,
if we do not mask these parts, the cross check result would be wrong.
'''
# Prevent nan since we will skip to write the data to some position, and these positions may be dirty.
eff_encoder_result = tf.where(tf.is_nan(eff_encoder_result), tf.zeros_like(eff_encoder_result), eff_encoder_result)
tf_encoder_result = tf_encoder_result * \
tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
op_encoder_result = op_encoder_result * \
tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
eff_encoder_result = eff_encoder_result * \
tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
for idx, name in enumerate(encoder_variables_dict):
print((str(idx) + " " + str(name) + " " +
str(encoder_variables_dict[name].shape)) + " " + str(encoder_variables_dict[name].dtype))
print("#################################")
tf_encoder_result_val = sess.run(tf_encoder_result)
op_encoder_result_val = sess.run(op_encoder_result)
eff_encoder_result_val = sess.run(eff_encoder_result)
cross_check("Encoder TF v.s. FT with tensor input",
tf_encoder_result_val, op_encoder_result_val, atol_threshold)
cross_check("Encoder TF v.s. EFF-FT with tensor input",
tf_encoder_result_val, eff_encoder_result_val, atol_threshold)
op_diff = abs(tf_encoder_result_val.reshape([-1]) - op_encoder_result_val.reshape([-1]))
eff_diff = abs(tf_encoder_result_val.reshape([-1]) - eff_encoder_result_val.reshape([-1]))
        max_diff = max(op_diff.max(), eff_diff.max())
ite = 50
def _cond(from_tensor):
return tf.constant(True)
def _ft_body(from_tensor):
op_encoder_result = ft_encoder_opennmt(inputs=from_tensor,
encoder_args=encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
return op_encoder_result
def _eff_body(from_tensor):
eff_encoder_result = ft_encoder_opennmt(inputs=from_tensor,
encoder_args=eff_encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
return eff_encoder_result
def _tf_body(from_tensor):
tf_encoder_result = tf_encoder_opennmt(input_tensor=from_tensor,
encoder_args=encoder_args,
sequence_length=sequence_length)
return tf_encoder_result
tf_while_tensor = tf.while_loop(_cond,
_tf_body,
loop_vars=[from_tensor],
back_prop=False,
maximum_iterations=ite)
ft_while_tensor = tf.while_loop(_cond,
_ft_body,
loop_vars=[from_tensor],
back_prop=False,
maximum_iterations=ite)
eff_while_tensor = tf.while_loop(_cond,
_eff_body,
loop_vars=[from_tensor],
back_prop=False,
maximum_iterations=ite)
if args_dict['test_time'] == 1:
# Use a while loop to run 'ite' iterations so the overheads of memory copy and model preprocessing are amortized.
# These timings are used as the profiling results.
tf_while_time = time_test(sess, tf_while_tensor, 1) / ite # while_loop has run ite times
# time.sleep(60)
ft_while_time = time_test(sess, ft_while_tensor, 1) / ite # while_loop has run ite times
# time.sleep(60)
eff_while_time = time_test(sess, eff_while_tensor, 1) / ite # while_loop has run ite times
# time.sleep(60)
ft_type = args_dict['data_type'].upper()
print("[INFO] batch_size {} max_seq_len {} precision {} {} layer TF-while-time {:6.2f} ms ( {} iterations)".format(
batch_size, max_seq_len, args_dict['data_type'].upper(), num_layer, tf_while_time, ite))
print("[INFO] batch_size {} max_seq_len {} precision {} {} layer FT-OP-while-time {:6.2f} ms ( {} iterations)".format(
batch_size, max_seq_len, ft_type, num_layer, ft_while_time, ite))
print("[INFO] batch_size {} max_seq_len {} precision {} {} layer EFF-OP-while-time {:6.2f} ms ( {} iterations)".format(
batch_size, max_seq_len, ft_type, num_layer, eff_while_time, ite))
if args_dict['thread_num'] > 1:
# Multi-threading demonstration
thread_list = []
thread_num = args_dict['thread_num']
def run():
ft_while_time = time_test(sess, ft_while_tensor, 1) / ite # while_loop has run ite times
print("[INFO] batch_size {} max_seq_len {} {} layer FT-OP-while-time {:6.2f} ms with {} threads".format(batch_size,
max_seq_len, num_layer, ft_while_time, thread_num))
for i in range(thread_num):
thread_list.append(threading.Thread(target=run, name="RunFT"))
for t in thread_list:
t.start()
for t in thread_list:
t.join()
return max_diff
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-batch', '--batch_size', type=int, default=4, metavar='NUMBER',
help='batch size (default: 4)')
parser.add_argument('-l', '--num_layer', type=int, default=12, metavar='NUMBER',
help='number of layers (default: 12)')
parser.add_argument('-s', '--max_seq_len', type=int, default=32, metavar='NUMBER',
help='max sequence length (default: 32)')
parser.add_argument('-n', '--head_number', type=int, default=12, metavar='NUMBER',
help='head number (default: 12)')
parser.add_argument('-size', '--size_per_head', type=int, default=64, metavar='NUMBER',
help='size per head (default: 64)')
parser.add_argument('-inter_size', '--inter_size', type=int, default=0, metavar='NUMBER',
help='inter_size (default: 0)')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16'])
parser.add_argument('-allow_gemm_test', '--allow_gemm_test', type=str, default="False", metavar='BOOL',
help='whether allow gemm test inside FT (default: False)', choices=["True", "False"])
parser.add_argument('-time', '--test_time', type=int, default=0, metavar='BOOL',
help='whether to test the running time (default: 0 = False, 1 = True).',
choices=[0, 1])
parser.add_argument('-avg_seq', '--avg_seq_len', type=int, default=-1, metavar='NUMBER',
help='average sequence length (default: -1)')
parser.add_argument('-thread_num', '--thread_num', type=int, default=1, metavar='int',
help='Testing multithread if thread_num > 1.')
args = parser.parse_args()
encoder_example(vars(args))
| FasterTransformer-main | examples/tensorflow/encoder/encoder_example.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import six
import os
from examples.tensorflow.common_utils.position import SinusoidalPositionEncoder
def layer_norm(input_tensor, name=None):
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def build_sequence_mask(sequence_length,
num_heads=None,
maximum_length=None,
dtype=tf.float32):
"""Builds the dot product mask.
Args:
sequence_length: The sequence length.
num_heads: The number of heads.
maximum_length: Optional size of the returned time dimension. Otherwise
it is the maximum of :obj:`sequence_length`.
dtype: The type of the mask tensor.
Returns:
A broadcastable ``tf.Tensor`` of type :obj:`dtype` and shape
``[batch_size, 1, max_length, max_length]``.
"""
mask = tf.sequence_mask(sequence_length, maxlen=maximum_length, dtype=dtype) # [batch_size, maximum_length]
mask = tf.reshape(mask, [-1, 1, 1, maximum_length])
m_2 = tf.transpose(mask, [0, 1, 3, 2])
mask = mask * m_2
return mask
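# Added usage sketch (not part of the original file): shows the mask shape produced by
# build_sequence_mask for two sentences of lengths 2 and 3 padded to length 4.
def _example_build_sequence_mask():
    mask = build_sequence_mask(tf.constant([2, 3]), num_heads=8, maximum_length=4)
    return mask  # shape [2, 1, 4, 4]; entries outside each sentence are 0.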
def tf_encoder_opennmt(input_tensor,
encoder_args,
sequence_length,
initializer_range=0.02):
'''
Run the OpenNMT encoder transformer layers by TensorFlow.
Args:
input_tensor: A tf.Tensor with shape [batch_size, seq_len, hidden_dimension].
The inputs tensor of encoder. The rank must be 3.
encoder_args: The arguments for encoder. The details are in the class
"TransformerArgument" of common.py
sequence_length: A tf.Tensor with shape [batch_size], with tf.int type.
The sequence length of each sentence in input_tensor.
initializer_range: A float value.
The range of initializer for all weights.
Outputs:
output: A tf.Tensor with shape [batch_size, max(sequence_length), hidden_dimension].
The results of encoder.
'''
data_type = encoder_args.dtype
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_tensor *= encoder_args.hidden_dim**0.5
position_encoder = SinusoidalPositionEncoder()
input_tensor = position_encoder(input_tensor, position=tf.range(seq_length))
mask = build_sequence_mask(
sequence_length,
encoder_args.head_num,
maximum_length=tf.shape(input_tensor)[1],
dtype=data_type)
intermediate_size = encoder_args.hidden_dim * 4
if encoder_args.hidden_dim % encoder_args.head_num != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (encoder_args.hidden_dim, encoder_args.head_num))
layer_input = input_tensor
for layer_idx in range(encoder_args.num_layer):
with tf.variable_scope("layer_%d" % layer_idx, reuse=tf.AUTO_REUSE):
with tf.variable_scope("multi_head"):
normed_input = tf.cast(layer_norm(tf.cast(layer_input, tf.float32)), data_type)
queries, keys, values = tf.split(tf.layers.conv1d(normed_input, encoder_args.hidden_dim * 3, 1), 3, axis=2)
# split head
queries = tf.reshape(queries, [batch_size, seq_length, encoder_args.head_num, encoder_args.size_per_head])
queries = tf.transpose(queries, [0, 2, 1, 3])
keys = tf.reshape(keys, [batch_size, seq_length, encoder_args.head_num, encoder_args.size_per_head])
keys = tf.transpose(keys, [0, 2, 1, 3])
values = tf.reshape(values, [batch_size, seq_length, encoder_args.head_num, encoder_args.size_per_head])
values = tf.transpose(values, [0, 2, 1, 3])
queries *= (encoder_args.size_per_head)**-0.5
dot = tf.matmul(queries, keys, transpose_b=True)
if mask is not None:
dot = tf.cast(tf.cast(dot, data_type) * mask + ((1.0 - mask) * data_type.min), dot.dtype)
attn = tf.cast(tf.nn.softmax(tf.cast(dot, data_type)), dot.dtype)
context_1 = tf.matmul(attn, values)
context_1 = tf.transpose(context_1, [0, 2, 1, 3])
context_1 = tf.reshape(context_1, [batch_size, seq_length, encoder_args.hidden_dim])
attention_output = tf.layers.conv1d(context_1, encoder_args.hidden_dim, 1)
context_2 = attention_output + layer_input
with tf.variable_scope("ffn"):
normed_context_2 = tf.cast(layer_norm(tf.cast(context_2, tf.float32)), data_type)
intermediate_output = tf.layers.conv1d(normed_context_2, intermediate_size, 1, activation=tf.nn.relu)
layer_output_1 = tf.layers.conv1d(intermediate_output, encoder_args.hidden_dim, 1)
layer_output_2 = layer_output_1 + context_2
layer_input = layer_output_2
layer_input = tf.cast(layer_input, tf.float32)
output = layer_norm(layer_input, name="LayerNorm")
output = tf.cast(output, encoder_args.dtype)
return output
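# Added usage sketch (not part of the original file). SimpleNamespace is used as a minimal
# stand-in for the TransformerArgument class of common.py; the field values are illustrative
# assumptions. The call only builds the graph; running it still requires a tf.Session.
def _example_tf_encoder_opennmt():
    from types import SimpleNamespace
    enc_args = SimpleNamespace(dtype=tf.float32, head_num=2, size_per_head=4,
                               hidden_dim=8, num_layer=1)
    inputs = tf.ones([1, 3, 8])                 # [batch_size, seq_len, hidden_dim]
    lengths = tf.constant([3], dtype=tf.int32)  # true length of each sentence
    with tf.variable_scope("example_encoder"):
        return tf_encoder_opennmt(inputs, enc_args, lengths)  # [1, 3, 8]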
def get_shape_list(tensor, expected_rank=None, name=None):
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def ft_encoder_opennmt(inputs,
encoder_args,
encoder_vars_dict,
sequence_length):
'''
Run the OpenNMT encoder transformer layers by FasterTransformer.
Args:
inputs: A tf.Tensor with shape [batch_size, seq_len, hidden_dimension].
The inputs tensor of encoder. The rank must be 3.
encoder_args: The arguments for encoder. The details are in the class "TransformerArgument" of common.py
encoder_vars_dict: A dict of tf.Tensor or numpy array.
The variables for encoder. They can be either tensors or numpy arrays.
The key is the variable name, like 'transformer/encoder/layer_0/multi_head/conv1d/kernel:0'.
The value is the corresponding tensor or numpy array.
sequence_length: A tf.Tensor or numpy array with shape [batch_size].
The sequence length of each sentence.
Outputs:
outputs: A tensor with shape [batch_size, seq_len, hidden_dimension].
The results of encoder.
'''
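# Added note (illustrative, not part of the original file): encoder_vars_dict is usually
# built from the TensorFlow graph after the TF encoder has been constructed, e.g.
#     encoder_vars_dict = {v.name: v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)}
# so that keys such as 'transformer/encoder/layer_0/multi_head/conv1d/kernel:0' resolve below.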
q_w_list = []
q_b_list = []
k_w_list = []
k_b_list = []
v_w_list = []
v_b_list = []
for i in range(encoder_args.num_layer):
q_w, k_w, v_w = tf.split(encoder_vars_dict['transformer/encoder/layer_%d/multi_head/conv1d/kernel:0' % i], 3, axis=-1)
q_w_list.append(q_w)
k_w_list.append(k_w)
v_w_list.append(v_w)
q_b, k_b, v_b = tf.split(encoder_vars_dict['transformer/encoder/layer_%d/multi_head/conv1d/bias:0' % i], 3, axis=-1)
q_b_list.append(q_b)
k_b_list.append(k_b)
v_b_list.append(v_b)
input_shape = get_shape_list(inputs, expected_rank=3)
seq_length = input_shape[1]
inputs *= encoder_args.hidden_dim**0.5
position_encoder = SinusoidalPositionEncoder()
inputs = position_encoder(inputs, position=tf.range(seq_length))
transformer_op_module = tf.load_op_library(os.path.join('./lib/libtf_encoder.so'))
tf_datatype = inputs.dtype
outputs = transformer_op_module.encoder(
inputs,
inputs,
sequence_length,
[tf.cast(encoder_vars_dict['transformer/encoder/layer_%d/multi_head/LayerNorm/beta:0' % id], tf_datatype) for id in range(encoder_args.num_layer)],
[tf.cast(encoder_vars_dict['transformer/encoder/layer_%d/multi_head/LayerNorm/gamma:0' % id], tf_datatype) for id in range(encoder_args.num_layer)],
q_w_list, q_b_list,
k_w_list, k_b_list,
v_w_list, v_b_list,
[encoder_vars_dict['transformer/encoder/layer_%d/multi_head/conv1d_1/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['transformer/encoder/layer_%d/multi_head/conv1d_1/bias:0' % id] for id in range(encoder_args.num_layer)],
[tf.cast(encoder_vars_dict['transformer/encoder/layer_%d/ffn/LayerNorm/beta:0' % id], tf_datatype) for id in range(encoder_args.num_layer)],
[tf.cast(encoder_vars_dict['transformer/encoder/layer_%d/ffn/LayerNorm/gamma:0' % id], tf_datatype) for id in range(encoder_args.num_layer)],
[encoder_vars_dict['transformer/encoder/layer_%d/ffn/conv1d/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['transformer/encoder/layer_%d/ffn/conv1d/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['transformer/encoder/layer_%d/ffn/conv1d_1/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['transformer/encoder/layer_%d/ffn/conv1d_1/bias:0' % id] for id in range(encoder_args.num_layer)],
tf.cast(encoder_vars_dict['transformer/encoder/LayerNorm/beta:0'], tf_datatype),
tf.cast(encoder_vars_dict['transformer/encoder/LayerNorm/gamma:0'], tf_datatype),
head_num = encoder_args.head_num, size_per_head = encoder_args.size_per_head,
inter_size = encoder_args.inter_size,
num_layer = encoder_args.num_layer, remove_padding=encoder_args.remove_padding,
q_scaling = 1.0)
return outputs
| FasterTransformer-main | examples/tensorflow/encoder/utils/encoder.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This sample code demonstrates how to replace the TensorFlow decoder transformer
layer with the FasterTransformer decoder.
It builds a decoding model with TensorFlow, and users can replace the decoder
transformer layer of the model with the FasterTransformer decoder.
The other parts, including the embedding lookup, position encoding and beam
search, are still computed by TensorFlow.
Namely, the baseline model is:
embedding-lookup -> position encoding -> TensorFlow decoder -> beam search
Users can build this model by setting "-decoder 0".
The new model is:
embedding-lookup -> position encoding -> FasterTransformer decoder -> beam search
Users can build this model by setting "-decoder 1".
To verify the correctness of the decoder, use "-decoder 2", which runs both the
TensorFlow decoder and the FasterTransformer decoder in one model and compares
their difference.
This sample code can also be used to measure the average forward time of
TensorFlow and FasterTransformer.
'''
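# Example invocation (added illustration; it assumes the script is launched from the
# FasterTransformer build directory, as in the other TensorFlow examples):
#   python ../examples/tensorflow/decoder/decoder_example.py -decoder 2 -batch 4 -time 1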
from __future__ import print_function
import numpy as np
import tensorflow as tf
import argparse
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.tensorflow.decoder.utils.common import TransformerArgument
from examples.tensorflow.decoder.utils.common import time_test
from examples.tensorflow.decoder.utils.common import DecodingBeamsearchArgument
from examples.tensorflow.decoder.utils.decoding import tf_beamsearch_decoding
from examples.tensorflow.decoder.utils.decoding import generate_encoder_result
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--max_seq_len', type=int, default=5, metavar='NUMBER',
help='max sequence length (default: 5)')
parser.add_argument('-n', '--head_number', type=int, default=8, metavar='NUMBER',
help='head number (default: 8)')
parser.add_argument('-size', '--size_per_head', type=int, default=64, metavar='NUMBER',
help='size per head (default: 64)')
parser.add_argument('-inter_size', '--inter_size', type=int, default=0, metavar='NUMBER',
help='inter_size (default: 0)')
parser.add_argument('-l', '--num_layer', type=int, default=6, metavar='NUMBER',
help='number of layers (default: 6)')
parser.add_argument('-mem_hidden', '--memory_hidden_dim', type=int, default=768, metavar='NUMBER',
help='memory hidden dimension (default: 768)')
parser.add_argument('-v', '--vocab_size', type=int, default=30000, metavar='NUMBER',
help='vocabulary size (default: 30000).')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16', 'bf16'])
parser.add_argument('-time', '--test_time', type=int, default=0, metavar='BOOL',
help='whether to test the running time (default: 0 = False, 1 = True).',
choices=[0, 1])
parser.add_argument('-decoder', '--decoder_type', type=int, default=2, metavar='NUMBER',
help='''
Decoder type:
type 0: only run tf decoder;
type 1: only run op decoder;
type 2: run both tf and op decoder, and compare the difference.
default: type 2 ''', choices=[0, 1, 2])
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
start_of_sentence_id = 1
end_of_sentence_id = 2
np.random.seed(1)
tf.set_random_seed(1)
kernel_initializer_range = 0.02
bias_initializer_range = 0.02
batch_size = args.batch_size
beam_width = args.beam_width
max_seq_len = args.max_seq_len
head_num = args.head_number
size_per_head = args.size_per_head
inter_size = args.inter_size
if inter_size == 0:
inter_size = head_num * size_per_head * 4
num_layer = args.num_layer
hidden_dim = head_num * size_per_head
memory_hidden_dim = args.memory_hidden_dim
vocab_size = args.vocab_size
tf_datatype = tf.float32
np_datatype = np.float32
if args.data_type == "fp16":
tf_datatype = tf.float16
np_datatype = np.float16
elif args.data_type == "bf16":
tf_datatype = tf.bfloat16 ## numpy doesn't support bfloat16, fallback to float32
decoder_args = TransformerArgument(beam_width=beam_width,
head_num=head_num,
size_per_head=size_per_head,
inter_size=inter_size,
num_layer=num_layer,
dtype=tf_datatype,
kernel_init_range=kernel_initializer_range,
bias_init_range=bias_initializer_range,
fuse_qkv=True,
memory_hidden_dim=memory_hidden_dim)
decoding_args = DecodingBeamsearchArgument(vocab_size,
start_of_sentence_id,
end_of_sentence_id,
max_seq_len,
decoder_args,
0.0)
embedding_table = np.random.randn(vocab_size, hidden_dim).astype(np_datatype) * 0.01 # a [vocab_size, hidden_dim] table
embedding_table = tf.convert_to_tensor(embedding_table, dtype = tf_datatype)
memory, memory_sequence_length = generate_encoder_result(
batch_size, max_seq_len, memory_hidden_dim, tf_datatype)
finalized_tf_output_ids, finalized_tf_sequence_lengths, _, _, _ = tf_beamsearch_decoding(memory,
memory_sequence_length,
embedding_table,
decoding_args,
decoder_type=args.decoder_type)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.tables_initializer())
sess.run(finalized_tf_output_ids)
if args.test_time == 1:
time_cost = time_test(sess, finalized_tf_output_ids, iterations=10)
types = ["TF-decoding-beamsearch", "FT-OP-decoder", "TF+FT-OP"]
print("[INFO] batch_size {} beam_width {} head_num {} size_per_head {} seq_len {} " \
"decoder_layers {} vocab_size {} {}-time {:6.2f} ms.".format(batch_size, beam_width, head_num, size_per_head,
max_seq_len, num_layer, vocab_size, types[args.decoder_type], time_cost))
| FasterTransformer-main | examples/tensorflow/decoder/decoder_example.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tensorflow as tf
from examples.tensorflow.decoder.utils.common import create_initializer
USE_CACHE_BATCH_MAJOR_ATTENTION = True
def get_op_cache_config(size_per_head, dtype):
x = 4 if dtype == tf.float32 else 8
use_batch_major_op_cache = True if USE_CACHE_BATCH_MAJOR_ATTENTION == True and \
size_per_head % x == 0 \
else False
return use_batch_major_op_cache, x
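# Added usage sketch (not part of the original file): fp16 uses x = 8 and fp32 uses x = 4,
# and the batch-major cache is only enabled when size_per_head is a multiple of x.
def _example_get_op_cache_config():
    assert get_op_cache_config(64, tf.float16) == (True, 8)   # 64 % 8 == 0
    assert get_op_cache_config(30, tf.float32) == (False, 4)  # 30 % 4 != 0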
def norm(inputs):
"""Layer normalizes :obj:`inputs`."""
return tf.contrib.layers.layer_norm(inputs, begin_norm_axis=-1)
def split_heads(inputs, num_heads):
"""Splits a tensor in depth.
Args:
inputs: A ``tf.Tensor`` of shape :math:`[B, T, D]`.
num_heads: The number of heads :math:`H`.
Returns:
A ``tf.Tensor`` of shape :math:`[B, H, T, D / H]`.
"""
static_shape = inputs.get_shape().as_list()
depth = static_shape[-1]
outputs = tf.reshape(
inputs, [tf.shape(inputs)[0], tf.shape(inputs)[1], num_heads, depth // num_heads])
outputs = tf.transpose(outputs, perm=[0, 2, 1, 3])
return outputs
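# Added usage sketch (not part of the original file): splitting depth D = 8 across H = 4 heads.
def _example_split_heads():
    x = tf.zeros([2, 5, 8])              # [B, T, D]
    return split_heads(x, num_heads=4)   # [B, H, T, D / H] = [2, 4, 5, 2]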
def build_sequence_mask(sequence_length,
num_heads=None,
maximum_length=None,
data_type=tf.float32):
"""Builds the dot product mask.
Args:
sequence_length: The sequence length.
num_heads: The number of heads.
maximum_length: Optional size of the returned time dimension. Otherwise
it is the maximum of :obj:`sequence_length`.
data_type: The type of the mask tensor.
Returns:
A broadcastable ``tf.Tensor`` of type :obj:`dtype` and shape
``[batch_size, 1, 1, max_length]``.
"""
mask = tf.sequence_mask(
sequence_length, maxlen=maximum_length, dtype=data_type)
mask = tf.expand_dims(mask, axis=1)
if num_heads is not None:
mask = tf.expand_dims(mask, axis=1)
return mask
def tf_decoder(decoder_args,
inputs,
memory,
memory_sequence_length,
step,
cache=None):
'''
Run the decoder transformer layer by TensorFlow.
Args:
decoder_args: The arguments for decoder. The details are in the class "TransformerArgument" of common.py
inputs: A tf.Tensor with shape [batch_size * beam_width, 1, hidden_dimension].
The input tensor of the decoder. The rank must be 3.
memory: A tf.tensor with shape [batch_size * beam_width, max(memory_sequence_length), encoder_hidden_dimension].
The results of encoder transformer layer. The rank must be 3.
Note that it must be extended by beam_width times
memory_sequence_length: A tf.Tensor with shape [batch_size * beam_width], type tf.int.
The length of each sentence of results of encoder.
Note that it must be extended by beam_width times
step: A tf.Tensor with tf.int type. The current step in the translation process.
cache: A dict. The cache space to store the keys and values of attention layers.
Outputs:
outputs: A tf.Tensor with shape [batch_size * beam_width, 1, hidden_dimension].
The results of decoder.
'''
k_init_range = decoder_args.kernel_init_range
b_init_range = decoder_args.bias_init_range
data_type = decoder_args.dtype
fuse_qkv = decoder_args.fuse_qkv
hidden_dim = decoder_args.hidden_dim
memory_mask = None  # built below from memory_sequence_length when memory is provided
if memory is not None and not tf.contrib.framework.nest.is_sequence(memory):
memory = (memory,)
if memory_sequence_length is not None:
if not tf.contrib.framework.nest.is_sequence(memory_sequence_length):
memory_sequence_length = (memory_sequence_length,)
memory_mask = [
build_sequence_mask(
length, num_heads=decoder_args.head_num, maximum_length=tf.shape(m)[1], data_type=data_type)
for m, length in zip(memory, memory_sequence_length)]
for l in range(decoder_args.num_layer):
layer_name = "layer_{}".format(l)
layer_cache = cache[layer_name] if cache is not None else None
with tf.variable_scope(layer_name):
with tf.variable_scope("masked_multi_head"):
norm_inputs = norm(inputs)
if fuse_qkv == True:
queries, keys, values = tf.split( tf.layers.conv1d(norm_inputs, decoder_args.hidden_dim * 3, 1,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type)), 3, axis=2)
else:
'''
This branch avoids an additional tf.concat of the q, k, v kernels for the decoder op,
because the concat brings a large overhead when the batch size is small.
'''
queries = tf.layers.conv1d(norm_inputs, decoder_args.hidden_dim, 1,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type))
keys = tf.layers.conv1d(norm_inputs, decoder_args.hidden_dim, 1,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type),
name="key")
values = tf.layers.conv1d(norm_inputs, decoder_args.hidden_dim, 1,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type),
name="value")
keys = tf.reshape(keys, [tf.shape(keys)[0], 1, decoder_args.head_num, decoder_args.size_per_head])
keys = tf.transpose(keys, [0, 2, 1, 3])
values = tf.reshape(values, [tf.shape(values)[0], 1, decoder_args.head_num, decoder_args.size_per_head])
values = tf.transpose(values, [0, 2, 1, 3])
keys = tf.concat([layer_cache["self_keys"], keys], axis=2)
values = tf.concat([layer_cache["self_values"], values], axis=2)
layer_cache["self_keys"] = keys
layer_cache["self_values"] = values
queries = tf.reshape(queries, [tf.shape(queries)[0], 1, decoder_args.head_num, decoder_args.size_per_head])
queries = tf.transpose(queries, [0, 2, 1, 3])
queries *= (decoder_args.size_per_head)**-0.5
dot = tf.matmul(queries, keys, transpose_b=True)
attn = tf.cast(tf.nn.softmax(tf.cast(dot, data_type)), dot.dtype)
context = tf.matmul(attn, values)
context = tf.transpose(context, [0, 2, 1, 3])
context = tf.reshape(context, [tf.shape(context)[0], 1, decoder_args.head_num * decoder_args.size_per_head])
outputs = tf.layers.conv1d(context,
decoder_args.hidden_dim,
1,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type))
# drop_and_add
input_dim = inputs.get_shape().as_list()[-1]
output_dim = outputs.get_shape().as_list()[-1]
if input_dim == output_dim:
outputs += inputs
last_context = outputs
if memory is not None:
for i, (mem, mask) in enumerate(zip(memory, memory_mask)):
memory_cache = layer_cache["memory"][i] if layer_cache is not None else None
with tf.variable_scope("multi_head" if i == 0 else "multi_head_%d" % i):
queries = tf.layers.conv1d(
norm(last_context),
decoder_args.hidden_dim,
1,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type))
def _project_and_split():
if fuse_qkv == True:
keys, values = tf.split( tf.layers.conv1d(mem, decoder_args.hidden_dim * 2, 1,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type)), 2, axis=2)
else:
keys = tf.layers.conv1d(mem, decoder_args.hidden_dim, 1,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type))
values = tf.layers.conv1d(mem, decoder_args.hidden_dim, 1,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type),
name="value")
keys = tf.reshape(keys, [tf.shape(keys)[0], tf.shape(keys)[1],
decoder_args.head_num, decoder_args.size_per_head])
keys = tf.transpose(keys, [0, 2, 1, 3])
values = tf.reshape(values, [tf.shape(values)[0], tf.shape(values)[1],
decoder_args.head_num, decoder_args.size_per_head])
values = tf.transpose(values, [0, 2, 1, 3])
return keys, values
keys, values = tf.cond(
tf.equal(
tf.shape(memory_cache["memory_keys"])[2], 0),
true_fn=_project_and_split,
false_fn=lambda: (memory_cache["memory_keys"], memory_cache["memory_values"]))
memory_cache["memory_keys"] = keys
memory_cache["memory_values"] = values
queries = tf.reshape(queries, [tf.shape(queries)[0], 1,decoder_args.head_num, decoder_args.size_per_head])
queries = tf.transpose(queries, [0, 2, 1, 3])
queries *= (decoder_args.size_per_head)**-0.5
dot = tf.matmul(queries, keys, transpose_b=True)
dot = tf.cast(tf.cast(dot, data_type) * mask +
((1.0 - mask) * data_type.min), dot.dtype)
attn = tf.cast(tf.nn.softmax(
tf.cast(dot, data_type)), dot.dtype)
context = tf.matmul(attn, values)
context = tf.transpose(context, [0, 2, 1, 3])
context = tf.reshape(context, [tf.shape(context)[0], 1,
decoder_args.head_num * decoder_args.size_per_head])
context = tf.layers.conv1d(context,
decoder_args.hidden_dim,
1,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type))
# drop_and_add
input_dim = last_context.get_shape().as_list()[-1]
output_dim = context.get_shape().as_list()[-1]
if input_dim == output_dim:
context += last_context
with tf.variable_scope("ffn"):
# forward
normed_last_context = norm(context)
input_dim = normed_last_context.get_shape().as_list()[-1]
inner = tf.layers.conv1d(normed_last_context,
decoder_args.inter_size,
1,
activation=tf.nn.relu,
use_bias=True,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type))
transformed = tf.layers.conv1d(inner,
input_dim,
1,
use_bias=True,
bias_initializer=create_initializer(b_init_range, data_type),
kernel_initializer=create_initializer(k_init_range, data_type))
# drop_and_add
input_dim = context.get_shape().as_list()[-1]
output_dim = transformed.get_shape().as_list()[-1]
if input_dim == output_dim:
transformed += context
inputs = transformed
outputs = inputs
return outputs
def init_tf_cache(batch_size,
head_num,
size_per_head,
num_layer,
dtype,
num_sources=1):
cache = {}
for l in range(num_layer):
proj_cache_shape = [batch_size, head_num, 0, size_per_head]
layer_cache = {}
layer_cache["memory"] = [
{
"memory_keys": tf.zeros(proj_cache_shape, dtype=dtype, name="memory_keys"),
"memory_values": tf.zeros(proj_cache_shape, dtype=dtype, name="memory_values")
} for _ in range(num_sources)]
layer_cache["self_keys"] = tf.zeros(
proj_cache_shape, dtype=dtype, name="self_keys")
layer_cache["self_values"] = tf.zeros(
proj_cache_shape, dtype=dtype, name="self_values")
cache["layer_{}".format(l)] = layer_cache
return cache
def init_op_cache(decoder_args, batchxbeam, memory_max_seq_len, decoding_max_seq_len):
use_batch_major_op_cache, x = get_op_cache_config(decoder_args.size_per_head, decoder_args.dtype)
if use_batch_major_op_cache == True:
self_cache = ( tf.zeros([decoder_args.num_layer, batchxbeam, decoder_args.head_num, decoder_args.size_per_head // x,
decoding_max_seq_len, x], dtype=decoder_args.dtype, name="op_self_cache_keys"),
tf.zeros([decoder_args.num_layer, batchxbeam, decoder_args.head_num, decoding_max_seq_len,
decoder_args.size_per_head], dtype=decoder_args.dtype, name="op_self_cache_values") )
# use old format for now
# mem_cache = tf.zeros([decoder_args.num_layer, 2, batchxbeam, memory_max_seq_len,
# decoder_args.hidden_dim], dtype=decoder_args.dtype, name="op_memory_caches")
mem_cache = tf.zeros([2, decoder_args.num_layer, batchxbeam,
memory_max_seq_len, decoder_args.hidden_dim], dtype=decoder_args.dtype, name="op_memory_caches")
else :
# TODO(bhsueh) remove this case
self_cache = ( tf.zeros([decoder_args.num_layer, 0, batchxbeam,
decoder_args.hidden_dim], dtype=decoder_args.dtype, name="op_self_cache_keys"),
tf.zeros([decoder_args.num_layer, 0, batchxbeam,
decoder_args.hidden_dim], dtype=decoder_args.dtype, name="op_self_cache_values") )
# mem_cache = tf.zeros([decoder_args.num_layer, 2, batchxbeam,
# memory_max_seq_len, decoder_args.hidden_dim], dtype=decoder_args.dtype, name="op_memory_caches")
mem_cache = tf.zeros([2, decoder_args.num_layer, batchxbeam,
memory_max_seq_len, decoder_args.hidden_dim], dtype=decoder_args.dtype, name="op_memory_caches")
return self_cache, mem_cache
def op_decoder(inputs,
memory_tensor,
memory_sequence_length,
op_self_cache,
op_mem_cache,
psuedo_input,
var_dict,
decoder_args,
step,
sequence_lengths):
'''
Run the decoder transformer layer by FasterTransformer.
Args:
inputs: A tf.Tensor with shape [batch_size * beam_width, 1, hidden_dimension].
The input tensor of the decoder. The rank must be 3.
memory_tensor: A tf.tensor with shape [batch_size * beam_width, max(memory_sequence_length), encoder_hidden_dimension].
The results of encoder transformer layer. The rank must be 3.
Note that it must be extended by beam_width times
memory_sequence_length: A tf.Tensor with shape [batch_size * beam_width], type tf.int.
The length of each sentence of results of encoder.
Note that it must be extended by beam_width times
op_self_cache: A tf.Tensor with shape [num_layer, 2, None, batch_size * beam_width, hidden_dimension].
The cache space to store the keys and values of first attention layer in each step.
op_mem_cache: A tf.Tensor with shape [num_layer, 2, batch_size * beam_width, max(memory_sequence_length) hidden_dimension].
The cache space to store the keys and values of second attention layer.
Since they are the same at every step, they only need to be computed at the first step.
psuedo_input: A tf.Tensor or null list.
The TensorFlow decoder results, passed in when running the TensorFlow decoder and the
FasterTransformer decoder in one model. This prevents a race condition between them.
It is unused when only the FasterTransformer decoder is run.
decoder_args: The arguments for decoder. The details are in the class "TransformerArgument" of common.py
var_dict: A dict of tf.Tensor or numpy array. The variables for decoder.
They can be either some tensor or some numpy array.
Outputs:
outputs: A tf.Tensor with shape [batch_size * beam_width, 1, hidden_dimension].
The results of decoder.
'''
'''
If fuse_qkv == True, the computation of q, k, v in the decoder is fused into one convolution.
Therefore, we need to split them before passing them into the decoder op. The split brings additional
overhead, especially when the batch size is small, because the computation time is short.
However, since most publicly available pretrained models fuse the qkv, we fuse them by default.
'''
decoder_op_module = tf.load_op_library(os.path.join('./lib/libtf_decoder.so'))
use_batch_major_op_cache, _ = get_op_cache_config(decoder_args.size_per_head, decoder_args.dtype)
if use_batch_major_op_cache == False:
op_self_cache = tf.contrib.framework.nest.map_structure(
lambda s: tf.concat([s, tf.zeros([decoder_args.num_layer, 1,
tf.shape(memory_tensor)[0],
decoder_args.hidden_dim], dtype=decoder_args.dtype)], axis=1),
op_self_cache )
cross_key_kernel_list = []
cross_value_kernel_list = []
cross_key_bias_list = []
cross_value_bias_list = []
for l in range(decoder_args.num_layer):
layer_prefix_name = "transformer/decoder/layer_%d/" % l
cross_key_kernel, cross_value_kernel = tf.split(var_dict[layer_prefix_name + 'multi_head/conv1d_1/kernel:0'], 2, axis=-1)
cross_key_bias, cross_value_bias = tf.split(var_dict[layer_prefix_name + 'multi_head/conv1d_1/bias:0'], 2, axis=-1)
cross_key_kernel_list.append(cross_key_kernel)
cross_value_kernel_list.append(cross_value_kernel)
cross_key_bias_list.append(cross_key_bias)
cross_value_bias_list.append(cross_value_bias)
op_result, _, _, _, _ = decoder_op_module.decoder(
inputs, # 0
memory_tensor, # 1
memory_sequence_length, # 2
op_self_cache[0], # 3
op_self_cache[1], # 4
op_mem_cache[0], # 5
op_mem_cache[1], # 6
[var_dict["transformer/decoder/layer_%d/masked_multi_head/LayerNorm/beta:0" % l] for l in range(decoder_args.num_layer)], # 7
[var_dict["transformer/decoder/layer_%d/masked_multi_head/LayerNorm/gamma:0" % l] for l in range(decoder_args.num_layer)], # 8
[var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d/kernel:0" % l] for l in range(decoder_args.num_layer)], # 9
[var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d/bias:0" % l] for l in range(decoder_args.num_layer)], # 10
[var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d_1/kernel:0" % l] for l in range(decoder_args.num_layer)], # 11
[var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d_1/bias:0" % l] for l in range(decoder_args.num_layer)], # 12
[var_dict["transformer/decoder/layer_%d/multi_head/LayerNorm/beta:0" % l] for l in range(decoder_args.num_layer)], # 13
[var_dict["transformer/decoder/layer_%d/multi_head/LayerNorm/gamma:0" % l] for l in range(decoder_args.num_layer)], # 14
[var_dict["transformer/decoder/layer_%d/multi_head/conv1d/kernel:0" % l] for l in range(decoder_args.num_layer)], # 15
[var_dict["transformer/decoder/layer_%d/multi_head/conv1d/bias:0" % l] for l in range(decoder_args.num_layer)], # 16
cross_key_kernel_list, # 17
cross_key_bias_list, # 18
cross_value_kernel_list, # 19
cross_value_bias_list, # 20
[var_dict["transformer/decoder/layer_%d/multi_head/conv1d_2/kernel:0" % l] for l in range(decoder_args.num_layer)], # 21
[var_dict["transformer/decoder/layer_%d/multi_head/conv1d_2/bias:0" % l] for l in range(decoder_args.num_layer)], # 22
[var_dict["transformer/decoder/layer_%d/ffn/LayerNorm/beta:0" % l] for l in range(decoder_args.num_layer)], # 23
[var_dict["transformer/decoder/layer_%d/ffn/LayerNorm/gamma:0" % l] for l in range(decoder_args.num_layer)], # 24
[var_dict["transformer/decoder/layer_%d/ffn/conv1d/kernel:0" % l] for l in range(decoder_args.num_layer)], # 25
[var_dict["transformer/decoder/layer_%d/ffn/conv1d/bias:0" % l] for l in range(decoder_args.num_layer)], # 26
[var_dict["transformer/decoder/layer_%d/ffn/conv1d_1/kernel:0" % l] for l in range(decoder_args.num_layer)], # 27
[var_dict["transformer/decoder/layer_%d/ffn/conv1d_1/bias:0" % l] for l in range(decoder_args.num_layer)], # 28
step, # 29
sequence_lengths, # 30
psuedo_input, # 31, pass tf_result as input to prevent the OP and TF decoder from running in parallel, which would lead to wrong results
head_num=decoder_args.head_num,
size_per_head=decoder_args.size_per_head,
inter_size=decoder_args.inter_size,
num_layer=decoder_args.num_layer)
return op_result, op_self_cache, op_mem_cache
| FasterTransformer-main | examples/tensorflow/decoder/utils/decoder.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import tensorflow as tf
def pad_in_time(x, padding_length):
"""Helper function to pad a tensor in the time dimension and retain the static depth dimension."""
return tf.pad(x, [[0, 0], [0, padding_length], [0, 0]])
def align_in_time(x, length):
"""Aligns the time dimension of :obj:`x` with :obj:`length`."""
time_dim = tf.shape(x)[1]
return tf.cond(
tf.less(time_dim, length),
true_fn=lambda: pad_in_time(x, length - time_dim),
false_fn=lambda: x[:, :length])
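# Added usage sketch (not part of the original file): align_in_time either pads or
# truncates the time dimension to the requested length.
def _example_align_in_time():
    padded = align_in_time(tf.ones([2, 3, 4]), length=5)     # time axis padded to 5
    truncated = align_in_time(tf.ones([2, 7, 4]), length=5)  # time axis sliced to 5
    return padded, truncated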
def pad_with_identity(x, sequence_length, max_sequence_length, identity_values=0, maxlen=None):
"""Pads a tensor with identity values up to :obj:`max_sequence_length`.
Args:
x: A ``tf.Tensor`` of shape ``[batch_size, time, depth]``.
sequence_length: The true sequence length of :obj:`x`.
max_sequence_length: The sequence length up to which the tensor must contain
:obj:`identity values`.
identity_values: The identity value.
maxlen: Size of the output time dimension. Default is the maximum value in
:obj:`max_sequence_length`.
Returns:
A ``tf.Tensor`` of shape ``[batch_size, maxlen, depth]``.
"""
if maxlen is None:
maxlen = tf.reduce_max(max_sequence_length)
mask = tf.sequence_mask(sequence_length, maxlen=maxlen, dtype=x.dtype)
mask = tf.expand_dims(mask, axis=-1)
mask_combined = tf.sequence_mask(
max_sequence_length, maxlen=maxlen, dtype=x.dtype)
mask_combined = tf.expand_dims(mask_combined, axis=-1)
identity_mask = mask_combined * (1.0 - mask)
x = pad_in_time(x, maxlen - tf.shape(x)[1])
x = x * mask + (identity_mask * identity_values)
return x
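# Added usage sketch (not part of the original file): a single sentence of true length 2
# padded up to a combined length of 4 with the identity value 0.
def _example_pad_with_identity():
    x = tf.ones([1, 2, 3])  # [batch_size, time, depth]
    return pad_with_identity(x, sequence_length=[2], max_sequence_length=[4])  # [1, 4, 3]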
def pad_n_with_identity(inputs, sequence_lengths, identity_values=0):
"""Pads each input tensors with identity values up to
``max(sequence_lengths)`` for each batch.
Args:
inputs: A list of ``tf.Tensor``.
sequence_lengths: A list of sequence length.
identity_values: The identity value.
Returns:
A tuple ``(padded, max_sequence_length)`` which are respectively a list of
``tf.Tensor`` where each tensor are padded with identity and the combined
sequence length.
"""
max_sequence_length = tf.reduce_max(sequence_lengths, axis=0)
maxlen = tf.reduce_max([tf.shape(x)[1] for x in inputs])
padded = [
pad_with_identity(
x, length, max_sequence_length, identity_values=identity_values, maxlen=maxlen)
for x, length in zip(inputs, sequence_lengths)]
return padded, max_sequence_length
class Reducer():
"""Base class for reducers."""
def zip_and_reduce(self, x, y):
"""Zips the :obj:`x` with :obj:`y` structures together and reduces all
elements. If the structures are nested, they will be flattened first.
Args:
x: The first structure.
y: The second structure.
Returns:
The same structure as :obj:`x` and :obj:`y` where each element from
:obj:`x` is reduced with the corresponding element from :obj:`y`.
Raises:
ValueError: if the two structures are not the same.
"""
tf.nest.assert_same_structure(x, y)
x_flat = tf.nest.flatten(x)
y_flat = tf.nest.flatten(y)
reduced = list(map(self, zip(x_flat, y_flat)))
return tf.nest.pack_sequence_as(x, reduced)
def __call__(self, inputs, sequence_length=None):
"""Reduces all input elements.
Args:
inputs: A list of ``tf.Tensor``.
sequence_length: The length of each input, if reducing sequences.
Returns:
If :obj:`sequence_length` is set, a tuple
``(reduced_input, reduced_length)``, otherwise a reduced ``tf.Tensor``
only.
"""
if sequence_length is None:
return self.reduce(inputs)
else:
return self.reduce_sequence(inputs, sequence_lengths=sequence_length)
@abc.abstractmethod
def reduce(self, inputs):
"""See :meth:`opennmt.layers.Reducer.__call__`."""
raise NotImplementedError()
@abc.abstractmethod
def reduce_sequence(self, inputs, sequence_lengths):
"""See :meth:`opennmt.layers.Reducer.__call__`."""
raise NotImplementedError()
class SumReducer(Reducer):
"""A reducer that sums the inputs."""
def reduce(self, inputs):
return tf.add_n(inputs)
def reduce_sequence(self, inputs, sequence_lengths):
padded, combined_length = pad_n_with_identity(
inputs, sequence_lengths, identity_values=0)
return self.reduce(padded), combined_length
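# Added usage sketch (not part of the original file): summing two input tensors.
def _example_sum_reducer():
    reducer = SumReducer()
    a = tf.constant([[1.0, 2.0]])
    b = tf.constant([[3.0, 4.0]])
    return reducer([a, b])  # [[4.0, 6.0]]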
| FasterTransformer-main | examples/tensorflow/decoder/utils/reducer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import os
import pickle
import sys
from examples.tensorflow.decoder.utils.decoder import tf_decoder
from examples.tensorflow.decoder.utils.decoder import op_decoder
from examples.tensorflow.decoder.utils.decoder import init_op_cache
from examples.tensorflow.decoder.utils.decoder import init_tf_cache
from examples.tensorflow.decoder.utils.common import create_initializer
from examples.tensorflow.decoder.utils.common import _get_shape_invariants
from examples.tensorflow.decoder.utils.position import SinusoidalPositionEncoder
from examples.tensorflow.decoder.utils.beam_search import search_word
from examples.tensorflow.decoder.utils.sampling import Sampling
def initialize_decoding_variables(decoding_args, batchxbeam):
start_ids = tf.fill([batchxbeam], decoding_args.start_id) # [batch_size * beam_width]
step = tf.constant(0, dtype=tf.int32)
# save the output ids for each step
outputs = tf.TensorArray(tf.int32, size=0, dynamic_size=True)
cache = init_tf_cache(batchxbeam,
decoding_args.decoder_args.head_num, decoding_args.decoder_args.size_per_head,
decoding_args.decoder_args.num_layer, dtype=decoding_args.decoder_args.dtype, num_sources=1)
finished = tf.zeros([batchxbeam], dtype=tf.bool) # [batch_size * beam_width], record that a sentence is finished or not
initial_log_probs = tf.cast(tf.tile([0.] + [-float("inf")] * (decoding_args.decoder_args.beam_width - 1),
[batchxbeam / decoding_args.decoder_args.beam_width]), dtype=tf.float32) # [batch_size * beam_width]
# [batch_size * beam_width], record the lengths of all sentences
sequence_lengths = tf.zeros([batchxbeam], dtype=tf.int32)
# record the beam search indices, used for rebuild the whole sentence in the final
parent_ids = tf.TensorArray(tf.int32, size=0, dynamic_size=True)
extra_vars = tuple([parent_ids, sequence_lengths])
return start_ids, step, outputs, cache, finished, initial_log_probs, sequence_lengths, extra_vars
def generate_encoder_result(batch_size,
max_seq_len,
memory_hidden_dim,
dtype):
memory_sequence_length = np.random.randint(
1, max_seq_len + 1, size=batch_size).astype(np.int32)
memory_sequence_length[np.random.randint(0, batch_size)] = max_seq_len
outter_embbeding = np.random.randn(memory_hidden_dim) * 0.01
memory = []
mem_max_seq_len = np.max(memory_sequence_length)
for i in range(batch_size):
data = np.random.randn(mem_max_seq_len, memory_hidden_dim) * 0.01
for j in range(memory_sequence_length[i], mem_max_seq_len):
data[j] = outter_embbeding
memory.append(data)
memory = np.asarray(memory)
memory = tf.convert_to_tensor(memory, dtype=dtype)
return memory, memory_sequence_length
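# Added usage sketch (not part of the original file): random pseudo encoder outputs for testing.
def _example_generate_encoder_result():
    memory, lengths = generate_encoder_result(batch_size=2, max_seq_len=5,
                                              memory_hidden_dim=8, dtype=tf.float32)
    return memory, lengths  # memory: [2, 5, 8] tensor; lengths: int32 numpy array of shape [2]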
def finalize(beam_width, parent_ids, sequence_lengths, outputs, end_id, max_seq_len=None):
maximum_lengths = tf.reduce_max(tf.reshape(
sequence_lengths, [-1, beam_width]), axis=-1)
if max_seq_len != None:
array_shape = [max_seq_len, -1, beam_width]
else:
array_shape = [tf.reduce_max(maximum_lengths), -1, beam_width]
step_ids = tf.reshape(outputs, array_shape)
parent_ids = tf.reshape(parent_ids, array_shape)
ids = tf.contrib.seq2seq.gather_tree(
step_ids, parent_ids, maximum_lengths, end_id)
ids = tf.transpose(ids, perm=[1, 2, 0])
lengths = tf.not_equal(ids, end_id)
lengths = tf.cast(lengths, tf.int32)
lengths = tf.reduce_sum(lengths, axis=-1)
return ids, lengths
def decoding_body(word_ids,
step,
memory,
memory_sequence_length,
my_cache,
op_self_cache,
op_mem_cache,
embedding_table,
decoding_args,
decoder_type,
sequence_lengths):
decoder_args = decoding_args.decoder_args
hidden_dim = decoder_args.hidden_dim
k_init_range = decoder_args.kernel_init_range
data_type = decoder_args.dtype
batchxbeam = tf.shape(word_ids)[0]
# [batch_size * beam_width, hidden_dim]
inputs = tf.nn.embedding_lookup(embedding_table, word_ids)
# [batch_size * beam_width, 1, hidden_dim]
inputs = tf.expand_dims(inputs, 1)
inputs *= hidden_dim**0.5
position_encoder = SinusoidalPositionEncoder()
if position_encoder is not None:
position_encoding_table = position_encoder._create_position_encoding_table(decoding_args.max_seq_len, hidden_dim, data_type)
position_encoding_val = position_encoding_table[step]
position_encoding_val = tf.reshape(position_encoding_val, [1, 1, -1])
position_encoding_val = tf.tile(position_encoding_val, [batchxbeam, 1, 1])
inputs = inputs + position_encoding_val
with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
tf_result = tf_decoder(decoder_args=decoder_args,
inputs=inputs,
memory=memory,
memory_sequence_length=memory_sequence_length,
step=step,
cache=my_cache)
if decoder_type != 0:
decoder_vars = tf.global_variables()
decoder_vars_start_id = 0
while decoder_vars_start_id < len(decoder_vars):
if decoder_vars[decoder_vars_start_id].name.find("transformer/decoder/layer") != -1:
break
decoder_vars_start_id += 1
decoder_vars = decoder_vars[decoder_vars_start_id:]
decoder_var_dict = {}
for v in decoder_vars:
decoder_var_dict[v.name] = v
psuedo_input = []
if decoder_type == 2:
psuedo_input = tf_result
op_result, op_self_cache, op_mem_cache = op_decoder(inputs,
memory,
memory_sequence_length,
op_self_cache,
op_mem_cache,
psuedo_input,
decoder_var_dict,
decoder_args,
step,
sequence_lengths)
result = None
if decoder_type == 0:
result = tf_result
elif decoder_type == 1:
result = op_result
elif decoder_type == 2:
result = tf_result
result_2 = op_result
flatten_result = tf.reshape(result, [-1])
flatten_result_2 = tf.reshape(result_2, [-1])
abs_diff = tf.math.abs(flatten_result - flatten_result_2)
abs_argmax = tf.math.argmax(abs_diff)
result = tf.Print(result, ["[INFO][PYTHON] step:", step,
tf.cond(abs_diff[abs_argmax] / (tf.math.abs(flatten_result[abs_argmax]) + 1e-6) < decoder_args.check_threshold,
lambda: "True", lambda: "False"),
"max abs diff: ", abs_diff[abs_argmax],
" op val: ", flatten_result_2[abs_argmax],
" tf val: ", flatten_result[abs_argmax] ])
else:
print("[TF][ERROR] decoder type is only 0 or 1 or 2.")
exit(-1)
result = tf.contrib.layers.layer_norm(result, begin_norm_axis=-1)
# [batch_size * beam_width, hidden_dim]
result = tf.squeeze(result, axis=1)
logits = tf.layers.dense(result,
decoding_args.vocab_size,
use_bias=True,
bias_initializer=create_initializer(0.0, data_type),
kernel_initializer=create_initializer(k_init_range, data_type),
activation=None)
return logits, my_cache, op_self_cache, op_mem_cache
def tf_beamsearch_decoding(memory_tensor,
memory_sequence_length,
embedding_table,
decoding_args,
decoder_type):
'''
Run the decoding with beam search by TensorFlow.
Args:
memory_tensor: A tf.tensor with shape [batch_size * beam_width, max(memory_sequence_length), encoder_hidden_dimension].
The results of encoder transformer layer. The rank must be 3.
Note that it must be extended by beam_width times.
memory_sequence_length: A tf.Tensor with shape [batch_size * beam_width], type tf.int.
The length of each sentence of results of encoder.
Note that it must be extended by beam_width times.
embedding_table: A tf.Tensor with shape [vocab_size, hidden_dimension].
The embedding table of embedding lookup for each step.
decoding_args: The arguments for decoding. The details are in the class "DecodingBeamsearchArgument" of common.py
decoder_type: An int value. Choose to use the TensorFlow decoder, the FasterTransformer decoder, or both.
If it is 0, then using the TensorFlow decoder only.
If it is 1, then using the FasterTransformer decoder only.
If it is 2, then using both decoder and compare their result.
Outputs:
finalized_tf_output_ids: A tf.Tensor with shape [batch_size, beam_width, max(tf_sequence_lengths)], with tf.int type.
Finalized tf_output_ids by beam search algorithm and tf_parent_ids.
finalized_tf_sequence_lengths: A tf.Tensor with shape [batch_size * beam_width], with int type.
Finalized tf_sequence_lengths by beam search algorithm and tf_parent_ids.
tf_output_ids: A tf.Tensor with shape [batch_size, beam_width, max(tf_sequence_lengths)], with tf.int type.
The results of decoding. It contains the id of token of vocabulary.
tf_parent_ids: A tf.Tensor with shape [batch_size, beam_width, max(tf_sequence_lengths)], with tf.int type.
The beam index of output ids for each step.
tf_sequence_lengths: A tf.Tensor with shape [batch_size * beam_width], with int type.
'''
decoder_args = decoding_args.decoder_args
beam_width = decoder_args.beam_width
search_method = decoding_args.search_method
with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE):
# copy memory and memory_sequence_length by beam_width times
# if memory is [a, b, c], beam_width = 3, then the result is: [a a a b b b c c c ]
extended_memory = tf.contrib.seq2seq.tile_batch(memory_tensor, multiplier=beam_width)
extended_memory_sequence_length = tf.contrib.seq2seq.tile_batch(
memory_sequence_length, multiplier=beam_width)
def _cond(word_ids, cum_log_probs, finished, step, outputs, my_cache, extra_vars, op_self_cache, op_mem_cache):
return tf.reduce_any(tf.logical_not(finished))
def _body(word_ids, cum_log_probs, finished, step, outputs, my_cache, extra_vars, op_self_cache, op_mem_cache):
logits, my_cache, op_self_cache, op_mem_cache = decoding_body(word_ids,
step,
extended_memory,
extended_memory_sequence_length,
my_cache,
op_self_cache,
op_mem_cache,
embedding_table,
decoding_args,
decoder_type,
extra_vars[1])
end_ids = tf.fill([tf.shape(logits)[0]], decoding_args.end_id) # [batch_size * beam_width]
eos_max_prob = tf.one_hot(end_ids, decoding_args.vocab_size,
on_value=decoder_args.dtype.max,
off_value=decoder_args.dtype.min) # [batch_size * beam_width, vocab_size]
# [batch_size * beam_width, vocab_size]
logits = tf.where(finished, x=eos_max_prob, y=logits)
logits = tf.cast(logits, tf.float32)
output_id, next_cum_log_probs, finished, my_cache, \
extra_vars, op_self_cache = search_word(beam_width,
decoding_args.vocab_size,
step,
logits,
cum_log_probs,
finished,
my_cache,
extra_vars,
op_self_cache,
search_method=search_method)
cum_log_probs = tf.where(finished, x=cum_log_probs, y=next_cum_log_probs)
outputs = outputs.write(step, output_id)
finished = tf.logical_or(finished, tf.equal(output_id, decoding_args.end_id))
return output_id, cum_log_probs, finished, step + 1, outputs, my_cache, extra_vars, op_self_cache, op_mem_cache
# initialization
batchxbeam = tf.shape(extended_memory)[0]
start_ids, step, outputs, tf_decoder_cache, finished, initial_log_probs, \
tf_sequence_lengths, extra_vars = initialize_decoding_variables(decoding_args, batchxbeam)
word_ids = tf.identity(start_ids, name="word_ids")
cum_log_probs = tf.identity(initial_log_probs, name="cum_log_probs")
# if use_op == False, these two caches are useless
op_self_cache, op_mem_cache = init_op_cache(decoder_args, batchxbeam, tf.reduce_max(memory_sequence_length), decoding_args.max_seq_len)
_, _, _, _, outputs, _, extra_vars, _, _ = tf.while_loop(
_cond,
_body,
loop_vars=(
word_ids,
cum_log_probs,
finished,
step,
outputs,
tf_decoder_cache,
extra_vars,
op_self_cache,
op_mem_cache
),
back_prop=False,
maximum_iterations=decoding_args.max_seq_len,
shape_invariants=(
start_ids.shape,
initial_log_probs.shape,
finished.shape,
step.shape,
tf.TensorShape(None),
tf.contrib.framework.nest.map_structure(_get_shape_invariants, tf_decoder_cache),
tf.contrib.framework.nest.map_structure(_get_shape_invariants, extra_vars),
tf.contrib.framework.nest.map_structure(_get_shape_invariants, op_self_cache),
tf.contrib.framework.nest.map_structure(_get_shape_invariants, op_mem_cache))
)
tf_parent_ids = extra_vars[0].stack()
tf_sequence_lengths = extra_vars[1]
tf_output_ids = outputs.stack()
finalized_tf_output_ids, finalized_tf_sequence_lengths = finalize(beam_width,
tf_parent_ids,
tf_sequence_lengths,
tf_output_ids,
decoding_args.end_id)
finalized_tf_output_ids = tf.cast(finalized_tf_output_ids, start_ids.dtype)
finalized_tf_sequence_lengths = tf.minimum(
finalized_tf_sequence_lengths + 1, tf.shape(finalized_tf_output_ids)[2])
return finalized_tf_output_ids, finalized_tf_sequence_lengths, tf_output_ids, tf_parent_ids, tf_sequence_lengths
def tf_sampling_decoding(memory_tensor,
memory_sequence_length,
embedding_table,
decoding_args,
decoder_type):
'''
Run the decoding with sampling by TensorFlow.
Args:
memory_tensor: A tf.tensor with shape [batch_size, max(memory_sequence_length), encoder_hidden_dimension].
The results of encoder transformer layer. The rank must be 3.
memory_sequence_length: A tf.Tensor with shape [batch_size], type tf.int.
The length of each sentence of results of encoder.
embedding_table: A tf.Tensor with shape [vocab_size, hidden_dimension].
The embedding table of embedding lookup for each step.
decoding_args: The arguments for decoding. The details are in the class "DecodingSamplingArgument" of common.py
decoder_type: An int value. Choose to use the TensorFlow decoder, the FasterTransformer decoder, or both.
If it is 0, then using the TensorFlow decoder only.
If it is 1, then using the FasterTransformer decoder only.
If it is 2, then using both decoder and compare their result.
Outputs:
tf_output_ids: A tf.Tensor with shape [batch_size, max(sequence_lengths)], with int type.
The results of decoding. It contains the ids of the tokens in the vocabulary.
sequence_lengths: A tf.Tensor with shape [batch_size], with int type.
'''
decoder_args = decoding_args.decoder_args
with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE):
batch_size = tf.shape(memory_tensor)[0]
def _cond(word_ids, finished, step, outputs, my_cache, sequence_lengths, op_self_cache, op_mem_cache):
return tf.reduce_any(tf.logical_not(finished))
def _body(word_ids, finished, step, outputs, my_cache, sequence_lengths, op_self_cache, op_mem_cache):
logits, my_cache, op_self_cache, op_mem_cache = decoding_body(word_ids,
step,
memory_tensor,
memory_sequence_length,
my_cache,
op_self_cache,
op_mem_cache,
embedding_table,
decoding_args,
decoder_type,
sequence_lengths)
end_ids = tf.fill([batch_size], decoding_args.end_id) # [batch_size]
eos_max_prob = tf.one_hot(end_ids, decoding_args.vocab_size,
on_value=decoder_args.dtype.max,
off_value=decoder_args.dtype.min) # [batch_size, vocab_size]
# [batch_size, vocab_size]
logits = tf.where(finished, x=eos_max_prob, y=logits)
logits = tf.cast(logits, tf.float32)
# sampling
if decoding_args.top_k != 0:
sampling_method = Sampling("top_k")
output_id = sampling_method.sample(logits, threshold=decoding_args.top_k)
elif decoding_args.top_p != 0.0:
sampling_method = Sampling("top_p")
output_id = sampling_method.sample(logits, threshold=decoding_args.top_p)
sequence_lengths = tf.where(finished, x=sequence_lengths, y=sequence_lengths + 1)
outputs = outputs.write(step, output_id)
finished = tf.logical_or(finished, tf.equal(output_id, decoding_args.end_id))
return output_id, finished, step + 1, outputs, my_cache, sequence_lengths, op_self_cache, op_mem_cache
# initialization
start_ids, step, outputs, tf_decoder_cache, finished, _, \
_, extra_vars = initialize_decoding_variables(decoding_args, batch_size)
sequence_lengths = extra_vars[1]
word_ids = tf.identity(start_ids, name="word_ids")
# if use_op == False, these two caches are useless
op_self_cache, op_mem_cache = init_op_cache(decoder_args, batch_size, tf.reduce_max(memory_sequence_length), decoding_args.max_seq_len)
_, _, _, outputs, _, sequence_lengths, _, _ = tf.while_loop(
_cond,
_body,
loop_vars=(
word_ids,
finished,
step,
outputs,
tf_decoder_cache,
sequence_lengths,
op_self_cache,
op_mem_cache
),
back_prop=False,
maximum_iterations=decoding_args.max_seq_len,
shape_invariants=(
start_ids.shape,
finished.shape,
step.shape,
tf.TensorShape(None),
tf.contrib.framework.nest.map_structure(_get_shape_invariants, tf_decoder_cache),
tf.contrib.framework.nest.map_structure(_get_shape_invariants, sequence_lengths),
tf.contrib.framework.nest.map_structure(_get_shape_invariants, op_self_cache),
tf.contrib.framework.nest.map_structure(_get_shape_invariants, op_mem_cache))
)
tf_output_ids = outputs.stack()
tf_output_ids = tf.reshape(tf_output_ids, [-1, batch_size])
tf_output_ids = tf.transpose(tf_output_ids, [1, 0])
tf_output_ids = tf.cast(tf_output_ids, start_ids.dtype)
return tf_output_ids, sequence_lengths
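# ---------------------------------------------------------------------------
# Editor's illustrative sketch (an addition, not part of the original
# FasterTransformer sources): a minimal NumPy mock-up of one step of the
# sampling loop in _body above. It assumes plain [batch_size, vocab_size]
# logits and shows how finished sentences are forced to emit end_id before the
# top-k draw, mirroring the tf.where(finished, eos_max_prob, logits) +
# Sampling("top_k") path. The function name is hypothetical.
def _sketch_sampling_step(logits, finished, end_id, top_k):
    import numpy as np
    logits = np.asarray(logits, dtype=np.float32)
    finished = np.asarray(finished, dtype=bool)
    # Force finished rows to keep producing end_id by giving it the largest score.
    eos_max_prob = np.full_like(logits, np.finfo(np.float32).min)
    eos_max_prob[:, end_id] = np.finfo(np.float32).max
    logits = np.where(finished[:, None], eos_max_prob, logits)
    # Restrict each row to its top-k candidates and draw one of them uniformly
    # (the real op samples from the renormalized softmax over the k candidates).
    topk_idx = np.argsort(-logits, axis=1)[:, :top_k]
    choice = np.random.randint(0, top_k, size=logits.shape[0])
    output_id = topk_idx[np.arange(logits.shape[0]), choice]
    finished = finished | (output_id == end_id)
    return output_id, finished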
| FasterTransformer-main | examples/tensorflow/decoder/utils/decoding.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def search_word(beam_width,
vocab_size,
step,
logits,
cum_log_probs,
finished,
cache,
extra_vars,
op_self_cache=None,
search_method=None):
# [batch_size * beam_width, vocab_size]
batchxbeam = tf.shape(logits)[0]
log_probs = tf.nn.log_softmax(logits)
parent_ids = extra_vars[0]
sequence_lengths = extra_vars[1]
total_probs = log_probs + tf.expand_dims(cum_log_probs, 1)
# [batch_size * beam_width, vocab_size] + [batch_size * beam_width], has to broadcast
total_probs = tf.reshape(total_probs, [-1, beam_width * vocab_size])
if search_method is None:
search_method = BeamSearch()
sample_ids = search_method.process(total_probs, beam_width, vocab_size)
word_ids = sample_ids % vocab_size # [batch_size * beam_width]
beam_ids = sample_ids // vocab_size # [batch_size * beam_width]
# [batch_size * beam_width]
beam_indices = (tf.range(batchxbeam) // beam_width) * beam_width + beam_ids
sequence_lengths = tf.where(
finished, x=sequence_lengths, y=sequence_lengths + 1)
# [batch_size * beam_width]
batch_pos = tf.range(batchxbeam) // beam_width
cum_log_probs = tf.gather_nd(total_probs, tf.stack(
[batch_pos, sample_ids], axis=-1)) # [batch_size * beam_width]
finished = tf.gather(finished, beam_indices)
sequence_lengths = tf.gather(sequence_lengths, beam_indices)
cache = tf.contrib.framework.nest.map_structure(
lambda s: tf.gather(s, beam_indices), cache)
if op_self_cache is not None:
# a workaround to check if we have batch major self caches
# ideally we should pass a config parameter :
op_self_cache_keys_rank = tf.shape(op_self_cache[0]).get_shape()[0].value
axis = 1 if op_self_cache_keys_rank == 6 else 2
op_self_cache = tf.contrib.framework.nest.map_structure(
lambda s: tf.gather(s, beam_indices, axis=axis), op_self_cache)
parent_ids = parent_ids.write(step, beam_ids)
extra_vars = [parent_ids, sequence_lengths]
return word_ids, cum_log_probs, finished, cache, tuple(extra_vars), op_self_cache
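# ---------------------------------------------------------------------------
# Editor's illustrative sketch (an addition, not part of the original
# sources): a NumPy mock-up of the id bookkeeping performed in search_word
# above. Given flat sample_ids selected over a
# [batch_size, beam_width * vocab_size] score matrix, it recovers the chosen
# token ids, the parent beam ids, and the gather indices used to reorder the
# caches. The function name is hypothetical.
def _sketch_beam_bookkeeping(sample_ids, beam_width, vocab_size):
    import numpy as np
    sample_ids = np.asarray(sample_ids)           # [batch_size * beam_width]
    word_ids = sample_ids % vocab_size            # token chosen from the vocabulary
    beam_ids = sample_ids // vocab_size           # parent beam inside each batch entry
    batchxbeam = sample_ids.shape[0]
    # Position of the parent beam in the flattened [batch * beam] layout,
    # used to gather the caches and the finished/sequence_length states.
    beam_indices = (np.arange(batchxbeam) // beam_width) * beam_width + beam_ids
    return word_ids, beam_ids, beam_indices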
class Search():
def __init__(self):
pass
def process(self, total_probs, beam_width, vocab_size):
pass
class BeamSearch(Search):
def __init__(self):
pass
def process(self, total_probs, beam_width, vocab_size):
'''
inputs:
total_probs: float tensor, [batch_size * beam_width, vocab_size]
beam_width: int scalar
outputs:
sample_ids: int tensor, [batch_size * beam_width]
'''
# [batch_size, beam_width * vocab_size], can skip in cuda
total_probs = tf.reshape(total_probs, [-1, beam_width * vocab_size])
_, sample_ids = tf.nn.top_k(total_probs, beam_width)
# [batch_size * beam_width], can skip in cuda
sample_ids = tf.reshape(sample_ids, [-1])
return sample_ids
class DiverseSiblingSearch(Search):
def __init__(self, diversity_rate):
'''
inputs:
diversity_rate: int scalar, >= 0
if diversity_rate == 0, then it is equivalent to beam_search
'''
self.diversity_rate = diversity_rate
def process(self, total_probs, beam_width, vocab_size):
'''
inputs:
total_probs: float tensor, [batch_size * beam_width, vocab_size]
outputs:
sample_ids: int tensor, [batch_size * beam_width]
beam_ids: int tensor, [batch_size * beam_width]
1. calculate hypotheses for each beam
2. Intra-sibling ordering
3. rewrite scores
4. choose top K hypotheses
'''
total_probs = tf.reshape(total_probs, [-1, beam_width, vocab_size]) # [batch size, beam width, vocab size]
sibling_score = tf.cast(tf.range(1, beam_width+1), total_probs.dtype) * self.diversity_rate # [beam_width]
scores, ids = tf.nn.top_k(total_probs, beam_width) # [batch size, beam width, beam width]
scores = tf.add(scores, sibling_score) # [batch size, beam width, beam width]
scores = tf.reshape(scores, [-1, beam_width * beam_width])
ids = ids + tf.expand_dims(tf.expand_dims(tf.range(0, beam_width) * vocab_size, 0), -1)
ids = tf.reshape(ids, [-1, beam_width * beam_width])
_, final_ids = tf.nn.top_k(scores, beam_width) # [batch size, beam width]
batch_size = tf.shape(final_ids)[0]
final_ids = tf.reshape(final_ids, [-1, 1])
batch_index = tf.range(0, batch_size)
batch_index = tf.reshape(batch_index, [-1, 1])
batch_index = tf.tile(batch_index, [1, beam_width])
batch_index = tf.reshape(batch_index, [-1, 1])
index = tf.concat([batch_index, final_ids ], axis=1)
sample_ids = tf.gather_nd(ids, index)
return sample_ids
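# ---------------------------------------------------------------------------
# Editor's illustrative sketch (an addition, not part of the original
# sources): a NumPy version of the diverse sibling scoring above for a small
# [batch_size * beam_width, vocab_size] log-probability matrix. Step 1 takes
# the per-beam top-k hypotheses, steps 2-3 add the rank-based sibling score
# (rank * diversity_rate, a penalty when the rate is negative), and step 4
# keeps the global top beam_width candidates. The function name is hypothetical.
def _sketch_diverse_sibling_search(total_probs, beam_width, vocab_size, diversity_rate):
    import numpy as np
    probs = np.asarray(total_probs, dtype=np.float32).reshape(-1, beam_width, vocab_size)
    # Per-beam top beam_width candidates and their scores.
    ids = np.argsort(-probs, axis=-1)[:, :, :beam_width]              # [batch, beam, beam]
    scores = np.take_along_axis(probs, ids, axis=-1)                  # [batch, beam, beam]
    # Intra-sibling ordering: the r-th sibling of a beam receives r * diversity_rate.
    scores = scores + np.arange(1, beam_width + 1, dtype=np.float32) * diversity_rate
    # Shift ids into the flattened beam_width * vocab_size coordinate system.
    ids = ids + (np.arange(beam_width) * vocab_size)[None, :, None]
    scores = scores.reshape(-1, beam_width * beam_width)
    ids = ids.reshape(-1, beam_width * beam_width)
    # Global top beam_width hypotheses per batch entry.
    final = np.argsort(-scores, axis=-1)[:, :beam_width]
    return np.take_along_axis(ids, final, axis=-1).reshape(-1)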
| FasterTransformer-main | examples/tensorflow/decoder/utils/beam_search.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import tensorflow as tf
import numpy as np
import ctypes
from examples.tensorflow.decoder.utils.beam_search import BeamSearch
from examples.tensorflow.decoder.utils.beam_search import DiverseSiblingSearch
class TransformerArgument:
def __init__( self,
beam_width,
head_num,
size_per_head,
inter_size,
num_layer,
dtype=tf.float32,
kernel_init_range=0.02,
bias_init_range=0.02,
fuse_qkv=True,
remove_padding=False,
int8_mode=0,
allow_gemm_test=False,
memory_hidden_dim=-1):
'''
The arguments of Transformer layer (for both encoder and decoder).
Args:
beam_width: The beam_width size for beam search. This argument is always one for encoder.
head_num: The head number of self attention in transformer layer.
size_per_head: The size of hidden dimension for each head of self attention in transformer layer.
inter_size: The size of intermediate dimension of FFN layer.
num_layer: The number of transformer layer. For example, BERT-base uses 12 layers.
dtype: The data type of weights initializer and inputs.
kernel_init_range: The initializer range of kernel for all convolution layer and fully-connected layer.
bias_init_range: The initializer range of bias for all convolution layer and fully-connected layer.
fuse_qkv: bool. Whether fuse the q, k, v gemm or not.
remove_padding: bool. Remove the padding of sentences of encoder.
int8_mode: Mode of int8 quantization. 0 means not using int8 quantization, 1 means using int8 quantization without quantizing residuals,
2 means using int8 quantization with quantizing residuals.
allow_gemm_test: whether allow gemm test inside FT.
'''
self.beam_width = beam_width
self.head_num = head_num
self.size_per_head = size_per_head
self.inter_size = inter_size
self.num_layer = num_layer
self.dtype = dtype
self.hidden_dim = self.head_num * self.size_per_head
self.kernel_init_range = kernel_init_range
self.bias_init_range = bias_init_range
self.int8_mode = int8_mode
self.allow_gemm_test = allow_gemm_test
if self.dtype == tf.float32:
self.check_threshold = 2e-5
elif self.dtype == tf.float16:
self.check_threshold = 2e-2
self.fuse_qkv = fuse_qkv
self.remove_padding = remove_padding
self.memory_hidden_dim = memory_hidden_dim
class DecodingArgument(object):
def __init__( self,
vocab_size,
start_id,
end_id,
max_seq_len,
decoder_args):
'''
The arguments of Decoding.
Decoding is the function which contains the whole translation process.
For example, the embedding lookup, position encoding, decoder, and
beam search or sampling to choose the token.
Args:
vocab_size: The size of vocabulary of Decoding.
start_id: The id of start token in vocabulary.
end_id: The id of end token in vocabulary.
max_seq_len: The maximum length of sentence in translation.
decoder_args: The arguments of decoder layer.
'''
self.vocab_size = vocab_size
self.start_id = start_id
self.end_id = end_id
self.max_seq_len = max_seq_len
self.decoder_args = decoder_args
class DecodingBeamsearchArgument(DecodingArgument):
def __init__( self,
vocab_size,
start_id,
end_id,
max_seq_len,
decoder_args,
beam_search_diversity_rate=-0.0):
'''
The arguments of Decoding with beam search.
Most arguments are similar to DecodingArgument except the beam_search_diversity_rate.
Args:
vocab_size: The size of vocabulary of Decoding.
start_id: The id of start token in vocabulary.
end_id: The id of end token in vocabulary.
max_seq_len: The maximum length of sentence in translation.
decoder_args: The arguments of decoder layer.
beam_search_diversity_rate: The diversity rate of beam search. When it is 0,
it is equivalent to naive beam search.
'''
super(DecodingBeamsearchArgument, self).__init__(vocab_size,
start_id,
end_id,
max_seq_len,
decoder_args)
self.beam_search_diversity_rate = beam_search_diversity_rate
if abs(self.beam_search_diversity_rate) == 0.0:
self.search_method = BeamSearch()
else:
self.search_method = DiverseSiblingSearch(beam_search_diversity_rate)
class DecodingSamplingArgument(DecodingArgument):
def __init__( self,
vocab_size,
start_id,
end_id,
max_seq_len,
decoder_args,
top_k=0,
top_p=0.0):
'''
The arguments of Decoding with sampling.
Most arguments are similar to DecodingArgument except the top_k and top_p.
Args:
vocab_size: The size of vocabulary of Decoding.
start_id: The id of start token in vocabulary.
end_id: The id of end token in vocabulary.
max_seq_len: The maximum length of sentence in translation.
decoder_args: The arguments of decoder layer.
top_k: An int value. The value of k for top k sampling.
top_p: A float value. The value of p for top p sampling.
Note that it is invalid for top_k and top_p to both be 0 at the same time.
Note that it is also invalid for top_k and top_p to both be non-zero at the same time.
If top_k is non-zero, the Decoding function uses top k sampling.
If top_p is non-zero, the Decoding function uses top p sampling.
'''
super(DecodingSamplingArgument, self).__init__(vocab_size,
start_id,
end_id,
max_seq_len,
decoder_args)
self.top_k = top_k
self.top_p = top_p
if self.top_k == 0 and self.top_p == 0.0:
print("[ERROR] top_k and top_p cannot both be 0.")
exit(-1)
elif self.top_k != 0 and self.top_p != 0.0:
print("[ERROR] top_k and top_p cannot both be non-zero.")
exit(-1)
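# ---------------------------------------------------------------------------
# Editor's illustrative sketch (an addition, not part of the original
# sources): how the argument classes above are typically wired together for
# top-k sampling. The concrete sizes below are made-up placeholders, not
# values taken from any shipped configuration.
def _sketch_build_sampling_args():
    decoder_args = TransformerArgument(beam_width=1,
                                       head_num=8,
                                       size_per_head=64,
                                       inter_size=2048,
                                       num_layer=6,
                                       dtype=tf.float32)
    # Exactly one of top_k / top_p must be non-zero, as enforced in __init__.
    return DecodingSamplingArgument(vocab_size=32001,
                                    start_id=1,
                                    end_id=2,
                                    max_seq_len=128,
                                    decoder_args=decoder_args,
                                    top_k=4,
                                    top_p=0.0)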
class DecodingGpt2Argument(DecodingArgument):
def __init__( self,
vocab_size,
start_id,
end_id,
max_seq_len,
decoder_args,
top_k=0,
top_p=0.0,
temperature=1.0):
'''
The arguments of Decoding with sampling.
Most arguments are similar to DecodingArgument except the top_k and top_p.
Args:
vocab_size: The size of vocabulary of Decoding.
start_id: The id of start token in vocabulary.
end_id: The id of end token in vocabulary.
max_seq_len: The maximum length of sentence in translation.
decoder_args: The arguments of decoder layer.
top_k: An int value. The value of k for top k sampling.
top_p: A float value. The value of p for top p sampling.
'''
super(DecodingGpt2Argument, self).__init__(vocab_size,
start_id,
end_id,
max_seq_len,
decoder_args)
self.top_k = top_k
self.top_p = top_p
self.temperature = temperature
# Plan to use this argument for all decoding in the future
class DecodingArgumentNew(object):
def __init__( self,
vocab_size,
start_id,
end_id,
max_seq_len,
beam_search_diversity_rate,
top_k,
top_p,
decoder_args):
'''
The arguments of Decoding.
Decoding is the function which contains the whole translation process.
For example, the embedding lookup, position encoding, decoder, and
beam search or sampling to choose the token.
Args:
vocab_size: The size of vocabulary of Decoding.
start_id: The id of start token in vocabulary.
end_id: The id of end token in vocabulary.
max_seq_len: The maximum length of sentence in translation.
decoder_args: The arguments of decoder layer.
'''
self.vocab_size = vocab_size
self.start_id = start_id
self.end_id = end_id
self.max_seq_len = max_seq_len
self.decoder_args = decoder_args
self.beam_search_diversity_rate = beam_search_diversity_rate
self.top_k = top_k
self.top_p = top_p
def create_initializer(initializer_range=0.02, data_type=tf.float32):
return tf.truncated_normal_initializer(stddev=initializer_range, dtype=data_type)
def _get_shape_invariants(tensor):
"""Returns the shape of the tensor but sets middle dims to None."""
if isinstance(tensor, tf.TensorArray):
shape = None
else:
shape = tensor.shape.as_list()
for i in range(1, len(shape) - 1):
shape[i] = None
return tf.TensorShape(shape)
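# ---------------------------------------------------------------------------
# Editor's illustrative sketch (an addition, not part of the original
# sources): for a tensor with static shape [8, 32, 512], the helper above
# returns TensorShape([8, None, 512]); only the middle dimensions are relaxed
# so tf.while_loop accepts loop variables that grow along those axes.
def _sketch_shape_invariants_example():
    x = tf.zeros([8, 32, 512])
    return _get_shape_invariants(x)   # -> TensorShape([8, None, 512])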
def time_test(sess, tensor, iterations=100, warmup=True):
# return in ms
# warmup
if warmup == True:
for i in range(iterations):
sess.run(tensor)
t1 = datetime.now()
for i in range(iterations):
sess.run(tensor)
t2 = datetime.now()
time_sum = (t2 - t1).total_seconds()
return time_sum * 1000 / iterations
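# Editor's illustrative usage note (an addition, not part of the original
# sources): time_test returns the average latency in milliseconds of running
# one fetch, e.g. with a hypothetical session and output tensor:
#   latency_ms = time_test(sess, output_ids, iterations=50)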
def cross_check(name, tf_val, op_val, atol_threshold):
abs_diff = np.fabs(tf_val - op_val)
print("[INFO] {} Cross check {}".format(name, np.allclose(tf_val, op_val, atol=atol_threshold)))
print("[INFO] Max diff {}".format(abs_diff.max()))
print("[INFO] min diff {}".format(abs_diff.min()))
def int_result_cross_check(name, tf_result, op_result, shape):
print(" ")
is_same = (tf_result.flatten() == op_result.flatten()).all()
print(" {} cross-check: {}".format(name, is_same))
if is_same == False:
tf_reshaped_result = np.reshape(tf_result, shape)
op_reshaped_result = np.reshape(op_result, shape)
for i in range(tf_reshaped_result.shape[0]):
is_true = (tf_reshaped_result[i] == op_reshaped_result[i]).all()
print(" Cross-Check on batch-{} {}".format(i, is_true))
if is_true == False:
print("TF result: {}".format(tf_reshaped_result[i]))
print("OP result: {}".format(op_reshaped_result[i]))
class cudaProfiler:
def __init__(self):
self.profiler = ctypes.CDLL("libcudart.so")
def start(self):
ret = self.profiler.cudaProfilerStart()
if ret != 0:
raise Exception("cudaProfilerStart() return %d " %ret)
def stop(self):
ret = self.profiler.cudaProfilerStop()
if ret != 0:
raise Exception("cudaProfilerStop() return %d " %ret)
| FasterTransformer-main | examples/tensorflow/decoder/utils/common.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import abc
import tensorflow as tf
from examples.tensorflow.decoder.utils.reducer import SumReducer
class PositionEncoder(tf.keras.layers.Layer):
"""Base class for position encoders."""
def __init__(self, reducer=SumReducer(), **kwargs):
"""Initializes the position encoder.
Args:
reducer: A :class:`opennmt.layers.Reducer` to merge inputs and position
encodings.
**kwargs: Additional layer keyword arguments.
"""
super(PositionEncoder, self).__init__(**kwargs)
self.reducer = reducer
def call(self, inputs, position=None): # pylint: disable=arguments-differ
"""Add position encodings to :obj:`inputs`.
Args:
inputs: The inputs to encode.
position: The single position to encode, to use when this layer is called
step by step.
Returns:
A ``tf.Tensor`` whose shape depends on the configured ``reducer``.
"""
batch_size = tf.shape(inputs)[0]
timesteps = tf.shape(inputs)[1]
input_dim = inputs.get_shape().as_list()[-1] # return int
positions = tf.range(timesteps) + 1 if position is None else position
position_encoding = self._encode([positions], input_dim, dtype=inputs.dtype)
position_encoding = tf.tile(position_encoding, [batch_size, 1, 1])
return self.reducer([inputs, position_encoding])
@abc.abstractmethod
def _encode(self, positions, depth, dtype):
"""Creates position encodings.
Args:
positions: The positions to encode of shape :math:`[B, ...]`.
depth: The encoding depth :math:`D`.
Returns:
A ``tf.Tensor`` of shape :math:`[B, ..., D]`.
"""
raise NotImplementedError()
def _create_position_encoding_table(self, max_seq_len, input_dim, dtype):
positions = tf.range(max_seq_len) + 1
self.position_encoding_table = self._encode([positions], input_dim, dtype=dtype)
self.position_encoding_table = tf.squeeze(self.position_encoding_table)
return self.position_encoding_table
class SinusoidalPositionEncoder(PositionEncoder):
"""Encodes positions with sine waves as described in
https://arxiv.org/abs/1706.03762.
"""
def _encode(self, positions, depth, dtype):
if depth % 2 != 0:
raise ValueError("SinusoidalPositionEncoder expects the depth to be divisble "
"by 2 but got %d" % depth)
batch_size = tf.shape(positions)[0]
positions = tf.cast(positions, tf.float32)
log_timescale_increment = math.log(10000) / (depth / 2 - 1)
inv_timescales = tf.exp(
tf.cast(tf.range(depth / 2), dtype=tf.float32) * -log_timescale_increment)
inv_timescales = tf.reshape(
tf.tile(inv_timescales, [batch_size]), [batch_size, -1])
scaled_time = tf.expand_dims(
positions, -1) * tf.expand_dims(inv_timescales, 1)
encoding = tf.concat(
[tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
return tf.cast(encoding, dtype)
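# ---------------------------------------------------------------------------
# Editor's illustrative sketch (an addition, not part of the original
# sources): a plain NumPy version of the sinusoidal table produced by _encode
# above for a single batch, assuming positions 1..max_seq_len and an even
# depth > 2. The function name is hypothetical.
def _sketch_sinusoidal_table(max_seq_len, depth):
    import numpy as np
    positions = np.arange(1, max_seq_len + 1, dtype=np.float32)               # [T]
    log_timescale_increment = np.log(10000.0) / (depth / 2 - 1)
    inv_timescales = np.exp(np.arange(depth // 2, dtype=np.float32) * -log_timescale_increment)
    scaled_time = positions[:, None] * inv_timescales[None, :]                # [T, depth/2]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=-1)  # [T, depth]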
| FasterTransformer-main | examples/tensorflow/decoder/utils/position.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class Sampling():
def __init__(self, sample_method):
if sample_method == "top_k":
self.sample_method = self.top_k_logits
elif sample_method == "top_p":
self.sample_method = self.top_p_logits
else:
print("[ERROR] the sample method should be one of top_k and top_p")
exit(-1)
pass
def sample(self, logits, threshold, num_samples=1):
'''
inputs:
logits: [batch_size, vocab_size], the values of log logits
threshold: int when using top_k, and a probability (0~1) when using top_p
outputs:
samples: [batch_size]
'''
logits = self.sample_method(logits, threshold)
samples = tf.multinomial(logits, num_samples=num_samples, output_dtype=tf.int32)
samples = tf.reshape(samples, [-1])
return samples
def top_k_logits(self, logits, k):
if k == 0:
return logits
else:
values, _ = tf.nn.top_k(logits, k=k) # [batch size, k]
min_values = values[:, -1, tf.newaxis] #[batch size, 1]
return tf.where(
logits < min_values,
tf.ones_like(logits, dtype=logits.dtype) * logits.dtype.min,
logits
)
def top_p_logits(self, logits, p):
sorted_logits = tf.sort(logits, direction='DESCENDING')
sorted_probs = tf.nn.softmax(sorted_logits)
probs_sums = tf.cumsum(sorted_probs, axis=1, exclusive=True)
logits_masked = tf.where(
probs_sums < p,
sorted_logits,
tf.ones_like(sorted_logits) * 1000
) # [batchsize, vocab]
min_logits = tf.reduce_min(logits_masked, axis=1, keepdims=True) # [batch size, 1]
return tf.where(
logits < min_logits,
tf.ones_like(logits, dtype=logits.dtype) * logits.dtype.min,
logits
)
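# ---------------------------------------------------------------------------
# Editor's illustrative sketch (an addition, not part of the original
# sources): a NumPy version of the top-p (nucleus) filtering above. Tokens
# outside the smallest prefix of the sorted distribution whose cumulative
# probability stays below p are pushed to the dtype minimum, so a subsequent
# multinomial draw can only pick tokens inside the nucleus. The function name
# is hypothetical.
def _sketch_top_p_filter(logits, p):
    import numpy as np
    logits = np.asarray(logits, dtype=np.float32)
    order = np.argsort(-logits, axis=1)
    sorted_logits = np.take_along_axis(logits, order, axis=1)
    probs = np.exp(sorted_logits - sorted_logits.max(axis=1, keepdims=True))
    probs = probs / probs.sum(axis=1, keepdims=True)
    cum_probs = np.cumsum(probs, axis=1) - probs          # exclusive cumulative sum
    keep = cum_probs < p                                  # nucleus, in sorted order
    # Smallest kept logit per row is the cut-off threshold.
    min_logits = np.where(keep, sorted_logits, np.inf).min(axis=1, keepdims=True)
    return np.where(logits < min_logits, np.finfo(np.float32).min, logits)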
| FasterTransformer-main | examples/tensorflow/decoder/utils/sampling.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import os
import sys
import math
import logging
from datetime import datetime
import numpy as np
import tensorflow as tf
from keras import backend as K
from transformers import PreTrainedTokenizerFast
from transformers import T5Tokenizer, TFT5ForConditionalGeneration
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.tensorflow.t5.utils.ft_encoder import FTT5EncoderParams, ftt5_encoder
from examples.tensorflow.t5.utils.ft_decoding import FTT5DecodingParams, ftt5_decoding, FTT5Model
LOGGER = logging.getLogger(__name__)
gemm_data_type_mapping = {"fp32":0, "fp16":1}
def bleu_score(pred, ref):
from sacrebleu import corpus_bleu
bleu = corpus_bleu(pred, [ref], force=True)
LOGGER.info(" bleu score: {:6.2f}".format(bleu.score))
LOGGER.info(" bleu counts: {}".format(bleu.counts))
LOGGER.info(" bleu totals: {}".format(bleu.totals))
LOGGER.info(" bleu precisions: {}".format(bleu.precisions))
LOGGER.info(" bleu sys_len: {}; ref_len: {}".format(bleu.sys_len, bleu.ref_len))
return bleu
def recover_bpe(src):
dst = []
for line in src:
line = line.strip().split()
if line[-1] == '</s>':
line.pop()
if line[0][0] == '▁':
s = line[0][1:]
else:
s = line[0]
for w in line[1:]:
if w[0] == '▁':
s += ' ' + w[1:]
else:
s += w
s += '\n'
dst.append(s)
return dst
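# Editor's illustrative note (an addition, not part of the original sources):
# recover_bpe merges SentencePiece-style subwords, where a leading '▁' marks
# the start of a new word and a trailing '</s>' is dropped, e.g.
#   recover_bpe(["▁Gut en ▁Morgen </s>"]) == ["Guten Morgen\n"]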
class TranslationResult(object):
def __init__(self, name, frame_work):
self.name = name
self.frame_work = frame_work # FT or HF
self.file_name = name + ".txt"
self.token_list = []
self.batch_ids_list = []
self.batch_seq_len_list = []
self.batch_num = 0
self.execution_time = 0.0 # seconds
self.sentence_num = 0
self.token_num = 0
self.bleu_score = None
def translate(args_dict):
batch_size = args_dict['batch_size']
beam_size = args_dict['beam_width']
max_seq_len = args_dict['max_seq_len']
source_file = args_dict["source"]
tgt_file = args_dict["target"]
time_args = args_dict["test_time"]
beam_search_diversity_rate = args_dict['beam_search_diversity_rate']
topk = args_dict['sampling_topk']
topp = args_dict['sampling_topp']
tensor_para_size = args_dict['tensor_para_size']
pipeline_para_size = args_dict['pipeline_para_size']
max_ite = args_dict['max_iteration']
repetition_penalty = args_dict["repetition_penalty"]
temperature = args_dict["temperature"]
len_penalty = args_dict["len_penalty"]
## huggingface without bias and use relative position embedding
## relative position embedding -> 0, absolute position embedding -> 1
t5_with_bias = False
use_gated_activation = False
position_embedding_type = 0
## only huggingface model path supported
model_path = args_dict['model_path'] if args_dict['model_path'] != None else args_dict['model']
# ckpt_path = args_dict['ckpt_path']
model_type = args_dict['model_type']
## read checkpoint config if exists
ckpt_config = configparser.ConfigParser()
LOGGER.info("\n=============== Argument ===============")
for key in args_dict:
LOGGER.info("{}: {}".format(key, args_dict[key]))
LOGGER.info("========================================")
if args_dict['data_type'] == 'fp16':
K.set_floatx('float16')
elif args_dict['data_type'] == 'bf16':
raise NotImplementedError
# t5_model = t5_model ## bfloat inference not supported yet
t5_model = TFT5ForConditionalGeneration.from_pretrained(model_path)
# Currently there is no MPI support in the TF version of T5
if True:
rank = 0
tokenizer = T5Tokenizer.from_pretrained(model_path)
try:
fast_tokenizer = PreTrainedTokenizerFast.from_pretrained(model_path)
except:
fast_tokenizer = T5Tokenizer.from_pretrained(model_path)
encoder_config = t5_model.encoder.config
decoder_config = t5_model.decoder.config
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L1660
# if tie_word_embeddings == True, scale the decoder output by sequence_output = sequence_output * (self.model_dim**-0.5)
tie_word_embeddings = decoder_config.tie_word_embeddings
q_scaling = 1.0 / (math.sqrt(encoder_config.d_kv))
LOGGER.debug(f"{model_type} encoder_config: {encoder_config}")
LOGGER.debug(f"{model_type} decoder_config: {decoder_config}")
if os.path.isfile("gemm_config.in") and rank == 0:
cmd = f"rm gemm_config.in"
LOGGER.info(f"Run {cmd}")
os.system(cmd)
translation_result_list = []
if time_args.find("0") != -1:
translation_result_list.append(TranslationResult("hf-beamsearch-warmup", "HF"))
translation_result_list.append(TranslationResult("hf-beamsearch", "HF"))
if time_args.find("1") != -1:
translation_result_list.append(TranslationResult("ft-beamsearch-warmup", "FT"))
translation_result_list.append(TranslationResult("ft-beamsearch", "FT"))
if rank == 0:
data_type = gemm_data_type_mapping[args_dict['data_type']]
cmd = f"./bin/t5_gemm {math.ceil(batch_size / pipeline_para_size)} {beam_size} {max_seq_len} " \
f"{encoder_config.d_model} {encoder_config.num_heads} {encoder_config.d_kv} {encoder_config.d_ff} " \
f"{decoder_config.d_model} {decoder_config.num_heads} {decoder_config.d_kv} {decoder_config.d_ff} " \
f"{decoder_config.vocab_size} {data_type} {tensor_para_size} 0 > .tmp_gemm.log"
LOGGER.info(f"Run gemm test: {cmd}")
os.system(cmd)
if time_args.find("2") != -1:
translation_result_list.append(TranslationResult("hf-sampling-warmup", "HF"))
translation_result_list.append(TranslationResult("hf-sampling", "HF"))
if time_args.find("3") != -1:
translation_result_list.append(TranslationResult("ft-sampling-warmup", "FT"))
translation_result_list.append(TranslationResult("ft-sampling", "FT"))
if rank == 0:
data_type = gemm_data_type_mapping[args_dict['data_type']]
cmd = f"./bin/t5_gemm {math.ceil(batch_size / pipeline_para_size)} {1} {max_seq_len} " \
f"{encoder_config.d_model} {encoder_config.num_heads} {encoder_config.d_kv} {encoder_config.d_ff} " \
f"{decoder_config.d_model} {decoder_config.num_heads} {decoder_config.d_kv} {decoder_config.d_ff} " \
f"{decoder_config.vocab_size} {data_type} {tensor_para_size} 1 > .tmp_gemm.log"
LOGGER.info(f"Run gemm test: {cmd}")
os.system(cmd)
if time_args.find("1") != -1 or time_args.find("3") != -1:
ft_encoder_params = FTT5EncoderParams(
encoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias,
position_embedding_type=position_embedding_type
)
ft_decoding_params = FTT5DecodingParams(
decoder_config,
tensor_para_size,
pipeline_para_size,
t5_with_bias=t5_with_bias
)
if True:
ft_encoder_params.load_from_model(t5_model)
ft_decoding_params.load_from_model(t5_model)
remove_padding = True if batch_size > 32 else False
ft_model = FTT5Model(ft_encoder_params, ft_decoding_params)
with open(source_file, 'r') as f:
src_text = recover_bpe(f.readlines())
src_text = ["translate English to German: " + line.strip() for line in src_text]
with open(tgt_file, 'r') as f:
tgt_text = recover_bpe(f.readlines())
for i in range(len(translation_result_list)):
sys.stdout.flush()
prev = 0
start_time = datetime.now()
while prev < len(src_text):
input_texts = src_text[prev:prev+batch_size]
prev += batch_size
input_token = tokenizer(input_texts, return_tensors='tf', padding=True)
if translation_result_list[i].frame_work == "HF":
if translation_result_list[i].name.find("beamsearch") != -1:
hf_outputs = t5_model.generate(input_token.input_ids,
max_length=max_seq_len,
early_stopping=True,
num_beams=beam_size)
elif translation_result_list[i].name.find("sampling") != -1:
hf_outputs = t5_model.generate(input_token.input_ids,
max_length=max_seq_len,
early_stopping=True,
do_sample=True,
top_k=topk if topk > 0 else None,
top_p=topp if topp > 0.0 else None)
translation_result_list[i].batch_ids_list.append(hf_outputs)
translation_result_list[i].batch_seq_len_list.append(np.ones(len(input_texts)) * max_seq_len)
elif translation_result_list[i].frame_work == "FT":
tmp_beam_size = beam_size
if translation_result_list[i].name.find("sampling") != -1:
tmp_beam_size = 1
ft_decoding_outputs, ft_decoding_seq_lens = ft_model.compute(input_token,
tmp_beam_size,
max_seq_len,
beam_search_diversity_rate = beam_search_diversity_rate,
top_k = topk,
top_p = topp,
temperature = 1.0,
len_penalty = 0.0,
repetition_penalty = 1.0,
random_seed = 0)
translation_result_list[i].batch_ids_list.append(ft_decoding_outputs)
translation_result_list[i].batch_seq_len_list.append(ft_decoding_seq_lens)
translation_result_list[i].sentence_num += len(input_token)
translation_result_list[i].batch_num += 1
if translation_result_list[i].name.find("warmup") != -1 and \
(translation_result_list[i].batch_num > 10 or translation_result_list[i].sentence_num > 300):
break
if translation_result_list[i].batch_num >= max_ite:
break
stop_time = datetime.now()
translation_result_list[i].execution_time = (stop_time - start_time).total_seconds()
if translation_result_list[i].name.find("warmup") != -1:
continue
for batch_token, batch_seq_len in zip(translation_result_list[i].batch_ids_list, translation_result_list[i].batch_seq_len_list):
for j in range(len(batch_token)):
if translation_result_list[i].frame_work == "HF":
translation_result_list[i].token_list.append(fast_tokenizer.decode(batch_token[j][1:], skip_special_tokens=True))
translation_result_list[i].token_num += sum((batch_token[j][1:] != 0).numpy())
elif translation_result_list[i].frame_work == "FT":
translation_result_list[i].token_list.append(fast_tokenizer.decode(batch_token[j][0][:batch_seq_len[j][0]], skip_special_tokens=True))
translation_result_list[i].token_num += batch_seq_len[j][0]
if rank == 0:
translation_result_list[i].bleu_score = bleu_score(translation_result_list[i].token_list, tgt_text[:len(translation_result_list[i].token_list)])
with open(translation_result_list[i].name + ".txt", 'w') as f:
for line in translation_result_list[i].token_list:
f.write(line)
if rank == 0:
for t in translation_result_list:
if t.name.find("warmup") != -1:
continue
LOGGER.info(f"{t.name} translates {t.batch_num} batches taking {t.execution_time:.2f} sec to translate "
f"{t.token_num} tokens, BLEU score: {t.bleu_score.score:.2f}, {(t.token_num / t.execution_time):.0f} tokens/sec."
f" ({t.bleu_score.sys_len} words, {(t.bleu_score.sys_len / t.execution_time):.0f} words/sec)")
if t.name == "ft-beamsearch" and args_dict["ft_beamsearch_BLEU_threshold"] != None:
assert t.bleu_score.score >= args_dict["ft_beamsearch_BLEU_threshold"], f"[ERROR] {t.name} test fail !"
LOGGER.info(f"{t.name} PASS !")
if t.name == "ft-sampling" and args_dict["ft_sampling_BLEU_threshold"] != None:
assert t.bleu_score.score >= args_dict["ft_sampling_BLEU_threshold"], f"[ERROR] {t.name} test fail !"
LOGGER.info(f"{t.name} PASS !")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--max_seq_len', type=int, default=200, metavar='NUMBER',
help='max sequence length (default: 200)')
parser.add_argument("--source", default="../examples/tensorflow/decoding/utils/translation/test.en",
help="Path to the source file.")
parser.add_argument("--target", default="../examples/tensorflow/decoding/utils/translation/test.de",
help="Path to the target file.")
parser.add_argument('-time', '--test_time', type=str, default='', metavar='STRING',
help='''
Test the time of which one (default: '' (not test anyone) );
'': not test anyone
'0': test hf_beamsearch
'1': test ft_beamsearch
'2': test hf_sampling
'3': test ft_sampling
e.g., if you want to test hf_beamsearch and ft_sampling,
then you need to use -time '03' ''')
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
help='diversity rate of beam search. Default is 0. When diversity rate = 0, it is equivalent to the naive beam search.')
parser.add_argument('-repeat_penalty', '--repetition_penalty', type=float, default=1.0, metavar='NUMBER',
help='Repetition penalty for generating tokens. Default is 1.0.')
parser.add_argument('-temperature', '--temperature', type=float, default=1.0, metavar='NUMBER',
help='Temperature penalty for generating tokens. Default is 1.0.')
parser.add_argument('-len_penalty', '--len_penalty', type=float, default=0.0, metavar='NUMBER',
help='Length penalty for generating tokens. Default is 0.0.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16', 'bf16'])
parser.add_argument('-lib_path', '--lib_path', type=str, default="lib/libth_t5.so", metavar='STRING',
help='the path of FasterTransformer pytorch t5 op library.')
parser.add_argument('-model_path', '--model_path', type=str, default=None, metavar='STRING',
help='T5 model path.')
parser.add_argument('-model', '--model', type=str, default="t5-small", metavar='STRING',
help='T5 model size. Only used when --model_path=None')
# not tested for the moment and not supported
parser.add_argument('-tensor_para_size', '--tensor_para_size', type=int, default=1, metavar='NUMBER',
help='size of tensor parallelism (default: 1). This feature hasn\'t been tested.')
parser.add_argument('-pipeline_para_size', '--pipeline_para_size', type=int, default=1, metavar='NUMBER',
help='size of pipeline parallelism (default: 1). This feature hasn\'t been tested.')
# assume checkpoint config is also in the same path
# parser.add_argument('--ckpt_path', type=str, help='path to the checkpoint file.') # not supported
parser.add_argument('-max_ite', '--max_iteration', type=int, default=100000, metavar='NUMBER',
help='Maximum iteration for translation, default is 100000 (as large as possible to run all test set).')
parser.add_argument('--model_type', type=str, default="Huggingface", choices=["Huggingface"],
help='Currently only supports Huggingface T5, which adopts the paper\'s implementation and has no bias')
parser.add_argument('--return_output_log_probs', action='store_true',
help='Return the log probability of generated tokens.')
parser.add_argument('--return_cum_log_probs', action='store_true',
help='Return the cumulative log probability of generated tokens.')
parser.add_argument('--ft_beamsearch_BLEU_threshold', type=float,
help='Threshold of FT beam search BLEU score')
parser.add_argument('--ft_sampling_BLEU_threshold', type=float,
help='Threshold of FT sampling BLEU score')
parser.add_argument("--verbose", action="store_true", help="Provide verbose messages")
args = parser.parse_args()
log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO, format=log_format)
translate(vars(args))
| FasterTransformer-main | examples/tensorflow/t5/translate_example.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import math
import os
class FTT5EncoderParams(object):
def __init__(
self,
config,
tensor_para_size=1,
pipeline_para_size=1,
*,
t5_with_bias=False,
position_embedding_type=None
):
self.num_heads = config.num_heads
self.head_size = config.d_kv
self.inter_size = config.d_ff
self.d_model = config.d_model
self.num_layer = config.num_layers
self.num_bucket = config.relative_attention_num_buckets if hasattr(config, 'relative_attention_num_buckets') else 32
self.max_distance = config.relative_attention_max_distance if hasattr(config, 'relative_attention_max_distance') else 128
self.config = config
self.tensor_para_size = tensor_para_size
self.pipeline_para_rank = 0 # no mpi for the moment
self.pipeline_para_size = pipeline_para_size
self.t5_with_bias = t5_with_bias
self.activation_type = config.feed_forward_proj
self.weights = None
self.q_scaling = 1.0 / (math.sqrt(config.d_kv))
# relative position embedding -> 0, absolute position embedding -> 1
assert tensor_para_size == 1, "This op only supports TP = 1 now."
assert pipeline_para_size == 1, "This op only supports PP = 1 now."
self.position_embedding_type = position_embedding_type or 0
def load_from_model(self, model):
"""
Routine to load T5 encoder weights from a HuggingFace model. This assumes the regular T5 (NOT v1.1) architecture.
Notes:
- Note that FasterTransformer currently doesn't support gated GELU.
- The relative attention bias is transposed with respect to the HF model.
"""
start_layer = self.pipeline_para_rank * self.num_layer // self.pipeline_para_size
end_layer = (self.pipeline_para_rank + 1) * self.num_layer // self.pipeline_para_size
weight_data_type = {'float32': tf.float32, 'float16': tf.float16}[model.dtype]
variables_dict = {}
for var in model.variables:
variables_dict[var.name] = var.numpy()
var_prefix = model.name + '/encoder/'
# fill the datastructures holding the weights:
# layer_._0/layer_norm
attr_output_layernorm_beta = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/layer_norm/bias:0") for i in range(start_layer, end_layer)]
attr_output_layernorm_gamma = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/layer_norm/weight:0") for i in range(start_layer, end_layer)]
# layer_._0/SelfAttention/q
attr_q_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/q/kernel:0") for i in range(start_layer, end_layer)]
attr_q_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/q/bias:0") for i in range(start_layer, end_layer)]
# layer_._0/SelfAttention/k
attr_k_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/k/kernel:0") for i in range(start_layer, end_layer)]
attr_k_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/k/bias:0") for i in range(start_layer, end_layer)]
# layer_._0/SelfAttention/v
attr_v_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/v/kernel:0") for i in range(start_layer, end_layer)]
attr_v_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/v/bias:0") for i in range(start_layer, end_layer)]
# layer_._0/SelfAttention/o
attr_output_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/o/kernel:0") for i in range(start_layer, end_layer)]
attr_output_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/o/bias:0") for i in range(start_layer, end_layer)]
# layer_._1/layer_norm
ffn_output_layernorm_beta = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/layer_norm/bias:0") for i in range(start_layer, end_layer)]
ffn_output_layernorm_gamma = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/layer_norm/weight:0") for i in range(start_layer, end_layer)]
if self.config.feed_forward_proj == "relu" or self.config.feed_forward_proj == "gelu":
# format of t5-small
# layer_._1/DenseReluDense/wi
ffn_inter_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/DenseReluDense/wi/kernel:0") for i in range(start_layer, end_layer)]
ffn_inter_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/DenseReluDense/wi/bias:0") for i in range(start_layer, end_layer)]
ffn_inter2_kernel = [tf.constant([0], dtype=weight_data_type) for i in range(start_layer, end_layer)]
ffn_inter2_bias = [tf.constant([0], dtype=weight_data_type) for i in range(start_layer, end_layer)]
elif self.config.feed_forward_proj == "gated-relu" or self.config.feed_forward_proj == "gated-gelu":
# format of google/t5-v1_1-small
# layer_._1/DenseReluDense/wi_0
ffn_inter_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/DenseReluDense/wi_0/kernel:0") for i in range(start_layer, end_layer)]
ffn_inter_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/DenseReluDense/wi_0/bias:0") for i in range(start_layer, end_layer)]
# layer_._1/DenseReluDense/wi_1
# only applies to gated models
ffn_inter2_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/DenseReluDense/wi_1/kernel:0") for i in range(start_layer, end_layer)]
ffn_inter2_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/DenseReluDense/wi_1/bias:0") for i in range(start_layer, end_layer)]
else:
assert False, f"FT does not support activation type {self.config.feed_forward_proj}"
# layer_._1/DenseReluDense/wo
ffn_output_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/DenseReluDense/wo/kernel:0") for i in range(start_layer, end_layer)]
ffn_output_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/DenseReluDense/wo/bias:0") for i in range(start_layer, end_layer)]
# final_layer_norm/weight:0
output_layernorm_beta = variables_dict.get(var_prefix + f"final_layer_norm/bias:0")
output_layernorm_gamma = variables_dict.get(var_prefix + f"final_layer_norm/weight:0")
# other weights
output_absolute_or_relative_position_embedding = np.transpose(variables_dict.get(var_prefix + f"block_._{0}/layer_._0/SelfAttention/relative_attention_bias/embeddings:0"))
output_embedding_table = model.get_input_embeddings().weight
# pack the arguments into a tuple that mirrors the TF custom OP input
weights = [
attr_output_layernorm_beta,
attr_output_layernorm_gamma,
attr_q_kernel,
attr_q_bias,
attr_k_kernel,
attr_k_bias,
attr_v_kernel,
attr_v_bias,
attr_output_kernel,
attr_output_bias,
ffn_output_layernorm_beta,
ffn_output_layernorm_gamma,
ffn_inter_kernel,
ffn_inter_bias,
ffn_inter2_kernel,
ffn_inter2_bias,
ffn_output_kernel,
ffn_output_bias,
output_layernorm_beta,
output_layernorm_gamma,
output_absolute_or_relative_position_embedding,
output_embedding_table
]
# clean up if there is None. Note - we cannot use np.array([0]) as TF won't accept empty tensors
for i in range(0, len(weights)):
if weights[i] is None:
weights[i] = tf.constant([0], dtype=weight_data_type)
elif type(weights[i]) is list:
weights[i] = [tf.constant([0], dtype=weight_data_type) if w is None else tf.convert_to_tensor(w, dtype=weight_data_type) for w in weights[i]]
else:
weights[i] = tf.convert_to_tensor(weights[i], dtype=weight_data_type)
self.weights = tuple(weights)
# wrapper function
def ftt5_encoder(inputs, seq_len, encoder_params):
transformer_op_module = tf.load_op_library(os.path.join('./lib/libtf_t5.so'))
outputs = transformer_op_module.t5_encoder(inputs,
seq_len,
*encoder_params.weights,
head_num = encoder_params.num_heads,
head_size = encoder_params.head_size, # encoder_config.d_kv
inter_size = encoder_params.inter_size, # encoder_config.d_ff,
num_layer = encoder_params.num_layer,
d_model = encoder_params.d_model,
num_bucket = encoder_params.num_bucket,
max_distance = encoder_params.max_distance,
remove_padding = True,
t5_with_bias = encoder_params.t5_with_bias,
activation_type = encoder_params.activation_type,
q_scaling = encoder_params.q_scaling,
position_embedding_type=encoder_params.position_embedding_type)
return outputs
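# ---------------------------------------------------------------------------
# Editor's illustrative usage note (an addition, not part of the original
# sources), assuming the custom op library has been built to
# ./lib/libtf_t5.so and `t5_model` is a loaded TFT5ForConditionalGeneration:
#
#   params = FTT5EncoderParams(t5_model.encoder.config)
#   params.load_from_model(t5_model)
#   hidden_states = ftt5_encoder(input_ids, seq_len, params)  # [batch, seq, d_model]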
| FasterTransformer-main | examples/tensorflow/t5/utils/ft_encoder.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import os
import math
transformer_op_module = tf.load_op_library(os.path.join('./lib/libtf_t5.so'))
from utils.ft_encoder import FTT5EncoderParams, ftt5_encoder
class FTT5DecodingParams(object):
def __init__(
self,
config,
tensor_para_size=1,
pipeline_para_size=1,
*,
t5_with_bias=False,
position_embedding_type=None,
tie_word_embeddings=None
):
self.num_heads = config.num_heads
self.head_size = config.d_kv
self.inter_size = config.d_ff
self.d_model = config.d_model
self.num_layer = config.num_layers
self.num_bucket = config.relative_attention_num_buckets if hasattr(config, 'relative_attention_num_buckets') else 32
self.max_distance = config.relative_attention_max_distance if hasattr(config, 'relative_attention_max_distance') else 128
self.config = config
self.tensor_para_size = tensor_para_size
self.pipeline_para_rank = 0 # no mpi for the moment
self.pipeline_para_size = pipeline_para_size
self.t5_with_bias = t5_with_bias
self.activation_type = config.feed_forward_proj
self.weights = None
self.q_scaling = 1.0 / (math.sqrt(config.d_kv))
self.start_id = config.decoder_start_token_id
self.end_id = config.eos_token_id
self.vocab_size = config.vocab_size
assert tensor_para_size == 1, "This op only supports TP = 1 now."
assert pipeline_para_size == 1, "This op only supports PP = 1 now."
self.position_embedding_type = position_embedding_type or 0
self.tie_word_embeddings = tie_word_embeddings or False
def load_from_model(self, model):
"""
Routine to load T5 decoding weights from a HuggingFace model. This assumes the regular T5 (NOT v1.1) architecture.
Notes:
- Note that FasterTransformer currently doesn't support gated GELU.
- The relative attention bias is transposed with respect to the HF model.
"""
# for the moment obsolete. everything runs on single GPU
start_layer = self.pipeline_para_rank * self.num_layer // self.pipeline_para_size
end_layer = (self.pipeline_para_rank + 1) * self.num_layer // self.pipeline_para_size
weight_data_type = {'float32': tf.float32, 'float16': tf.float16}[model.dtype]
variables_dict = {}
for var in model.variables:
variables_dict[var.name] = var.numpy()
var_prefix = model.name + '/decoder/'
# fill the datastructures holding the weights:
# layer_._0/layer_norm
pre_layernorm_beta = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/layer_norm/bias:0") for i in range(start_layer, end_layer)]
pre_layernorm_gamma = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/layer_norm/weight:0") for i in range(start_layer, end_layer)]
# layer_._0/SelfAttention/q
self_qkv_kernel = [tf.stack([
variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/q/kernel:0"),
variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/k/kernel:0"),
variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/v/kernel:0")
], -2) for i in range(start_layer, end_layer)]
self_qkv_bias = [tf.stack([
variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/q/bias:0") or tf.constant([0], dtype=weight_data_type),
variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/k/bias:0") or tf.constant([0], dtype=weight_data_type),
variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/v/bias:0") or tf.constant([0], dtype=weight_data_type)
], -2) for i in range(start_layer, end_layer)]
# layer_._0/SelfAttention/o
self_output_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/o/kernel:0") for i in range(start_layer, end_layer)]
self_output_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._0/SelfAttention/o/bias:0") for i in range(start_layer, end_layer)]
# layer_._1/layer_norm
self_layernorm_beta = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/layer_norm/bias:0") for i in range(start_layer, end_layer)]
self_layernorm_gamma = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/layer_norm/weight:0") for i in range(start_layer, end_layer)]
# layer_._1/EncDecAttention/q
cross_q_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/EncDecAttention/q/kernel:0") for i in range(start_layer, end_layer)]
cross_q_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/EncDecAttention/q/bias:0") for i in range(start_layer, end_layer)]
# layer_._1/EncDecAttention/k
cross_k_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/EncDecAttention/k/kernel:0") for i in range(start_layer, end_layer)]
cross_k_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/EncDecAttention/k/bias:0") for i in range(start_layer, end_layer)]
# layer_._1/EncDecAttention/v
cross_v_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/EncDecAttention/v/kernel:0") for i in range(start_layer, end_layer)]
cross_v_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/EncDecAttention/v/bias:0") for i in range(start_layer, end_layer)]
# layer_._1/EncDecAttention/o
cross_output_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/EncDecAttention/o/kernel:0") for i in range(start_layer, end_layer)]
cross_output_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._1/EncDecAttention/o/bias:0") for i in range(start_layer, end_layer)]
# layer_._2/layer_norm
cross_layernorm_beta = [variables_dict.get(var_prefix + f"block_._{i}/layer_._2/layer_norm/bias:0") for i in range(start_layer, end_layer)]
cross_layernorm_gamma = [variables_dict.get(var_prefix + f"block_._{i}/layer_._2/layer_norm/weight:0") for i in range(start_layer, end_layer)]
if self.config.feed_forward_proj == "relu" or self.config.feed_forward_proj == "gelu":
# format of t5-small
# layer_._2/DenseReluDense/wi
ffn_inter_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._2/DenseReluDense/wi/kernel:0") for i in range(start_layer, end_layer)]
ffn_inter_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._2/DenseReluDense/wi/bias:0") for i in range(start_layer, end_layer)]
ffn_inter2_kernel = [tf.constant([0], dtype=weight_data_type) for i in range(start_layer, end_layer)]
ffn_inter2_bias = [tf.constant([0], dtype=weight_data_type) for i in range(start_layer, end_layer)]
elif self.config.feed_forward_proj == "gated-relu" or self.config.feed_forward_proj == "gated-gelu":
# format of google/t5-v1_1-small
# layer_._2/DenseReluDense/wi_0
ffn_inter_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._2/DenseReluDense/wi_0/kernel:0") for i in range(start_layer, end_layer)]
ffn_inter_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._2/DenseReluDense/wi_0/bias:0") for i in range(start_layer, end_layer)]
# layer_._2/DenseReluDense/wi_1
# only applies to gated models
ffn_inter2_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._2/DenseReluDense/wi_1/kernel:0") for i in range(start_layer, end_layer)]
ffn_inter2_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._2/DenseReluDense/wi_1/bias:0") for i in range(start_layer, end_layer)]
else:
assert False, f"FT does not support activation type {self.config.feed_forward_proj}"
# layer_._2/DenseReluDense/wo
ffn_output_kernel = [variables_dict.get(var_prefix + f"block_._{i}/layer_._2/DenseReluDense/wo/kernel:0") for i in range(start_layer, end_layer)]
ffn_output_bias = [variables_dict.get(var_prefix + f"block_._{i}/layer_._2/DenseReluDense/wo/bias:0") for i in range(start_layer, end_layer)]
# final_layer_norm/weight:0
output_layernorm_beta = variables_dict.get(var_prefix + f"final_layer_norm/bias:0")
output_layernorm_gamma = variables_dict.get(var_prefix + f"final_layer_norm/weight:0")
# other weights
pre_encoder_embedding_table = model.get_input_embeddings().weight
if variables_dict.get(f"tft5_for_conditional_generation/lm_head/kernel:0") is not None:
# format of google/t5-v1_1-small
# In t5 v1_1, pre_encoder_embedding_table and post_decoder_embedding_kernel are different
post_decoder_embedding_kernel = variables_dict.get(f"tft5_for_conditional_generation/lm_head/kernel:0").transpose()
post_decoder_embedding_bias = variables_dict.get(f"tft5_for_conditional_generation/lm_head//bias:0" or tf.constant([0], dtype=weight_data_type))
else:
# format of t5-small
post_decoder_embedding_kernel = variables_dict.get(f"shared/shared/weight:0")
post_decoder_embedding_bias = variables_dict.get(f"shared/shared/bias:0" or tf.constant([0], dtype=weight_data_type))
output_absolute_or_relative_position_embedding = np.transpose(variables_dict.get(var_prefix + f"block_._{0}/layer_._0/SelfAttention/relative_attention_bias/embeddings:0"))
# # pack the arguments into a tuple that mirrors the TF custom OP input
weights = [
pre_layernorm_beta,
pre_layernorm_gamma,
self_qkv_kernel,
self_qkv_bias,
self_output_kernel,
self_output_bias,
self_layernorm_beta,
self_layernorm_gamma,
cross_q_kernel,
cross_q_bias,
cross_k_kernel,
cross_k_bias,
cross_v_kernel,
cross_v_bias,
cross_output_kernel,
cross_output_bias,
cross_layernorm_beta,
cross_layernorm_gamma,
ffn_inter_kernel,
ffn_inter_bias,
ffn_inter2_kernel,
ffn_inter2_bias,
ffn_output_kernel,
ffn_output_bias,
output_layernorm_beta,
output_layernorm_gamma,
pre_encoder_embedding_table,
post_decoder_embedding_kernel,
post_decoder_embedding_bias,
output_absolute_or_relative_position_embedding
]
# clean up if there is None. Note - we cannot use np.array([0]) as TF won't accept empty tensors
for i in range(0, len(weights)):
if weights[i] is None:
weights[i] = tf.constant([0], dtype=weight_data_type)
elif type(weights[i]) is list:
weights[i] = [tf.constant([0], dtype=weight_data_type) if w is None else tf.convert_to_tensor(w, dtype=weight_data_type) for w in weights[i]]
else:
weights[i] = tf.convert_to_tensor(weights[i], dtype=weight_data_type)
self.weights = tuple(weights)
def ftt5_decoding(mem_hidden_states, mem_seq_len, decoding_params, max_seq_len, beam_width, top_k = 1, top_p = 0.0,
beam_search_diversity_rate = 0.0, temperature = 1.0, len_penalty = 0.0, repetition_penalty = 1.0,
random_seed = 0, return_cum_log_probs = False, return_output_log_probs = False):
outputs = transformer_op_module.t5_decoding(mem_hidden_states,
mem_seq_len,
*decoding_params.weights,
max_seq_len = max_seq_len,
beam_width = beam_width,
head_num = decoding_params.num_heads,
head_size = decoding_params.head_size,
inter_size = decoding_params.inter_size,
num_layer = decoding_params.num_layer,
d_model = decoding_params.d_model,
num_bucket = decoding_params.num_bucket,
max_distance = decoding_params.max_distance,
start_id = decoding_params.start_id,
end_id = decoding_params.end_id,
beam_search_diversity_rate = beam_search_diversity_rate,
top_k = top_k,
top_p = top_p,
temperature = temperature,
len_penalty = len_penalty,
repetition_penalty = repetition_penalty,
return_cum_log_probs = return_cum_log_probs,
return_output_log_probs = return_output_log_probs,
t5_with_bias = decoding_params.t5_with_bias,
activation_type = decoding_params.activation_type,
q_scaling = decoding_params.q_scaling,
position_embedding_type = decoding_params.position_embedding_type,
random_seed = random_seed,
tie_word_embeddings = decoding_params.tie_word_embeddings)
return outputs
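# A minimal usage sketch (illustrative only; `encoder_out` and `enc_seq_len` are the
# outputs of ftt5_encoder, and `decoding_params` is the weight container built above):
#
#   ids, seq_lens, output_log_probs, cum_log_probs = ftt5_decoding(
#       encoder_out, enc_seq_len, decoding_params, max_seq_len=32, beam_width=4)
#
# With beam_width=1 and top_k/top_p set, the same call performs sampling instead of
# beam search (see FTT5Model.compute below for an end-to-end example).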
class FTT5Model():
def __init__(self, encoder_params, decoding_params):
self.encoder_params = encoder_params
self.decoding_params = decoding_params
def compute(self, input_tokens, beam_width, max_seq_len, top_k=1, top_p = 0.0, beam_search_diversity_rate = 0.0,
temperature = 1.0, len_penalty = 0.0, repetition_penalty = 1.0, random_seed=0):
        input_ids = tf.cast(input_tokens.input_ids, tf.int32)  # the FT op expects int32 token ids
mem_seq_len = 0
if hasattr(input_tokens, "attention_mask"):
mem_seq_len = np.sum(input_tokens.attention_mask, axis=1)
else:
mem_seq_len = input_tokens.seq_len
mem_seq_len = tf.cast(mem_seq_len, tf.int32)
encoder_outputs = ftt5_encoder(input_ids, mem_seq_len, self.encoder_params)
ft_decoding_output_ids, ft_decoding_seq_lens, ft_output_log_probs, ft_cum_log_probs = ftt5_decoding(encoder_outputs,
mem_seq_len,
self.decoding_params,
max_seq_len,
beam_width,
top_k,
top_p,
beam_search_diversity_rate,
temperature,
len_penalty,
repetition_penalty,
random_seed=random_seed)
ft_decoding_output_ids = tf.reshape(ft_decoding_output_ids, [-1, beam_width, max_seq_len])
ft_decoding_seq_lens = tf.reshape(ft_decoding_seq_lens, [-1, beam_width])
return ft_decoding_output_ids.numpy(), ft_decoding_seq_lens.numpy() | FasterTransformer-main | examples/tensorflow/t5/utils/ft_decoding.py |
#! /usr/bin/env python3
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing
import numpy as np
import tensorflow as tf
import tensorflow_text
from datetime import datetime
from pathlib import Path
'''
TensorFlow's saved_model T5 -> FasterTransformer
'''
def write_config_file(save_dir, **kwargs):
file_template = """
[encoder]
vocab_size = {vocab_size:d}
d_model = {d_model:d}
d_kv = {d_kv:d}
d_ff = {d_ff:d}
num_layers = {num_encoder_layers:d}
num_decoder_layers = {num_decoder_layers:d}
num_heads = {num_heads:d}
is_gated_act = {encoder_is_gated_act!s}
weight_data_type = {data_type}
[decoder]
vocab_size = {vocab_size:d}
d_model = {d_model:d}
d_kv = {d_kv:d}
d_ff = {d_ff:d}
num_layers = {num_decoder_layers:d}
num_decoder_layers = {num_decoder_layers:d}
num_heads = {num_heads:d}
is_gated_act = {decoder_is_gated_act!s}
weight_data_type = {data_type}
"""
with open(Path(save_dir) / "config.ini", "w") as f:
f.write(file_template.format(**kwargs))
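# Usage sketch (values are illustrative, roughly those of google/t5-v1_1-small):
#
#   write_config_file("ft_t5/1-gpu", vocab_size=32128, d_model=512, d_kv=64,
#                     d_ff=1024, num_heads=6, num_encoder_layers=8,
#                     num_decoder_layers=8, encoder_is_gated_act=True,
#                     decoder_is_gated_act=True, data_type="fp32")
#
# writes ft_t5/1-gpu/config.ini with the [encoder] and [decoder] sections above.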
def export_key_name(key):
export_key = ""
split = key.split("__")
prefix = split[0]
is_layer = len(split) > 1 and "layers" in split[1]
if is_layer:
block_id = split[1].split("_")[-1]
name = ""
if prefix == "encoder":
if "attention__key" in key:
name = "0.SelfAttention.k.weight"
elif "attention__out" in key:
name = "0.SelfAttention.o.weight"
elif "attention__query" in key:
name = "0.SelfAttention.q.weight"
elif "attention__value" in key:
name = "0.SelfAttention.v.weight"
elif "pre_attention_layer_norm" in key:
name = "0.layer_norm.weight"
elif "mlp__wi_0" in key:
name = "1.DenseReluDense.wi.weight"
elif "mlp__wi_1" in key:
name = "1.DenseReluDense.wi2.weight"
elif "mlp__wo" in key:
name = "1.DenseReluDense.wo.weight"
elif "pre_mlp_layer_norm" in key:
name = "1.layer_norm.weight"
elif prefix == "decoder":
if "self_attention__key" in key:
name = "0.SelfAttention.k.weight"
elif "self_attention__out" in key:
name = "0.SelfAttention.o.weight"
elif "self_attention__query" in key:
name = "0.SelfAttention.q.weight"
elif "self_attention__value" in key:
name = "0.SelfAttention.v.weight"
elif "pre_self_attention_layer_norm" in key:
name = "0.layer_norm.weight"
elif "encoder_decoder_attention__key" in key:
name = "1.EncDecAttention.k.weight"
elif "encoder_decoder_attention__out" in key:
name = "1.EncDecAttention.o.weight"
elif "encoder_decoder_attention__query" in key:
name = "1.EncDecAttention.q.weight"
elif "encoder_decoder_attention__value" in key:
name = "1.EncDecAttention.v.weight"
elif "pre_cross_attention_layer_norm" in key:
name = "1.layer_norm.weight"
elif "mlp__wi_0" in key:
name = "2.DenseReluDense.wi.weight"
elif "mlp__wi_1" in key:
name = "2.DenseReluDense.wi2.weight"
elif "mlp__wo" in key:
name = "2.DenseReluDense.wo.weight"
elif "pre_mlp_layer_norm" in key:
name = "2.layer_norm.weight"
export_key = f"{prefix}.block.{block_id}.layer.{name}"
elif "decoder__relpos_bias" in key:
export_key = "decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
elif "decoder_norm" in key:
export_key = "decoder.final_layer_norm.weight"
elif "encoder__relpos_bias" in key:
export_key = "encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
elif "encoder_norm" in key:
export_key = "encoder.final_layer_norm.weight"
elif "logits_dense" in key:
export_key = "lm_head.weight"
elif "token_embedder" in key:
export_key = "shared.weight"
return export_key
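# Examples of the key mapping performed by export_key_name (variable names are
# illustrative of the "<prefix>__layers_<i>__..." handles in the saved_model):
#   "encoder__layers_0__attention__key__kernel:0" -> "encoder.block.0.layer.0.SelfAttention.k.weight"
#   "decoder__layers_3__mlp__wo__kernel:0"        -> "decoder.block.3.layer.2.DenseReluDense.wo.weight"
#   "decoder__logits_dense__kernel:0"             -> "lm_head.weight"
#   "token_embedder__embedding:0"                 -> "shared.weight"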
def handle_layer(key, value, dtype, saved_dir, tensor_para):
print(f"Handling {key} with shape {value.shape}")
val = value.astype(dtype)
factor = tensor_para
if key.find("shared.weight") != -1:
# shared weights, only need to convert the weights of rank 0
saved_path = saved_dir / f"{key}.bin"
val.tofile(saved_path.as_posix())
saved_path = saved_dir / f"{key}_T.bin"
val.T.tofile(saved_path.as_posix())
elif key.find("lm_head.weight") != -1:
# lm_head weights, only need to convert the weights of rank 0
        # logits_dense stores the kernel as [d_model, vocab_size]; transpose it to the
        # [vocab_size, d_model] layout used for lm_head (FT computes it with a TN gemm)
        val = val.transpose(1, 0)
saved_path = saved_dir / f"{key}.bin"
val.tofile(saved_path.as_posix())
elif key.find("layer_norm.weight") != -1:
# shared weights, only need to convert the weights of rank 0
saved_path = saved_dir / f"{key}.bin"
val.tofile(saved_path.as_posix())
elif (
key.find("SelfAttention.o.weight") != -1
or key.find("EncDecAttention.o.weight") != -1
or key.find("DenseReluDense.wo.weight") != -1
):
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir / f"{key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (
key.find("DenseReluDense.wi.weight") != -1
or (key.find("encoder") != -1 and (
key.find("SelfAttention.q.weight") != -1
or key.find("SelfAttention.k.weight") != -1
or key.find("SelfAttention.v.weight") != -1
)
)
or key.find("EncDecAttention.q.weight") != -1
or key.find("EncDecAttention.k.weight") != -1
or key.find("EncDecAttention.v.weight") != -1
):
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"{key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (
key.find("DenseReluDense.wi.weight") != -1
or key.find("DenseReluDense.wi2.weight") != -1
):
# For gated activation.
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"{key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif key.find("relative_attention_bias") != -1:
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir / f"{key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (
key.find("decoder") != -1 and
(
key.find("SelfAttention.q.weight") != -1
or key.find("SelfAttention.k.weight") != -1
or key.find("SelfAttention.v.weight") != -1
)
):
        # decoder self-attention q/k/v kernels are intentionally skipped here;
        # they are fused into a single QKV weight by fuse_decoder_qkv() below.
        pass
def fuse_decoder_qkv(model_dict, num_layers, dtype, saved_dir, tensor_para):
factor = tensor_para
for i in range(num_layers):
shape = model_dict[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"].T.shape
qkv = np.concatenate([model_dict[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"].T,
model_dict[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"].T,
model_dict[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"].T], axis=-1)
qkv = qkv.reshape([shape[0], 3, shape[1]])
qkv = qkv.astype(dtype)
split_vals = np.split(qkv, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"decoder.block.{i}.layer.0.SelfAttention.qkv.weight.{j}.bin"
split_vals[j].tofile(saved_path.as_posix())
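# Layout sketch for the fused weight written above (dimension names are illustrative):
# each per-layer q/k/v kernel is transposed to shape [m, n], the three tensors are
# concatenated along the last axis, reshaped to [m, 3, n], and then split along the
# final axis into `tensor_para` shards, one .bin file per rank.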
def convert_checkpoint(args):
base_dir = Path(args.checkpoint_dir)
model = tf.saved_model.load(base_dir)
layers = {layer._handle_name: np.array(layer)
for layer in model.signatures['serving_default'].variables}
vocab_size = layers["decoder__logits_dense__kernel:0"].shape[1]
d_model, d_ff = layers["encoder__layers_0__mlp__wi_0__kernel:0"].shape
num_heads = layers["decoder__relpos_bias__rel_embedding:0"].shape[0]
d_kv = d_model // num_heads
    decoder_is_gated_act = "decoder__layers_0__mlp__wi_1__kernel:0" in layers
    encoder_is_gated_act = "encoder__layers_0__mlp__wi_1__kernel:0" in layers
num_decoder_layers = 0
num_encoder_layers = 0
for key in layers:
layer = key.split("__")[1]
        num = int(layer.split("_")[-1]) + 1 if "layers" in layer else 0  # layers_* indices are 0-based
if "encoder" in key:
num_encoder_layers = max(num, num_encoder_layers)
elif "decoder" in key:
num_decoder_layers = max(num, num_decoder_layers)
tp_source = 1
tp_target = args.tensor_parallelism
print(f"Converting from {tp_source} to {tp_target} GPUs")
save_dir = Path(args.save_dir) / f"{tp_target:d}-gpu"
save_dir.mkdir(parents=True, exist_ok=True)
layers_export = {export_key_name(layer[0]): layer[1] for layer in layers.items()}
    for item in layers_export.items():
        handle_layer(*item, np.float32, save_dir, tp_target)
    fuse_decoder_qkv(layers_export, num_decoder_layers, np.float32, save_dir, tp_target)
write_config_file(args.save_dir + f"/{args.tensor_parallelism}-gpu",
vocab_size=vocab_size,
d_model=d_model,
d_ff=d_ff,
d_kv=d_kv,
num_heads=num_heads,
num_decoder_layers=num_decoder_layers,
decoder_is_gated_act=decoder_is_gated_act,
num_encoder_layers=num_encoder_layers,
encoder_is_gated_act=encoder_is_gated_act,
data_type="fp32",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint_dir", metavar="checkpoint-dir",
help="directory where resides the source model.")
parser.add_argument("save_dir", metavar="save-dir",
help="where to store the FT model")
parser.add_argument("--tensor-parallelism", "-t", type=int, default=1,
help="level of tensor parallelism used for inference")
parser.add_argument("--jobs", "-j", type=int, default=None,
help="how many processes to spawn for conversion (default: cpu_count)")
args = parser.parse_args()
start_time = datetime.now()
convert_checkpoint(args)
stop_time = datetime.now()
run_time = (stop_time - start_time)
print("[INFO] Spend {} (h:m:s) to convert the model".format(run_time))
| FasterTransformer-main | examples/tensorflow/t5/utils/tf_saved_model_t5_ckpt_convert.py |
#! /usr/bin/env python3
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing
import numpy as np
import zarr as zr
import re
from datetime import datetime
from multiprocessing import Pool
from pathlib import Path
from string import Template
'''
TensorFlow's saved_model T5 -> FasterTransformer
'''
def write_config_file(save_dir, **kwargs):
script_dir = Path(__file__).parent.resolve()
with open(script_dir / "ul2_config.template") as f:
config_template = Template(f.read())
with open(Path(save_dir) / "config.ini", "w") as f:
f.write(config_template.substitute(**kwargs))
def export_key_name(key):
export_key = ""
split = key.split(".")
prefix = split[1]
is_layer = "layers" in split[2]
if is_layer:
block_id = split[2].split("_")[-1]
name = ""
if prefix == "encoder":
if "attention.key" in key:
name = "0.SelfAttention.k.weight"
elif "attention.out" in key:
name = "0.SelfAttention.o.weight"
elif "attention.query" in key:
name = "0.SelfAttention.q.weight"
elif "attention.value" in key:
name = "0.SelfAttention.v.weight"
elif "pre_attention_layer_norm" in key:
name = "0.layer_norm.weight"
elif "mlp.wi_0" in key:
name = "1.DenseReluDense.wi.weight"
elif "mlp.wi_1" in key:
name = "1.DenseReluDense.wi2.weight"
elif "mlp.wo" in key:
name = "1.DenseReluDense.wo.weight"
elif "pre_mlp_layer_norm" in key:
name = "1.layer_norm.weight"
elif prefix == "decoder":
if "self_attention.key" in key:
name = "0.SelfAttention.k.weight"
elif "self_attention.out" in key:
name = "0.SelfAttention.o.weight"
elif "self_attention.query" in key:
name = "0.SelfAttention.q.weight"
elif "self_attention.value" in key:
name = "0.SelfAttention.v.weight"
elif "pre_self_attention_layer_norm" in key:
name = "0.layer_norm.weight"
elif "encoder_decoder_attention.key" in key:
name = "1.EncDecAttention.k.weight"
elif "encoder_decoder_attention.out" in key:
name = "1.EncDecAttention.o.weight"
elif "encoder_decoder_attention.query" in key:
name = "1.EncDecAttention.q.weight"
elif "encoder_decoder_attention.value" in key:
name = "1.EncDecAttention.v.weight"
elif "pre_cross_attention_layer_norm" in key:
name = "1.layer_norm.weight"
elif "mlp.wi_0" in key:
name = "2.DenseReluDense.wi.weight"
elif "mlp.wi_1" in key:
name = "2.DenseReluDense.wi2.weight"
elif "mlp.wo" in key:
name = "2.DenseReluDense.wo.weight"
elif "pre_mlp_layer_norm" in key:
name = "2.layer_norm.weight"
export_key = f"{prefix}.block.{block_id}.layer.{name}"
elif "decoder.relpos_bias" in key:
export_key = "decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
elif "decoder_norm" in key:
export_key = "decoder.final_layer_norm.weight"
elif "encoder.relpos_bias" in key:
export_key = "encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
elif "encoder_norm" in key:
export_key = "encoder.final_layer_norm.weight"
elif "token_embedder" in key:
export_key = "shared.weight"
return export_key
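# Examples of the key mapping performed by export_key_name (entry names are
# illustrative T5X/zarr checkpoint directories):
#   "target.decoder.layers_0.self_attention.query.kernel" -> "decoder.block.0.layer.0.SelfAttention.q.weight"
#   "target.encoder.layers_11.mlp.wo.kernel"              -> "encoder.block.11.layer.1.DenseReluDense.wo.weight"
#   "target.token_embedder.embedding"                     -> "shared.weight"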
def handle_layer(key, file_name, dtype, saved_dir, tensor_para):
val = np.array(zr.load(file_name)).astype(dtype)
print(f"Processing {key} with shape {val.shape}")
factor = tensor_para
if key.find("shared.weight") != -1:
# shared weights, only need to convert the weights of rank 0
saved_path = saved_dir / f"{key}.bin"
val.T.tofile(saved_path.as_posix())
saved_path = saved_dir / f"{key}_T.bin"
val.tofile(saved_path.as_posix())
val.tofile(saved_dir / "lm_head.weight.bin")
elif key.find("layer_norm.weight") != -1:
# shared weights, only need to convert the weights of rank 0
saved_path = saved_dir / f"{key}.bin"
val.tofile(saved_path.as_posix())
elif (
key.find("SelfAttention.o.weight") != -1
or key.find("EncDecAttention.o.weight") != -1
or key.find("DenseReluDense.wo.weight") != -1
):
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir / f"{key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (
key.find("DenseReluDense.wi.weight") != -1
or (key.find("encoder") != -1 and (
key.find("SelfAttention.q.weight") != -1
or key.find("SelfAttention.k.weight") != -1
or key.find("SelfAttention.v.weight") != -1
)
)
or key.find("EncDecAttention.q.weight") != -1
or key.find("EncDecAttention.k.weight") != -1
or key.find("EncDecAttention.v.weight") != -1
):
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"{key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (
key.find("DenseReluDense.wi.weight") != -1
or key.find("DenseReluDense.wi2.weight") != -1
):
# For gated activation.
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"{key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif key.find("relative_attention_bias") != -1:
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir / f"{key}.{j:d}.bin"
split_vals[j].tofile(saved_path.as_posix())
elif (
key.find("decoder") != -1 and
(
key.find("SelfAttention.q.weight") != -1
or key.find("SelfAttention.k.weight") != -1
or key.find("SelfAttention.v.weight") != -1
)
):
        # decoder self-attention q/k/v kernels are intentionally skipped here;
        # they are fused into a single QKV weight by fuse_decoder_qkv() below.
        pass
def fuse_decoder_qkv(model_dict, layer_num, dtype, saved_dir, tensor_para):
print(f"Processing decoder qkv SelfAttention merge block {layer_num}")
factor = tensor_para
q = np.array(zr.load(model_dict[f"decoder.block.{layer_num}.layer.0.SelfAttention.q.weight"]))
k = np.array(zr.load(model_dict[f"decoder.block.{layer_num}.layer.0.SelfAttention.k.weight"]))
v = np.array(zr.load(model_dict[f"decoder.block.{layer_num}.layer.0.SelfAttention.v.weight"]))
shape = q.shape
qkv = np.concatenate([q, k, v], axis=-1)
qkv = qkv.reshape([shape[0], 3, shape[1]])
qkv = qkv.astype(dtype)
split_vals = np.split(qkv, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir / f"decoder.block.{layer_num}.layer.0.SelfAttention.qkv.weight.{j}.bin"
split_vals[j].tofile(saved_path.as_posix())
def read_config(gin_file):
with open(gin_file) as f:
data = f.read()
config = {}
config["num_embeddings"] = int(re.search(r"NUM_EMBEDDINGS = (\d+)", data).group(1))
config["embed_dim"] = int(re.search(r"EMBED_DIM = (\d+)", data).group(1))
config["head_dim"] = int(re.search(r"HEAD_DIM = (\d+)", data).group(1))
config["mlp_dim"] = int(re.search(r"MLP_DIM = (\d+)", data).group(1))
config["num_decoder_layers"] = int(re.search(r"NUM_DECODER_LAYERS = (\d+)", data).group(1))
config["num_encoder_layers"] = int(re.search(r"NUM_ENCODER_LAYERS = (\d+)", data).group(1))
config["num_heads"] = int(re.search(r"NUM_HEADS = (\d+)", data).group(1))
return config
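# The regexes above expect gin assignments of the following form (values are
# illustrative, roughly UL2-sized):
#   NUM_EMBEDDINGS = 32128
#   EMBED_DIM = 4096
#   HEAD_DIM = 256
#   MLP_DIM = 16384
#   NUM_DECODER_LAYERS = 32
#   NUM_ENCODER_LAYERS = 32
#   NUM_HEADS = 16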
def convert_checkpoint(args):
base_dir = Path(args.checkpoint_dir)
config = read_config(base_dir / "config.gin")
print(config)
checkpoint_dir = list(base_dir.glob("checkpoint_*"))[0]
print(f"[INFO] Reading checkpoint dir {checkpoint_dir}")
layers = {}
for file in checkpoint_dir.iterdir():
if not file.is_dir():
continue
weight_name = file.parts[-1]
layers[weight_name] = str(file.resolve())
tp_source = 1
tp_target = args.tensor_parallelism
save_dtype = np.float32
print(f"Converting from {tp_source} to {tp_target} GPUs")
save_dir = Path(args.save_dir) / f"{tp_target:d}-gpu"
save_dir.mkdir(parents=True, exist_ok=True)
layers_export = {export_key_name(layer[0]): layer[1] for layer in layers.items() if "target" in layer[0]}
final_layernorm_key = "decoder.final_layer_norm.weight"
if "decoder.final_layer_norm.weight" not in layers_export:
print("[WARNING] Decoder final LayerNorm not found. Generate tensor of ones as a replacement.")
np.ones(config["embed_dim"], dtype=save_dtype).tofile(str(save_dir / (final_layernorm_key + ".bin")))
with Pool(processes=args.jobs) as pool:
pool.starmap(handle_layer,
((*item, save_dtype, save_dir, tp_target) for item in layers_export.items()))
pool.starmap(fuse_decoder_qkv,
((layers_export, i, save_dtype, save_dir, tp_target) for i in range(config["num_decoder_layers"])))
write_config_file(args.save_dir + f"/{args.tensor_parallelism}-gpu",
vocab_size=config["num_embeddings"],
d_model=config["embed_dim"],
d_ff=config["mlp_dim"],
d_kv=config["head_dim"],
num_heads=config["num_heads"],
num_decoder_layers=config["num_decoder_layers"],
num_encoder_layers=config["num_encoder_layers"],
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint_dir", metavar="checkpoint-dir",
help="directory where resides the source model.")
parser.add_argument("save_dir", metavar="save-dir",
help="where to store the FT model")
parser.add_argument("--tensor-parallelism", "-t", type=int, default=1,
help="level of tensor parallelism used for inference")
parser.add_argument("--jobs", "-j", type=int, default=None,
help="how many processes to spawn for conversion (default: cpu_count)")
args = parser.parse_args()
start_time = datetime.now()
convert_checkpoint(args)
stop_time = datetime.now()
run_time = (stop_time - start_time)
print("[INFO] Spend {} (h:m:s) to convert the model".format(run_time))
| FasterTransformer-main | examples/tensorflow/t5/utils/jax_t5_ckpt_convert.py |
"""
Modified From https://github.com/OpenNMT/OpenNMT-tf/blob/r1/examples/library/minimal_transformer_training.py
MIT License
Copyright (c) 2017-present The OpenNMT Authors.
This example demonstrates how to train a standard Transformer model using
OpenNMT-tf as a library in about 200 lines of code. While relatively short,
this example contains some advanced concepts such as dataset bucketing and
prefetching, token-based batching, gradients accumulation, beam search, etc.
Currently, the beam search part is not easily customizable. This is expected to
be improved for TensorFlow 2.0 which is eager first.
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Use opennmt-tf-1.25.1
import argparse
import copy
from datetime import datetime
import numpy as np
import os
import sys
import tensorflow as tf
import opennmt as onmt
from opennmt import constants
from opennmt.utils import misc
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.tensorflow.decoding.utils.ft_decoding import ft_decoding
from examples.tensorflow.decoding.utils.bleu_score import bleu_score
from examples.tensorflow.decoder.utils.decoding import tf_sampling_decoding
from examples.tensorflow.decoder.utils.decoding import tf_beamsearch_decoding
from examples.tensorflow.decoder.utils.common import DecodingArgumentNew
from examples.tensorflow.decoder.utils.common import TransformerArgument
from examples.tensorflow.decoder.utils.common import DecodingSamplingArgument
from examples.tensorflow.decoder.utils.common import DecodingBeamsearchArgument
from examples.tensorflow.encoder.utils.encoder import ft_encoder_opennmt
from examples.tensorflow.encoder.utils.encoder import tf_encoder_opennmt
NUM_HEADS = 8
NUM_LAYERS = 6
HIDDEN_UNITS = 512
SIZE_PER_HEAD = 64
FFN_INNER_DIM = 2048
encoder = onmt.encoders.SelfAttentionEncoder(
num_layers=NUM_LAYERS,
num_units=HIDDEN_UNITS,
num_heads=NUM_HEADS,
ffn_inner_dim=FFN_INNER_DIM,
dropout=0.1,
attention_dropout=0.1,
relu_dropout=0.1)
decoder = onmt.decoders.SelfAttentionDecoder(
num_layers=NUM_LAYERS,
num_units=HIDDEN_UNITS,
num_heads=NUM_HEADS,
ffn_inner_dim=FFN_INNER_DIM,
dropout=0.1,
attention_dropout=0.1,
relu_dropout=0.1)
def translate(args_dict):
batch_size = args_dict['batch_size']
beam_size = args_dict['beam_width']
max_seq_len = args_dict['max_seq_len']
model_dir = args_dict["model_dir"]
source_file = args_dict["source"]
tgt_file = args_dict["target"]
time_args = args_dict["test_time"]
beam_search_diversity_rate = args_dict['beam_search_diversity_rate']
sampling_topk = args_dict['sampling_topk']
sampling_topp = args_dict['sampling_topp']
tf_datatype = tf.float32
max_ite = args_dict['max_iteration']
if args_dict['data_type'] == "fp16":
tf_datatype = tf.float16
elif args_dict['data_type'] == "bf16":
tf_datatype = tf.bfloat16
print("\n=============== Argument ===============")
for key in args_dict:
print("{}: {}".format(key, args_dict[key]))
print("========================================")
# Define the "base" Transformer model.
source_inputter = onmt.inputters.WordEmbedder("source_vocabulary", embedding_size=512, dtype=tf_datatype)
target_inputter = onmt.inputters.WordEmbedder("target_vocabulary", embedding_size=512, dtype=tf_datatype)
inputter = onmt.inputters.ExampleInputter(source_inputter, target_inputter)
inputter.initialize({
"source_vocabulary": args_dict["source_vocabulary"],
"target_vocabulary": args_dict["target_vocabulary"]
})
mode = tf.estimator.ModeKeys.PREDICT
np.random.seed(1)
tf.set_random_seed(1)
# Create the inference dataset.
dataset = inputter.make_inference_dataset(source_file, batch_size)
iterator = dataset.make_initializable_iterator()
source = iterator.get_next()
encoder_args = TransformerArgument(beam_width=1,
head_num=NUM_HEADS,
size_per_head=SIZE_PER_HEAD,
inter_size=NUM_HEADS*SIZE_PER_HEAD*4,
num_layer=NUM_LAYERS,
dtype=tf_datatype,
remove_padding=True,
allow_gemm_test=False)
# Encode the source.
with tf.variable_scope("transformer/encoder"):
source_embedding = source_inputter.make_inputs(source)
source_embedding = tf.cast(source_embedding, tf_datatype)
# Using onmt fp16 for encoder.encode leads to significant accuracy drop
# So, we rewrite the encoder
# memory, _, _ = encoder.encode(source_embedding, source["length"], mode=mode)
memory = tf_encoder_opennmt(source_embedding, encoder_args, source["length"])
encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
encoder_variables_dict = {}
for v in encoder_vars:
encoder_variables_dict[v.name] = tf.cast(v, tf_datatype)
ft_encoder_result = ft_encoder_opennmt(inputs=source_embedding,
encoder_args=encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=source["length"])
# Generate the target.
with tf.variable_scope("transformer/decoder", reuse=tf.AUTO_REUSE):
target_inputter.build()
batch_size = tf.shape(memory)[0]
start_tokens = tf.fill([batch_size], constants.START_OF_SENTENCE_ID)
end_token = constants.END_OF_SENTENCE_ID
target_embedding = tf.cast(target_inputter.embedding, tf_datatype)
target_ids, _, target_length, _ = decoder.dynamic_decode_and_search(
target_embedding,
start_tokens,
end_token,
vocab_size=target_inputter.vocabulary_size,
beam_width=beam_size,
memory=memory,
memory_sequence_length=source["length"],
maximum_iterations=max_seq_len)
target_vocab_rev = target_inputter.vocabulary_lookup_reverse()
target_tokens = target_vocab_rev.lookup(tf.cast(target_ids, tf.int64))
decoder_args = TransformerArgument(beam_width=beam_size,
head_num=NUM_HEADS,
size_per_head=SIZE_PER_HEAD,
inter_size=NUM_HEADS*SIZE_PER_HEAD*4,
num_layer=NUM_LAYERS,
dtype=tf_datatype,
kernel_init_range=0.00,
bias_init_range=0.00)
decoder_args_2 = copy.deepcopy(decoder_args) # for beam search
decoder_args_2.__dict__ = copy.deepcopy(decoder_args.__dict__)
decoder_args_2.beam_width = 1 # for sampling
ft_decoder_beamsearch_args = DecodingBeamsearchArgument(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
decoder_args,
beam_search_diversity_rate)
ft_decoder_sampling_args = DecodingSamplingArgument(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
decoder_args_2,
sampling_topk,
sampling_topp)
decoding_beamsearch_args = DecodingArgumentNew(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
beam_search_diversity_rate,
0,
0.0,
decoder_args)
decoding_sampling_args = DecodingArgumentNew(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
0.0,
sampling_topk,
sampling_topp,
decoder_args_2)
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
ft_target_ids, ft_target_length, _, _, _ = ft_decoding(ft_encoder_result,
source["length"],
target_embedding,
all_vars,
decoding_beamsearch_args)
ft_target_tokens = target_vocab_rev.lookup(tf.cast(ft_target_ids, tf.int64))
ft_sampling_target_ids, ft_sampling_target_length, _, _, _ = ft_decoding(ft_encoder_result,
source["length"],
target_embedding,
all_vars,
decoding_sampling_args)
ft_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(ft_sampling_target_ids, tf.int64))
    ### TF Sampling Decoding ###
tf_sampling_target_ids, tf_sampling_target_length = tf_sampling_decoding(memory,
source["length"],
target_embedding,
ft_decoder_sampling_args,
decoder_type=0)
# tf_sampling_target_tokens: [batch_size, seq_len]
tf_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(tf_sampling_target_ids, tf.int64))
    ### end of TF Sampling Decoding ###
### OP BeamSearch Decoder ###
ft_decoder_beamsearch_target_ids, ft_decoder_beamsearch_target_length, _, _, _ = tf_beamsearch_decoding(memory,
source["length"],
target_embedding,
ft_decoder_beamsearch_args,
decoder_type=1)
# ft_decoder_beamsearch_target_tokens: [batch_size, beam_width, seq_len]
ft_decoder_beamsearch_target_tokens = target_vocab_rev.lookup(tf.cast(ft_decoder_beamsearch_target_ids, tf.int64))
### end of OP BeamSearch Decoder ###
### OP Sampling Decoder ###
ft_decoder_sampling_target_ids, ft_decoder_sampling_target_length = tf_sampling_decoding(memory,
source["length"],
target_embedding,
ft_decoder_sampling_args,
decoder_type=1)
ft_decoder_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(ft_decoder_sampling_target_ids, tf.int64))
    ### end of OP Sampling Decoder ###
class TranslationResult(object):
def __init__(self, token_op, length_op, name):
self.token_op = token_op
self.length_op = length_op
self.name = name
self.file_name = name + ".txt"
self.token_list = []
self.length_list = []
self.batch_num = 0
self.execution_time = 0.0 # seconds
self.sentence_num = 0
self.bleu_score = None
translation_result_list = []
if time_args != "":
translation_result_list.append(TranslationResult(
tf_sampling_target_tokens, tf_sampling_target_length, "tf-decoding-sampling-for-warmup"))
if time_args.find("0") != -1:
translation_result_list.append(TranslationResult(
target_tokens, target_length, "tf-decoding-beamsearch"))
if time_args.find("1") != -1:
translation_result_list.append(TranslationResult(
ft_decoder_beamsearch_target_tokens, ft_decoder_beamsearch_target_length, "ft-decoder-beamsearch"))
if time_args.find("2") != -1:
translation_result_list.append(TranslationResult(
ft_target_tokens, ft_target_length, "ft-decoding-beamsearch"))
if time_args.find("3") != -1:
translation_result_list.append(TranslationResult(
tf_sampling_target_tokens, tf_sampling_target_length, "tf-decoding-sampling"))
if time_args.find("4") != -1:
translation_result_list.append(TranslationResult(
ft_decoder_sampling_target_tokens, ft_decoder_sampling_target_length, "ft-decoder-sampling"))
if time_args.find("5") != -1:
translation_result_list.append(TranslationResult(
ft_sampling_target_tokens, ft_sampling_target_length, "ft-decoding-sampling"))
# Iterates on the dataset.
float_checkpoint_path = tf.train.latest_checkpoint(model_dir)
half_checkpoint_path = tf.train.latest_checkpoint(model_dir + "_fp16")
bf16_checkpoint_path = tf.train.latest_checkpoint(model_dir + "_bf16")
float_var_list = []
half_var_list = []
bf16_var_list = []
for var in tf.global_variables():
if var.dtype.base_dtype == tf.float32:
float_var_list.append(var)
elif var.dtype.base_dtype == tf.float16:
half_var_list.append(var)
elif var.dtype.base_dtype == tf.bfloat16:
bf16_var_list.append(var)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
for i in range(len(translation_result_list)):
with tf.Session(config=config) as sess:
if(len(float_var_list) > 0):
float_saver = tf.train.Saver(float_var_list)
float_saver.restore(sess, float_checkpoint_path)
if(len(half_var_list) > 0):
half_saver = tf.train.Saver(half_var_list)
half_saver.restore(sess, half_checkpoint_path)
if(len(bf16_var_list) > 0):
bf16_saver = tf.train.Saver(bf16_var_list)
bf16_saver.restore(sess, bf16_checkpoint_path)
sess.run(tf.tables_initializer())
sess.run(iterator.initializer)
t1 = datetime.now()
while True:
try:
batch_tokens, batch_length = sess.run([translation_result_list[i].token_op,
translation_result_list[i].length_op])
for tokens, length in zip(batch_tokens, batch_length):
# misc.print_bytes(b" ".join(tokens[0][:length[0] - 1]))
if translation_result_list[i].name.find("beamsearch") != -1:
translation_result_list[i].token_list.append(
b" ".join(tokens[0][:length[0] - 1]).decode("UTF-8"))
else:
translation_result_list[i].token_list.append(b" ".join(tokens[:length - 1]).decode("UTF-8"))
translation_result_list[i].batch_num += 1
if translation_result_list[i].name == "tf-decoding-sampling-for-warmup" and translation_result_list[i].batch_num > 20:
break
if translation_result_list[i].batch_num >= max_ite:
break
except tf.errors.OutOfRangeError:
break
t2 = datetime.now()
time_sum = (t2 - t1).total_seconds()
translation_result_list[i].execution_time = time_sum
with open(translation_result_list[i].file_name, "w") as file_b:
for s in translation_result_list[i].token_list:
file_b.write(s)
file_b.write("\n")
ref_file_path = "./.ref_file.txt"
os.system("head -n %d %s > %s" % (len(translation_result_list[i].token_list), tgt_file, ref_file_path))
translation_result_list[i].bleu_score = bleu_score(translation_result_list[i].file_name, ref_file_path)
os.system("rm {}".format(ref_file_path))
for t in translation_result_list:
if t.name == "tf-decoding-sampling-for-warmup":
continue
print("[INFO] {} translates {} batches taking {:.2f} sec to translate {} tokens, BLEU score: {:.2f}, {:.0f} tokens/sec.".format(
t.name, t.batch_num, t.execution_time, t.bleu_score.sys_len, t.bleu_score.score, t.bleu_score.sys_len / t.execution_time))
return translation_result_list
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--max_seq_len', type=int, default=200, metavar='NUMBER',
help='max sequence length (default: 200)')
parser.add_argument("--source", default="../examples/tensorflow/decoding/utils/translation/test.en",
help="Path to the source file.")
parser.add_argument("--target", default="../examples/tensorflow/decoding/utils/translation/test.de",
help="Path to the target file.")
parser.add_argument("--source_vocabulary", default="../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
help="Path to the source vocabulary.")
parser.add_argument("--target_vocabulary", default="../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
help="Path to the target vocabulary.")
parser.add_argument("--model_dir", default="../translation/ckpt",
help="Directory where checkpoint are written.")
parser.add_argument('-time', '--test_time', type=str, default='', metavar='STRING',
help='''
                        Which implementations to time (default: '' = time none of them);
                        '': time none of them
                        '0': time tf_decoding_beamsearch
                        '1': time op_decoder_beamsearch
                        '2': time op_decoding_beamsearch
                        '3': time tf_decoding_sampling
                        '4': time op_decoder_sampling
                        '5': time op_decoding_sampling
                        e.g., to time op_decoder_beamsearch and op_decoding_sampling,
                        pass -time '15'.''')
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
                        help='diversity rate of beam search. Default is 0. When the diversity rate is 0, it is equivalent to naive beam search.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16', 'bf16'])
parser.add_argument('-max_ite', '--max_iteration', type=int, default=100000, metavar='NUMBER',
                        help='Maximum number of iterations for translation; default is 100000 (large enough to run the whole test set).')
args = parser.parse_args()
translate(vars(args))
# example script
# python ../examples/tensorflow/decoding/translate_example.py --source ../examples/tensorflow/decoding/utils/translation/test.en --target ../examples/tensorflow/decoding/utils/translation/test.de --source_vocabulary ../examples/tensorflow/decoding/utils/translation/wmtende.vocab --target_vocabulary ../examples/tensorflow/decoding/utils/translation/wmtende.vocab --model_dir ../translation/ckpt/ -time 02
if __name__ == "__main__":
main()
| FasterTransformer-main | examples/tensorflow/decoding/translate_example.py |
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is a sample code to demonstrate how to use the TensorFlow custom op
with FasterTransformer library in decoding.
This sample code builds a decoding model by TensorFlow and TensorFlow custom
op. Compare 1. the results of TensorFlow decoding with beam search and
the results FasterTransformer decoding with beam search; and 2. the results
of TensorFlow decoding with sampling and the results FasterTransformer decoding
with sampling.
Users are also able to use this sample code to test the average forward time of
TensorFlow and FasterTransformer.
"""
import copy
import numpy as np
import argparse
import tensorflow as tf
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.tensorflow.decoder.utils.common import time_test
from examples.tensorflow.decoder.utils.common import DecodingBeamsearchArgument
from examples.tensorflow.decoder.utils.common import DecodingSamplingArgument
from examples.tensorflow.decoder.utils.common import DecodingArgumentNew
from examples.tensorflow.decoder.utils.common import TransformerArgument
from examples.tensorflow.decoder.utils.common import int_result_cross_check
from examples.tensorflow.decoder.utils.decoding import tf_beamsearch_decoding
from examples.tensorflow.decoder.utils.decoding import tf_sampling_decoding
from examples.tensorflow.decoder.utils.decoding import generate_encoder_result
from examples.tensorflow.decoding.utils.ft_decoding import ft_decoding
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--max_seq_len', type=int, default=30, metavar='NUMBER',
help='max sequence length (default: 30)')
parser.add_argument('-n', '--head_number', type=int, default=8, metavar='NUMBER',
help='head number (default: 8)')
parser.add_argument('-size', '--size_per_head', type=int, default=64, metavar='NUMBER',
help='size per head (default: 64)')
parser.add_argument('-inter_size', '--inter_size', type=int, default=0, metavar='NUMBER',
help='inter_size (default: 0)')
parser.add_argument('-l', '--num_layer', type=int, default=6, metavar='NUMBER',
help='number of layers (default: 6)')
parser.add_argument('-mem_hidden', '--memory_hidden_dim', type=int, default=768, metavar='NUMBER',
help='memory hidden dim (default: 768)')
    parser.add_argument('-v', '--vocab_size', type=int, default=30000, metavar='NUMBER',
                        help='vocabulary size. (default: 30000).')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16', 'bf16'])
parser.add_argument('-x', '--use_XLA', type=int, default=0, metavar='BOOL',
help='use XLA (default: False 0)', choices=[0, 1])
parser.add_argument('-time', '--test_time', type=str, default='', metavar='STRING',
help='''
                        Which implementations to time (default: '' = time none of them);
                        '': time none of them
                        '0': time tf_decoding_beamsearch
                        '1': time op_decoding_beamsearch
                        '2': time tf_decoding_sampling
                        '3': time op_decoding_sampling
                        e.g., to time tf_decoding_beamsearch and op_decoding_sampling,
                        pass -time '02'.''')
parser.add_argument('-check', '--cross_check', type=int, default=1, metavar='BOOL',
help='cross check the answer of TF and OP. (default: True (1)), False is 0.',
choices=[0, 1])
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
                        help='diversity rate of beam search. Default is 0. When the diversity rate is 0, it is equivalent to naive beam search.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
start_of_sentence_id = 1
end_of_sentence_id = 2
np.random.seed(1)
tf.set_random_seed(1)
kernel_initializer_range = 0.02
bias_initializer_range = 0.02
batch_size = args.batch_size
beam_width = args.beam_width
max_seq_len = args.max_seq_len
head_num = args.head_number
size_per_head = args.size_per_head
inter_size = args.inter_size
if inter_size == 0:
inter_size = head_num * size_per_head * 4
num_layer = args.num_layer
vocab_size = args.vocab_size
tf_datatype = tf.float32
np_datatype = np.float32
if args.data_type == "fp16":
tf_datatype = tf.float16
np_datatype = np.float16
elif args.data_type == 'bf16':
tf_datatype = tf.bfloat16
use_XLA = args.use_XLA
beam_search_diversity_rate = args.beam_search_diversity_rate
sampling_topk = args.sampling_topk
sampling_topp = args.sampling_topp
hidden_dim = head_num * size_per_head
memory_hidden_dim = args.memory_hidden_dim
decoder_args = TransformerArgument(beam_width=beam_width,
head_num=head_num,
size_per_head=size_per_head,
inter_size=inter_size,
num_layer=num_layer,
dtype=tf_datatype,
kernel_init_range=kernel_initializer_range,
bias_init_range=bias_initializer_range)
ft_decoding_beamsearch_args = DecodingArgumentNew(vocab_size,
start_of_sentence_id,
end_of_sentence_id,
max_seq_len,
beam_search_diversity_rate,
0,
0.0,
decoder_args)
decoding_args = DecodingBeamsearchArgument(vocab_size,
start_of_sentence_id,
end_of_sentence_id,
max_seq_len,
decoder_args,
beam_search_diversity_rate)
decoder_args_2 = copy.deepcopy(decoder_args) # for beam search
decoder_args_2.__dict__ = copy.deepcopy(decoder_args.__dict__)
decoder_args_2.beam_width = 1 # for sampling
decoding_sampling_args = DecodingSamplingArgument(vocab_size,
start_of_sentence_id,
end_of_sentence_id,
max_seq_len,
decoder_args_2,
sampling_topk,
sampling_topp)
ft_decoding_sampling_args = DecodingArgumentNew(vocab_size,
start_of_sentence_id,
end_of_sentence_id,
max_seq_len,
0.0,
sampling_topk,
sampling_topp,
decoder_args_2)
embedding_table = np.random.rand(vocab_size, hidden_dim).astype(
np_datatype) # a [vocab_size, hidden_dim] table
embedding_table = tf.convert_to_tensor(embedding_table)
memory, memory_sequence_length = generate_encoder_result(
batch_size, max_seq_len, memory_hidden_dim, tf_datatype)
finalized_tf_output_ids, finalized_tf_sequence_lengths, tf_output_ids, \
tf_parent_ids, tf_sequence_lengths = tf_beamsearch_decoding(memory,
memory_sequence_length,
embedding_table,
decoding_args,
decoder_type=0)
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
tf_sampling_target_ids, tf_sampling_target_length = tf_sampling_decoding(memory,
memory_sequence_length,
embedding_table,
decoding_sampling_args,
decoder_type=0)
finalized_op_output_ids, finalized_op_sequence_lengths, cum_log_probs, \
_, _ = ft_decoding(memory,
memory_sequence_length,
embedding_table,
all_vars,
ft_decoding_beamsearch_args)
op_sampling_target_ids, op_sampling_target_length, op_sampling_cum_log_probs, _, _ = ft_decoding(memory,
memory_sequence_length,
embedding_table,
all_vars,
ft_decoding_sampling_args)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
if use_XLA == 1:
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.tables_initializer())
if args.cross_check == 1:
finalized_op_output_ids_result, finalized_op_sequence_lengths_result = sess.run(
[finalized_op_output_ids, finalized_op_sequence_lengths])
finalized_tf_output_ids_result, tf_output_ids_result, tf_parent_ids_result, \
tf_sequence_lengths_result = sess.run(
[finalized_tf_output_ids, tf_output_ids, tf_parent_ids, tf_sequence_lengths])
print("[INFO] BeamSearch cross check:")
int_result_cross_check("Finalized output ids", finalized_tf_output_ids_result.T,
finalized_op_output_ids_result.T,
shape=[batch_size, beam_width, max_seq_len])
int_result_cross_check("Sequence lengths", tf_sequence_lengths_result,
finalized_op_sequence_lengths_result, shape=[batch_size, beam_width, 1])
op_sampling_ids, op_sampling_length, op_sampling_cum_log_probs = sess.run([op_sampling_target_ids,
op_sampling_target_length,
op_sampling_cum_log_probs])
print("[INFO] Cumulative log probabilities:")
print(op_sampling_cum_log_probs)
tf_sampling_ids, tf_sampling_length = sess.run([tf_sampling_target_ids,
tf_sampling_target_length])
print("[INFO] Sampling cross check:")
int_result_cross_check("Output ids", tf_sampling_ids, op_sampling_ids,
shape=[batch_size, max_seq_len])
int_result_cross_check("Sequence length", tf_sampling_length, op_sampling_length,
shape=[batch_size])
time_args = args.test_time
test_lists = []
test_names = []
if time_args.find("0") != -1:
test_lists.append(finalized_tf_output_ids)
test_names.append("TF-decoding-beamsearch")
if time_args.find("1") != -1:
test_lists.append(finalized_op_output_ids)
test_names.append("FT-OP-decoding-beamsearch")
if time_args.find("2") != -1:
test_lists.append(tf_sampling_target_ids)
test_names.append("TF-decoding-sampling")
if time_args.find("3") != -1:
test_lists.append(op_sampling_target_ids)
test_names.append("FT-OP-decoding-sampling")
test_time_result = []
for op in test_lists:
test_time_result.append(time_test(sess, op, iterations=10, warmup=True))
for name, t_result in zip(test_names, test_time_result):
if name.find("beamsearch") != -1:
print("[INFO] batch_size {} beam_width {} head_num {} size_per_head {} seq_len {} " \
"decoder_layers {} vocab_size {} {}-time {:6.2f} ms.".format(batch_size, beam_width, head_num, size_per_head,
max_seq_len, num_layer, vocab_size, name, t_result))
elif name.find("sampling") != -1:
print("[INFO] batch_size {} topk {} topp {} head_num {} size_per_head {} seq_len {} " \
"decoder_layers {} vocab_size {} {}-time {:6.2f} ms.".format(batch_size, sampling_topk, sampling_topp, head_num, size_per_head,
max_seq_len, num_layer, vocab_size, name, t_result))
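# Example invocation (flags are illustrative; see the -time help above):
#   python ../examples/tensorflow/decoding/decoding_example.py -batch 4 -beam 4 -s 32 -time '02'
# '0' times the TF beam search path and '2' times the TF sampling path in one run.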
| FasterTransformer-main | examples/tensorflow/decoding/decoding_example.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow as tf
from sacrebleu import corpus_bleu
def bleu_score(pred_file, ref_file):
with tf.io.gfile.GFile(pred_file) as pred_stream, tf.io.gfile.GFile(ref_file) as ref_stream:
pred_stream_txt = pred_stream.readlines()
ref_stream_txt = ref_stream.readlines()
bleu = corpus_bleu(pred_stream_txt, [ref_stream_txt], force=True)
print(" bleu score: {:6.2f}".format(bleu.score))
print(" bleu counts: {}".format(bleu.counts))
print(" bleu totals: {}".format(bleu.totals))
print(" bleu precisions: {}".format(bleu.precisions))
print(" bleu sys_len: {}; ref_len: {}".format(bleu.sys_len, bleu.ref_len))
return bleu
if __name__ == "__main__":
if len(sys.argv) != 3:
print("[ERROR] bleu_score.py needs a result file and a solution file. \n e.g. python bleu_score.py f1.txt f2.txt")
sys.exit(0)
bleu_score(sys.argv[1], sys.argv[2])
| FasterTransformer-main | examples/tensorflow/decoding/utils/bleu_score.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import os
import pickle
import sys
from examples.tensorflow.decoder.utils.position import SinusoidalPositionEncoder
def finalize(beam_width, parent_ids, sequence_lengths, outputs, end_id, max_seq_len=None):
maximum_lengths = tf.reduce_max(tf.reshape(
sequence_lengths, [-1, beam_width]), axis=-1)
if max_seq_len != None:
array_shape = [max_seq_len, -1, beam_width]
else:
array_shape = [tf.reduce_max(maximum_lengths), -1, beam_width]
step_ids = tf.reshape(outputs, array_shape)
parent_ids = tf.reshape(parent_ids, array_shape)
ids = tf.contrib.seq2seq.gather_tree(
step_ids, parent_ids, maximum_lengths, end_id)
ids = tf.transpose(ids, perm=[1, 2, 0])
lengths = tf.not_equal(ids, end_id)
lengths = tf.cast(lengths, tf.int32)
lengths = tf.reduce_sum(lengths, axis=-1)
return ids, lengths
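# Shape sketch for finalize() (sizes are illustrative): with beam_width=4 and flattened
# outputs/parent_ids covering [max_seq_len, batch_size * 4] entries, gather_tree
# back-traces the beams and the function returns ids of shape [batch_size, 4, max_seq_len]
# together with per-beam lengths of shape [batch_size, 4].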
def ft_decoding(memory_tensor,
memory_sequence_length,
embedding_table,
decoding_vars,
decoding_args,
using_model_var=True,
checkpoint_filename=None):
'''
    Run beam search or sampling decoding through the FasterTransformer decoding custom op.
Args:
memory_tensor: A tf.tensor with shape [batch_size * beam_width, max(memory_sequence_length), encoder_hidden_dimension].
The results of encoder transformer layer. The rank must be 3.
Note that it must be extended by beam_width times.
memory_sequence_length: A tf.Tensor with shape [batch_size * beam_width], type tf.int.
The length of each sentence of results of encoder.
Note that it must be extended by beam_width times.
embedding_table: A tf.Tensor with shape [vocab_size, hidden_dimension].
The embedding table of embedding lookup for each step.
        decoding_vars: A list of tf.Tensor. The model variables of the TensorFlow decoder,
                       used as the weights of the FasterTransformer decoding op.
        decoding_args: The arguments for decoding. The details are in the class
                       "DecodingArgumentNew" of examples/tensorflow/decoder/utils/common.py.
        using_model_var: A bool value. Kept for interface compatibility; not used by this implementation.
        checkpoint_filename: A string. Kept for interface compatibility; not used by this implementation.
    Outputs:
        output_ids: A tf.Tensor with shape [batch_size, beam_width, max_seq_len] when
                    beam_width > 1, or [batch_size, max_seq_len] when beam_width == 1,
                    with tf.int type. The token ids produced by decoding.
        sequence_lengths: A tf.Tensor with shape [batch_size * beam_width], with int type.
        cum_log_probs: A tf.Tensor with float type. The cumulative log probabilities of the
                       generated sequences.
        The last two returned values are always None; they are kept only so the signature
        matches the previous interface.
'''
decoder_args = decoding_args.decoder_args
decoding_op_module = tf.load_op_library(os.path.join('./lib/libtf_decoding.so'))
extended_memory = tf.contrib.seq2seq.tile_batch(
memory_tensor, multiplier=decoder_args.beam_width)
extended_memory_sequence_length = tf.contrib.seq2seq.tile_batch(
memory_sequence_length, multiplier=decoder_args.beam_width)
position_encoder = SinusoidalPositionEncoder()
position_encoding_table = position_encoder._create_position_encoding_table(
decoding_args.max_seq_len, decoder_args.head_num * decoder_args.size_per_head, decoder_args.dtype)
# shape of position_encoding_table: [max_seq_len, hidden_dim]
cross_key_kernel_list = []
cross_value_kernel_list = []
cross_key_bias_list = []
cross_value_bias_list = []
var_dict = {}
for v in decoding_vars:
var_dict[v.name] = v
for l in range(decoder_args.num_layer):
layer_prefix_name = "transformer/decoder/layer_%d/" % l
cross_key_kernel, cross_value_kernel = tf.split(var_dict[layer_prefix_name + 'multi_head/conv1d_1/kernel:0'], 2, axis=-1)
cross_key_bias, cross_value_bias = tf.split(var_dict[layer_prefix_name + 'multi_head/conv1d_1/bias:0'], 2, axis=-1)
cross_key_kernel_list.append(cross_key_kernel)
cross_value_kernel_list.append(cross_value_kernel)
cross_key_bias_list.append(cross_key_bias)
cross_value_bias_list.append(cross_value_bias)
output_ids, parent_ids, sequence_lengths, cum_log_probs = decoding_op_module.decoding(
extended_memory, # 1
extended_memory_sequence_length, # 2
[var_dict["transformer/decoder/layer_%d/masked_multi_head/LayerNorm/beta:0" % l] for l in range(decoder_args.num_layer)], # 7
[var_dict["transformer/decoder/layer_%d/masked_multi_head/LayerNorm/gamma:0" % l] for l in range(decoder_args.num_layer)], # 8
[var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d/kernel:0" % l] for l in range(decoder_args.num_layer)], # 9
[var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d/bias:0" % l] for l in range(decoder_args.num_layer)], # 10
[var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d_1/kernel:0" % l] for l in range(decoder_args.num_layer)], # 11
[var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d_1/bias:0" % l] for l in range(decoder_args.num_layer)], # 12
[var_dict["transformer/decoder/layer_%d/multi_head/LayerNorm/beta:0" % l] for l in range(decoder_args.num_layer)], # 13
[var_dict["transformer/decoder/layer_%d/multi_head/LayerNorm/gamma:0" % l] for l in range(decoder_args.num_layer)], # 14
[var_dict["transformer/decoder/layer_%d/multi_head/conv1d/kernel:0" % l] for l in range(decoder_args.num_layer)], # 15
[var_dict["transformer/decoder/layer_%d/multi_head/conv1d/bias:0" % l] for l in range(decoder_args.num_layer)], # 16
cross_key_kernel_list, # 17
cross_key_bias_list, # 18
cross_value_kernel_list, # 19
cross_value_bias_list, # 20
[var_dict["transformer/decoder/layer_%d/multi_head/conv1d_2/kernel:0" % l] for l in range(decoder_args.num_layer)], # 21
[var_dict["transformer/decoder/layer_%d/multi_head/conv1d_2/bias:0" % l] for l in range(decoder_args.num_layer)], # 22
[var_dict["transformer/decoder/layer_%d/ffn/LayerNorm/beta:0" % l] for l in range(decoder_args.num_layer)], # 23
[var_dict["transformer/decoder/layer_%d/ffn/LayerNorm/gamma:0" % l] for l in range(decoder_args.num_layer)], # 24
[var_dict["transformer/decoder/layer_%d/ffn/conv1d/kernel:0" % l] for l in range(decoder_args.num_layer)], # 25
[var_dict["transformer/decoder/layer_%d/ffn/conv1d/bias:0" % l] for l in range(decoder_args.num_layer)], # 26
[var_dict["transformer/decoder/layer_%d/ffn/conv1d_1/kernel:0" % l] for l in range(decoder_args.num_layer)], # 27
[var_dict["transformer/decoder/layer_%d/ffn/conv1d_1/bias:0" % l] for l in range(decoder_args.num_layer)], # 28
var_dict['transformer/decoder/LayerNorm/beta:0'], # 28
var_dict['transformer/decoder/LayerNorm/gamma:0'], # 29
position_encoding_table, # 33
embedding_table, # 30
var_dict['transformer/decoder/dense/kernel:0'], # 31
var_dict['transformer/decoder/dense/bias:0'], # 32
max_seq_len=decoding_args.max_seq_len,
beam_width=decoder_args.beam_width,
head_num=decoder_args.head_num,
size_per_head=decoder_args.size_per_head,
inter_size=decoder_args.inter_size,
num_layer=decoder_args.num_layer,
start_id=decoding_args.start_id,
end_id=decoding_args.end_id,
beam_search_diversity_rate=decoding_args.beam_search_diversity_rate,
top_k=decoding_args.top_k,
top_p=decoding_args.top_p,
temperature=1.0,
len_penalty=0.0,
repetition_penalty=1.0,
return_cum_log_probs=True
)
if decoder_args.beam_width > 1:
output_ids = tf.transpose(output_ids, [1, 2, 0])
# TODO(bhsueh) Remove useless outputs
return output_ids, sequence_lengths, cum_log_probs, None, None
else:
output_ids = tf.transpose(output_ids, [1, 0])
return output_ids, sequence_lengths, cum_log_probs, None, None | FasterTransformer-main | examples/tensorflow/decoding/utils/ft_decoding.py |
# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This is sample code that demonstrates how to use the TensorFlow custom op of the
FasterTransformer library for the encoder.
It builds a BERT transformer model with both plain TensorFlow and the TensorFlow
custom op, then compares the maximum difference between the two outputs to verify
the correctness of FasterTransformer.
Users can also use this sample code to measure the average forward time of
TensorFlow and FasterTransformer.
'''
import argparse
import copy
import numpy as np
import tensorflow as tf
import threading
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.tensorflow.bert.utils.common import TransformerArgument
from examples.tensorflow.bert.utils.common import time_test
from examples.tensorflow.bert.utils.common import cross_check
from examples.tensorflow.bert.utils.bert import tf_bert
from examples.tensorflow.bert.utils.bert import ft_bert
from examples.tensorflow.bert.utils.bert import build_sequence_mask
def bert_example(args_dict):
print("\n=============== Argument ===============")
for key in args_dict:
print("{}: {}".format(key, args_dict[key]))
print("========================================")
np.random.seed(1)
tf.set_random_seed(1)
batch_size = args_dict['batch_size']
num_layer = args_dict['num_layer']
max_seq_len = args_dict['max_seq_len']
avg_seq_len = args_dict['avg_seq_len']
head_num = args_dict['head_number']
size_per_head = args_dict['size_per_head']
inter_size = args_dict['inter_size']
if inter_size == 0:
inter_size = head_num * size_per_head * 4
tf_datatype = tf.float32
np_datatype = np.float32
atol_threshold = 3e-5
int8_mode = args_dict['int8_mode']
if args_dict['data_type'] == "fp16":
tf_datatype = tf.float16
np_datatype = np.float16
atol_threshold = 3e-2
hidden_dim = head_num * size_per_head
sequence_length = np.random.randint(1, max_seq_len + 1, size=batch_size)
if avg_seq_len != -1:
# When avg_seq_len is given, use it as the fixed sequence length for every sentence (typically together with "remove_padding")
sequence_length = np.ones(batch_size) * avg_seq_len
else:
sequence_length = np.ones(batch_size) * (max_seq_len / 2)
sequence_length = sequence_length.astype(np.int32)
from_data = np.random.randn(batch_size, max_seq_len, hidden_dim)
from_tensor = tf.convert_to_tensor(from_data, dtype=tf_datatype)
attention_mask = build_sequence_mask(sequence_length, num_heads=head_num, maximum_length=max_seq_len, dtype=tf_datatype)
encoder_args = TransformerArgument(beam_width=1,
head_num=head_num,
size_per_head=size_per_head,
inter_size=inter_size,
num_layer=num_layer,
dtype=tf_datatype,
int8_mode=int8_mode,
remove_padding=False)
eff_encoder_args = copy.deepcopy(encoder_args)
eff_encoder_args.remove_padding = True
tf_encoder_result = tf_bert(input_tensor=from_tensor,
encoder_args=encoder_args,
attention_mask=attention_mask)
encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
encoder_variables_dict = {}
for v in encoder_vars:
encoder_variables_dict[v.name] = v
op_encoder_result = ft_bert(inputs=from_tensor,
encoder_args=encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
eff_encoder_result = ft_bert(inputs=from_tensor,
encoder_args=eff_encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
'''
Because FasterTransformer skips some computation for the padding parts,
the cross-check result would be wrong if we did not mask these parts.
'''
# Prevent NaN: FasterTransformer skips writing to the padded positions, so those positions may hold uninitialized (dirty) values.
eff_encoder_result = tf.where(tf.is_nan(eff_encoder_result), tf.zeros_like(eff_encoder_result), eff_encoder_result)
tf_encoder_result = tf_encoder_result * tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
op_encoder_result = op_encoder_result * tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
eff_encoder_result = eff_encoder_result * tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
for idx, name in enumerate(encoder_variables_dict):
print((str(idx) + " " + str(name) + " " +
str(encoder_variables_dict[name].shape)) + " " + str(encoder_variables_dict[name].dtype))
print("#################################")
tf_encoder_result_val = sess.run(tf_encoder_result)
op_encoder_result_val = sess.run(op_encoder_result)
eff_encoder_result_val = sess.run(eff_encoder_result)
cross_check("Encoder TF v.s. FT with tensor input", tf_encoder_result_val, op_encoder_result_val, atol_threshold)
cross_check("Encoder TF v.s. EFF-FT with tensor input", tf_encoder_result_val, eff_encoder_result_val, atol_threshold)
op_diff = abs(tf_encoder_result_val.reshape([-1]) - op_encoder_result_val.reshape([-1]))
eff_diff = abs(tf_encoder_result_val.reshape([-1]) - eff_encoder_result_val.reshape([-1]))
max_diff = max(op_diff.max(), eff_diff.max())
ite = 50
def _cond(from_tensor):
return tf.constant(True)
def _ft_body(from_tensor):
op_encoder_result = ft_bert(inputs=from_tensor,
encoder_args=encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
return op_encoder_result
def _eff_body(from_tensor):
eff_encoder_result = ft_bert(inputs=from_tensor,
encoder_args=eff_encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=sequence_length)
return eff_encoder_result
def _tf_body(from_tensor):
tf_encoder_result = tf_bert(input_tensor=from_tensor,
encoder_args=encoder_args,
attention_mask=attention_mask)
return tf_encoder_result
tf_while_tensor = tf.while_loop(_cond,
_tf_body,
loop_vars=[from_tensor],
back_prop=False,
maximum_iterations=ite)
ft_while_tensor = tf.while_loop(_cond,
_ft_body,
loop_vars=[from_tensor],
back_prop=False,
maximum_iterations=ite)
eff_while_tensor = tf.while_loop(_cond,
_eff_body,
loop_vars=[from_tensor],
back_prop=False,
maximum_iterations=ite)
if args_dict['test_time'] == 1:
# Use a while loop to run 'ite' iterations so that the overhead of memory copies and model preprocessing is amortized.
# These timings are reported as the profiling results.
tf_while_time = time_test(sess, tf_while_tensor, 1) / ite # while_loop has run ite times
# time.sleep(60)
ft_while_time = time_test(sess, ft_while_tensor, 1) / ite # while_loop has run ite times
# time.sleep(60)
eff_while_time = time_test(sess, eff_while_tensor, 1) / ite # while_loop has run ite times
# time.sleep(60)
ft_type = args_dict['data_type'].upper()
print("[INFO] batch_size {} max_seq_len {} precision {} {} layer TF-while-time {:6.2f} ms ( {} iterations)".format(batch_size, max_seq_len, args_dict['data_type'].upper(), num_layer, tf_while_time, ite))
print("[INFO] batch_size {} max_seq_len {} precision {} {} layer FT-OP-while-time {:6.2f} ms ( {} iterations)".format(batch_size, max_seq_len, ft_type, num_layer, ft_while_time, ite))
print("[INFO] batch_size {} max_seq_len {} precision {} {} layer EFF-OP-while-time {:6.2f} ms ( {} iterations)".format(batch_size, max_seq_len, ft_type, num_layer, eff_while_time, ite))
if args_dict['thread_num'] > 1:
# Multi-threading demonstration
thread_list = []
thread_num = args_dict['thread_num']
def run():
ft_while_time = time_test(sess, ft_while_tensor, 1) / ite # while_loop has run ite times
print("[INFO] batch_size {} max_seq_len {} {} layer FT-OP-while-time {:6.2f} ms with {} threads".format(batch_size,
max_seq_len, num_layer, ft_while_time, thread_num))
for i in range(thread_num):
thread_list.append(threading.Thread(target=run, name="RunFT"))
for t in thread_list:
t.start()
for t in thread_list:
t.join()
sys.stdout.flush()
return max_diff
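# A minimal programmatic-invocation sketch (hypothetical values mirroring the
# argparse defaults below); the canonical entry point remains the __main__
# block. Defined for illustration only and never called.
def _example_bert_run():
    example_args = {
        'batch_size': 4, 'num_layer': 12, 'max_seq_len': 32, 'avg_seq_len': -1,
        'head_number': 12, 'size_per_head': 64, 'inter_size': 0,
        'data_type': 'fp32', 'test_time': 0, 'int8_mode': 0, 'thread_num': 1,
    }
    return bert_example(example_args)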
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-batch', '--batch_size', type=int, default=4, metavar='NUMBER',
help='batch size (default: 4)')
parser.add_argument('-l', '--num_layer', type=int, default=12, metavar='NUMBER',
help='number of layers (default: 12)')
parser.add_argument('-s', '--max_seq_len', type=int, default=32, metavar='NUMBER',
help='max sequence length (default: 32)')
parser.add_argument('-n', '--head_number', type=int, default=12, metavar='NUMBER',
help='head number (default: 12)')
parser.add_argument('-size', '--size_per_head', type=int, default=64, metavar='NUMBER',
help='size per head (default: 64)')
parser.add_argument('-inter_size', '--inter_size', type=int, default=0, metavar='NUMBER',
help='inter_size (default: 0)')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16'])
parser.add_argument('-time', '--test_time', type=int, default=0, metavar='BOOL',
help='test the time or not. (default: False (0)), True is 1.',
choices=[0, 1])
parser.add_argument('-int8', '--int8_mode', type=int, default=0, metavar='NUMBER',
help='int8 mode. (default: 0)',
choices=[0, 1, 2, 3])
parser.add_argument('-avg_seq', '--avg_seq_len', type=int, default=-1, metavar='NUMBER',
help='average sequence length (default: -1)')
parser.add_argument('-thread_num', '--thread_num', type=int, default=1, metavar='int',
help='Testing multithread if thread_num > 1.')
args = parser.parse_args()
bert_example(vars(args)) | FasterTransformer-main | examples/tensorflow/bert/bert_example.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| FasterTransformer-main | examples/tensorflow/bert/utils/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import tensorflow as tf
def pad_in_time(x, padding_length):
"""Helper function to pad a tensor in the time dimension and retain the static depth dimension."""
return tf.pad(x, [[0, 0], [0, padding_length], [0, 0]])
def align_in_time(x, length):
"""Aligns the time dimension of :obj:`x` with :obj:`length`."""
time_dim = tf.shape(x)[1]
return tf.cond(
tf.less(time_dim, length),
true_fn=lambda: pad_in_time(x, length - time_dim),
false_fn=lambda: x[:, :length])
def pad_with_identity(x, sequence_length, max_sequence_length, identity_values=0, maxlen=None):
"""Pads a tensor with identity values up to :obj:`max_sequence_length`.
Args:
x: A ``tf.Tensor`` of shape ``[batch_size, time, depth]``.
sequence_length: The true sequence length of :obj:`x`.
max_sequence_length: The sequence length up to which the tensor must contain
:obj:`identity_values`.
identity_values: The identity value.
maxlen: Size of the output time dimension. Default is the maximum value in
:obj:`max_sequence_length`.
Returns:
A ``tf.Tensor`` of shape ``[batch_size, maxlen, depth]``.
"""
if maxlen is None:
maxlen = tf.reduce_max(max_sequence_length)
mask = tf.sequence_mask(sequence_length, maxlen=maxlen, dtype=x.dtype)
mask = tf.expand_dims(mask, axis=-1)
mask_combined = tf.sequence_mask(
max_sequence_length, maxlen=maxlen, dtype=x.dtype)
mask_combined = tf.expand_dims(mask_combined, axis=-1)
identity_mask = mask_combined * (1.0 - mask)
x = pad_in_time(x, maxlen - tf.shape(x)[1])
x = x * mask + (identity_mask * identity_values)
return x
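# A small usage sketch (TF1.x graph mode, as assumed throughout this file):
# positions beyond each true length are filled with the identity value while
# valid positions are kept. Values below are hypothetical; never called.
def _example_pad_with_identity():
    x = tf.ones([2, 3, 4])                       # [batch, time, depth]
    sequence_length = tf.constant([2, 3])        # true lengths
    max_sequence_length = tf.constant([3, 3])    # lengths to pad up to
    # Batch 0, time step 2 becomes -1.0; everything else stays 1.0.
    return pad_with_identity(x, sequence_length, max_sequence_length,
                             identity_values=-1.0)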
def pad_n_with_identity(inputs, sequence_lengths, identity_values=0):
"""Pads each input tensors with identity values up to
``max(sequence_lengths)`` for each batch.
Args:
inputs: A list of ``tf.Tensor``.
sequence_lengths: A list of sequence length.
identity_values: The identity value.
Returns:
A tuple ``(padded, max_sequence_length)`` which are respectively a list of
``tf.Tensor`` where each tensor are padded with identity and the combined
sequence length.
"""
max_sequence_length = tf.reduce_max(sequence_lengths, axis=0)
maxlen = tf.reduce_max([tf.shape(x)[1] for x in inputs])
padded = [
pad_with_identity(
x, length, max_sequence_length, identity_values=identity_values, maxlen=maxlen)
for x, length in zip(inputs, sequence_lengths)]
return padded, max_sequence_length
class Reducer():
"""Base class for reducers."""
def zip_and_reduce(self, x, y):
"""Zips the :obj:`x` with :obj:`y` structures together and reduces all
elements. If the structures are nested, they will be flattened first.
Args:
x: The first structure.
y: The second structure.
Returns:
The same structure as :obj:`x` and :obj:`y` where each element from
:obj:`x` is reduced with the corresponding element from :obj:`y`.
Raises:
ValueError: if the two structures are not the same.
"""
tf.nest.assert_same_structure(x, y)
x_flat = tf.nest.flatten(x)
y_flat = tf.nest.flatten(y)
reduced = list(map(self, zip(x_flat, y_flat)))
return tf.nest.pack_sequence_as(x, reduced)
def __call__(self, inputs, sequence_length=None):
"""Reduces all input elements.
Args:
inputs: A list of ``tf.Tensor``.
sequence_length: The length of each input, if reducing sequences.
Returns:
If :obj:`sequence_length` is set, a tuple
``(reduced_input, reduced_length)``, otherwise a reduced ``tf.Tensor``
only.
"""
if sequence_length is None:
return self.reduce(inputs)
else:
return self.reduce_sequence(inputs, sequence_lengths=sequence_length)
@abc.abstractmethod
def reduce(self, inputs):
"""See :meth:`opennmt.layers.Reducer.__call__`."""
raise NotImplementedError()
@abc.abstractmethod
def reduce_sequence(self, inputs, sequence_lengths):
"""See :meth:`opennmt.layers.Reducer.__call__`."""
raise NotImplementedError()
class SumReducer(Reducer):
"""A reducer that sums the inputs."""
def reduce(self, inputs):
return tf.add_n(inputs)
def reduce_sequence(self, inputs, sequence_lengths):
padded, combined_length = pad_n_with_identity(
inputs, sequence_lengths, identity_values=0)
return self.reduce(padded), combined_length
| FasterTransformer-main | examples/tensorflow/bert/utils/reducer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import tensorflow as tf
import numpy as np
import ctypes
# from utils.beam_search import BeamSearch
# from utils.beam_search import DiverseSiblingSearch
class TransformerArgument:
def __init__( self,
beam_width,
head_num,
size_per_head,
inter_size,
num_layer,
dtype=tf.float32,
kernel_init_range=0.02,
bias_init_range=0.02,
fuse_qkv=True,
remove_padding=False,
int8_mode=0,
memory_hidden_dim=-1):
'''
The arguments of Transformer layer (for both encoder and decoder).
Args:
beam_width: The beam_width size for beam search. This argument is always one for encoder.
head_num: The head number of self attention in transformer layer.
size_per_head: The size of hidden dimension for each head of self attention in transformer layer.
inter_size: The size of intermediate dimension of FFN layer.
num_layer: The number of transformer layers. For example, BERT-base uses 12 layers.
dtype: The data type of the weight initializer and the inputs.
kernel_init_range: The initializer range of the kernels for all convolution and fully-connected layers.
bias_init_range: The initializer range of the biases for all convolution and fully-connected layers.
fuse_qkv: bool. Whether fuse the q, k, v gemm or not.
remove_padding: bool. Remove the padding of sentences of encoder.
int8_mode: Mode of int8 quantization. 0 means not using int8 quantization, 1 means using int8 quantization without quantizing residuals,
2 means using int8 quantization with quantizing residuals.
'''
self.beam_width = beam_width
self.head_num = head_num
self.size_per_head = size_per_head
self.inter_size = inter_size
self.num_layer = num_layer
self.dtype = dtype
self.hidden_dim = self.head_num * self.size_per_head
self.kernel_init_range = kernel_init_range
self.bias_init_range = bias_init_range
self.int8_mode = int8_mode
if self.dtype == tf.float32:
self.check_threshold = 2e-5
elif self.dtype == tf.float16:
self.check_threshold = 2e-2
self.fuse_qkv = fuse_qkv
self.remove_padding = remove_padding
self.memory_hidden_dim = memory_hidden_dim
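# Construction sketch (hypothetical BERT-base-like sizes; encoders always use
# beam_width=1). Defined for illustration only and never called.
def _example_transformer_argument():
    head_num, size_per_head = 12, 64
    return TransformerArgument(beam_width=1,
                               head_num=head_num,
                               size_per_head=size_per_head,
                               inter_size=head_num * size_per_head * 4,
                               num_layer=12,
                               dtype=tf.float32,
                               remove_padding=False)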
def create_initializer(initializer_range=0.02, data_type=tf.float32):
return tf.truncated_normal_initializer(stddev=initializer_range, dtype=data_type)
def time_test(sess, tensor, iterations=100, warmup=True):
# return in ms
# warmup
if warmup == True:
for i in range(iterations):
sess.run(tensor)
t1 = datetime.now()
for i in range(iterations):
sess.run(tensor)
t2 = datetime.now()
time_sum = (t2 - t1).total_seconds()
return time_sum * 1000 / iterations
def cross_check(name, tf_val, op_val, atol_threshold):
abs_diff = np.fabs(tf_val - op_val)
print("[INFO] {} Cross check {}".format(name, np.allclose(tf_val, op_val, atol=atol_threshold)))
print("[INFO] Max diff {}".format(abs_diff.max()))
print("[INFO] min diff {}".format(abs_diff.min()))
def int_result_cross_check(name, tf_result, op_result, shape):
print(" ")
is_same = (tf_result.flatten() == op_result.flatten()).all()
print(" {} cross-check: {}".format(name, is_same))
if is_same == False:
tf_reshaped_result = np.reshape(tf_result, shape)
op_reshaped_result = np.reshape(op_result, shape)
for i in range(tf_reshaped_result.shape[0]):
is_true = (tf_reshaped_result[i] == op_reshaped_result[i]).all()
print(" Cross-Check on batch-{} {}".format(i, is_true))
if is_true == False:
print("TF result: {}".format(tf_reshaped_result[i]))
print("OP result: {}".format(op_reshaped_result[i]))
class cudaProfiler:
def __init__(self):
self.profiler = ctypes.CDLL("libcudart.so")
def start(self):
ret = self.profiler.cudaProfilerStart()
if ret != 0:
raise Exception("cudaProfilerStart() return %d " %ret)
def stop(self):
ret = self.profiler.cudaProfilerStop()
if ret != 0:
raise Exception("cudaProfilerStop() return %d " %ret)
| FasterTransformer-main | examples/tensorflow/bert/utils/common.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import abc
import tensorflow as tf
from examples.tensorflow.bert.utils.reducer import SumReducer
class PositionEncoder(tf.keras.layers.Layer):
"""Base class for position encoders."""
def __init__(self, reducer=SumReducer(), **kwargs):
"""Initializes the position encoder.
Args:
reducer: A :class:`opennmt.layers.Reducer` to merge inputs and position
encodings.
**kwargs: Additional layer keyword arguments.
"""
super(PositionEncoder, self).__init__(**kwargs)
self.reducer = reducer
def call(self, inputs, position=None): # pylint: disable=arguments-differ
"""Add position encodings to :obj:`inputs`.
Args:
inputs: The inputs to encode.
position: The single position to encode, to use when this layer is called
step by step.
Returns:
A ``tf.Tensor`` whose shape depends on the configured ``reducer``.
"""
batch_size = tf.shape(inputs)[0]
timesteps = tf.shape(inputs)[1]
input_dim = inputs.get_shape().as_list()[-1] # return int
positions = tf.range(timesteps) + 1 if position is None else position
position_encoding = self._encode([positions], input_dim, dtype=inputs.dtype)
position_encoding = tf.tile(position_encoding, [batch_size, 1, 1])
return self.reducer([inputs, position_encoding])
@abc.abstractmethod
def _encode(self, positions, depth, dtype):
"""Creates position encodings.
Args:
positions: The positions to encode of shape :math:`[B, ...]`.
depth: The encoding depth :math:`D`.
Returns:
A ``tf.Tensor`` of shape :math:`[B, ..., D]`.
"""
raise NotImplementedError()
def _create_position_encoding_table(self, max_seq_len, input_dim, dtype):
positions = tf.range(max_seq_len) + 1
self.position_encoding_table = self._encode([positions], input_dim, dtype=dtype)
self.position_encoding_table = tf.squeeze(self.position_encoding_table)
return self.position_encoding_table
class SinusoidalPositionEncoder(PositionEncoder):
"""Encodes positions with sine waves as described in
https://arxiv.org/abs/1706.03762.
"""
def _encode(self, positions, depth, dtype):
if depth % 2 != 0:
raise ValueError("SinusoidalPositionEncoder expects the depth to be divisble "
"by 2 but got %d" % depth)
batch_size = tf.shape(positions)[0]
positions = tf.cast(positions, tf.float32)
log_timescale_increment = math.log(10000) / (depth / 2 - 1)
inv_timescales = tf.exp(
tf.cast(tf.range(depth / 2), dtype=tf.float32) * -log_timescale_increment)
inv_timescales = tf.reshape(
tf.tile(inv_timescales, [batch_size]), [batch_size, -1])
scaled_time = tf.expand_dims(
positions, -1) * tf.expand_dims(inv_timescales, 1)
encoding = tf.concat(
[tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
return tf.cast(encoding, dtype)
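# Sketch: build a [max_seq_len, hidden_dim] sinusoidal table, as the
# FasterTransformer decoding example does before calling the custom op.
# Sizes are hypothetical; defined for illustration only and never called.
def _example_position_encoding_table():
    encoder = SinusoidalPositionEncoder()
    # Evaluates to a [128, 512] tensor in a session.
    return encoder._create_position_encoding_table(128, 512, tf.float32)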
| FasterTransformer-main | examples/tensorflow/bert/utils/position.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import math
import six
import os
from examples.tensorflow.bert.utils.common import create_initializer
ACTIVATION_AMAX_NUM = 72
INT8O_GEMM_NUM = 8
TRT_AMAX_NUM = 3
SCALE_RESERVE_NUM = 21
def gelu(x):
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
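# Sanity-check sketch: the tanh formulation above approximates the exact GELU
# x * 0.5 * (1 + erf(x / sqrt(2))); the two agree to within about 1e-3.
# Assumes tf.math.erf is available (TF >= 1.13); illustration only, never called.
def _example_gelu_residual():
    x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
    exact = x * 0.5 * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))
    return gelu(x) - exact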
def layer_norm(input_tensor, name=None):
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None,
tf_datatype=tf.float32):
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
use_bias=True,
bias_initializer=create_initializer(initializer_range, tf_datatype),
kernel_initializer=create_initializer(initializer_range, tf_datatype))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
use_bias=True,
bias_initializer=create_initializer(initializer_range, tf_datatype),
kernel_initializer=create_initializer(initializer_range, tf_datatype))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
use_bias=True,
bias_initializer=create_initializer(initializer_range, tf_datatype),
kernel_initializer=create_initializer(initializer_range, tf_datatype))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
if attention_mask.shape.ndims == 3:  # static rank check; tf.rank() would return a tensor here
attention_mask = tf.expand_dims(attention_mask, axis=[1])
adder = (1.0 - tf.cast(attention_mask, tf_datatype)) * -10000.0
attention_scores += adder
attention_probs = tf.nn.softmax(attention_scores)
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
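# Shape sketch for the rank-3 path (hypothetical sizes): a [2, 8, 64] input
# with 4 heads of size 16 returns a [2, 8, 64] context tensor. Defined for
# illustration only and never called.
def _example_attention_layer_shapes():
    from_tensor = tf.random_normal([2, 8, 64])   # [batch, seq_len, hidden]
    return attention_layer(from_tensor, from_tensor,
                           num_attention_heads=4, size_per_head=16)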
def tf_bert(input_tensor,
encoder_args,
attention_mask=None,
intermediate_act_fn=gelu,
initializer_range=0.02):
'''
Run the BERT transformer layers with TensorFlow.
Args:
input_tensor: A tf.Tensor with shape [batch_size, seq_len, hidden_dimension].
The input tensor of the encoder. The rank must be 3.
encoder_args: The arguments for the encoder. The details are in the class
"TransformerArgument" of common.py.
attention_mask: A tf.Tensor. The attention mask for self attention.
intermediate_act_fn: A callable function.
The activation function in the FFN. It is gelu in BERT.
initializer_range: A float value.
The range of the initializer for all weights.
Outputs:
outputs: A tf.Tensor with shape [batch_size, seq_len, hidden_dimension].
The results of the encoder.
'''
if encoder_args.hidden_dim % encoder_args.head_num != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (encoder_args.hidden_dim, encoder_args.head_num))
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
prev_output = reshape_to_matrix(input_tensor)
for layer_idx in range(encoder_args.num_layer):
with tf.variable_scope("layer_%d" % layer_idx, reuse=tf.AUTO_REUSE):
layer_input = prev_output
with tf.variable_scope("attention"):
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=encoder_args.head_num,
size_per_head=encoder_args.size_per_head,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length,
tf_datatype=encoder_args.dtype)
attention_output = attention_head
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
encoder_args.hidden_dim,
use_bias=True,
bias_initializer=create_initializer(
initializer_range, encoder_args.dtype),
kernel_initializer=create_initializer(initializer_range, encoder_args.dtype))
attention_output = layer_norm(
attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
encoder_args.inter_size,
activation=intermediate_act_fn,
use_bias=True,
bias_initializer=create_initializer(
initializer_range, encoder_args.dtype),
kernel_initializer=create_initializer(initializer_range, encoder_args.dtype))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
encoder_args.hidden_dim,
use_bias=True,
bias_initializer=create_initializer(
initializer_range, encoder_args.dtype),
kernel_initializer=create_initializer(initializer_range, encoder_args.dtype))
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
# amaxList for int8 quantization
if encoder_args.int8_mode != 0:
amaxList = tf.get_variable(name="amaxList", shape=[ACTIVATION_AMAX_NUM + 9*encoder_args.hidden_dim + INT8O_GEMM_NUM + TRT_AMAX_NUM + SCALE_RESERVE_NUM], dtype=tf.float32)
prev_output = tf.reshape(prev_output, shape=tf.shape(input_tensor))
return prev_output
def build_sequence_mask(sequence_length,
num_heads=None,
maximum_length=None,
dtype=tf.float32):
"""Builds the dot product mask.
Args:
sequence_length: The sequence length.
num_heads: The number of heads.
maximum_length: Optional size of the returned time dimension. Otherwise
it is the maximum of :obj:`sequence_length`.
dtype: The type of the mask tensor.
Returns:
A broadcastable ``tf.Tensor`` of type :obj:`dtype` and shape
``[batch_size, 1, max_length, max_length]``.
"""
mask = tf.sequence_mask(sequence_length, maxlen=maximum_length, dtype=dtype) # [batch_size, maximum_length]
mask = tf.reshape(mask, [-1, 1, 1, maximum_length])
m_2 = tf.transpose(mask, [0, 1, 3, 2])
mask = mask * m_2
return mask
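# Sketch: true lengths [1, 3] with maximum_length 3 give a [2, 1, 3, 3] mask
# whose (i, j) entry is 1 only when both positions are within the true length.
# Illustration only, never called.
def _example_build_sequence_mask():
    return build_sequence_mask(tf.constant([1, 3]), num_heads=12,
                               maximum_length=3, dtype=tf.float32)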
def get_shape_list(tensor, expected_rank=None, name=None):
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def ft_bert(inputs,
encoder_args,
encoder_vars_dict,
sequence_length):
'''
Run the BERT transformer layers with FasterTransformer.
Args:
inputs: A tf.Tensor with shape [batch_size, seq_len, hidden_dimension].
The input tensor of the encoder. The rank must be 3.
encoder_args: The arguments for the encoder. The details are in the class "TransformerArgument" of common.py.
encoder_vars_dict: A dict of tf.Tensor or numpy array.
The variables for encoder. They can be either some tensor or some numpy array.
The key is the name of the tensor, like 'layer_0/attention/self/query/kernel:0'.
The value is the corresponding tensor or numpy array
sequence_length: A tf.Tensor or numpy array with shape [batch_size].
The sequence length of the sentences
Outputs:
outputs: A tensor with shape [batch_size, seq_len, hidden_dimension].
The results of encoder.
'''
transformer_op_module = tf.load_op_library(os.path.join('./lib/libtf_bert.so'))
if encoder_args.int8_mode == 0:
outputs = transformer_op_module.bert(
inputs,
inputs,
sequence_length,
[encoder_vars_dict['layer_%d/attention/self/query/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/self/query/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/self/key/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/self/key/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/self/value/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/self/value/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/output/dense/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/output/dense/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/output/LayerNorm/beta:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/output/LayerNorm/gamma:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/intermediate/dense/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/intermediate/dense/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/output/dense/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/output/dense/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/output/LayerNorm/beta:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/output/LayerNorm/gamma:0' % id] for id in range(encoder_args.num_layer)],
head_num = encoder_args.head_num, size_per_head = encoder_args.size_per_head,
inter_size = encoder_args.inter_size,
num_layer = encoder_args.num_layer, remove_padding=encoder_args.remove_padding,
q_scaling = 1.0)
else:
outputs = transformer_op_module.bert_int8(
inputs,
inputs,
sequence_length,
[encoder_vars_dict['layer_%d/attention/self/query/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/self/query/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/self/key/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/self/key/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/self/value/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/self/value/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/output/dense/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/output/dense/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/output/LayerNorm/beta:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/attention/output/LayerNorm/gamma:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/intermediate/dense/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/intermediate/dense/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/output/dense/kernel:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/output/dense/bias:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/output/LayerNorm/beta:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/output/LayerNorm/gamma:0' % id] for id in range(encoder_args.num_layer)],
[encoder_vars_dict['layer_%d/amaxList:0' % id] for id in range(encoder_args.num_layer)],
head_num = encoder_args.head_num,
size_per_head = encoder_args.size_per_head,
inter_size = encoder_args.inter_size,
num_layer = encoder_args.num_layer,
int8_mode = encoder_args.int8_mode,
remove_padding=encoder_args.remove_padding,
q_scaling = 1.0)
return outputs
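# Usage sketch (assumptions: ./lib/libtf_bert.so has been built, and the
# tf_bert graph was created at the top-level variable scope so the variable
# names match the 'layer_%d/...' keys used above). Illustration only.
def _example_ft_bert(from_tensor, encoder_args, sequence_length):
    encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    encoder_vars_dict = {v.name: v for v in encoder_vars}
    return ft_bert(inputs=from_tensor,
                   encoder_args=encoder_args,
                   encoder_vars_dict=encoder_vars_dict,
                   sequence_length=sequence_length)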
| FasterTransformer-main | examples/tensorflow/bert/utils/bert.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.python.client import device_lib
import time
import contextlib
from tensorflow.python.client import timeline
import os
import tensorflow as tf
class Profiler():
def __init__(self, profile_name_pref):
self.profile_name_pref = profile_name_pref
self.run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
self.run_metadata = tf.RunMetadata()
self.ctr = 0
self.time_avg = 0
@contextlib.contextmanager
def prof_run(self):
start = time.time()
yield
end = time.time()
self.time_avg = (self.time_avg * self.ctr + end - start)/(self.ctr + 1)
fetched_timeline = timeline.Timeline(self.run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
file_name = self.profile_name_pref + '_' + str(self.ctr) + '.json'
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'w') as f:
f.write(chrome_trace)
self.ctr += 1
def run_profile(graph_fn, jit_xla, num_iter, profiler=None, init_checkpoint=None, check_result=True, dryrun_iter=1):
config = tf.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = jit_xla
fetches = graph_fn()
with tf.Session(config=config) as sess:
# init
if init_checkpoint is None:
sess.run(tf.global_variables_initializer())
else:
saver = tf.train.Saver()
saver.restore(sess, init_checkpoint)
# dry run
for _ in range(dryrun_iter):
sess.run(fetches)
res = []
if profiler is None:
start_time = time.time()
if check_result:
for _ in range(num_iter):
res.append(sess.run(fetches))
else:
for _ in range(num_iter):
sess.run(fetches)
end_time = time.time()
time_avg = (end_time - start_time)/num_iter
else:
if check_result:
for _ in range(num_iter):
with profiler.prof_run():
res.append(sess.run(fetches, options=profiler.run_options, run_metadata=profiler.run_metadata))
else:
for _ in range(num_iter):
with profiler.prof_run():
sess.run(fetches, options=profiler.run_options, run_metadata=profiler.run_metadata)
time_avg = profiler.time_avg
return time_avg, res
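# Usage sketch (hypothetical graph_fn and output prefix): profile a tiny graph
# for 10 iterations with XLA disabled and write Chrome traces under ./prof/.
# Illustration only, never called.
def _example_run_profile():
    def graph_fn():
        x = tf.random_normal([8, 128, 768])
        return tf.reduce_sum(x)
    profiler = Profiler('./prof/example')
    return run_profile(graph_fn, tf.OptimizerOptions.OFF, 10, profiler=profiler)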
| FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/profile_util.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage example
# export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
# export GLUE_DIR=/path/to/glue
# python run_classifier_wrap.py --floatx=float16 --task_name=MRPC --do_eval=true --data_dir=$GLUE_DIR/MRPC --vocab_file=$BERT_BASE_DIR/vocab.txt --bert_config_file=$BERT_BASE_DIR/bert_config.json --init_checkpoint=mrpc_output/fp16_model.ckpt --max_seq_length=128 --eval_batch_size=8 --output_dir=mrpc_output
# FP32 Tensorflow Transformer MRPC result
# INFO:tensorflow: eval_accuracy = 0.877451
# INFO:tensorflow: eval_loss = 0.44744828
# INFO:tensorflow: global_step = 0
# INFO:tensorflow: loss = 0.44744828
# FP32 Faster Transformer MRPC result
# INFO:tensorflow: eval_accuracy = 0.877451
# INFO:tensorflow: eval_loss = 0.4474482
# INFO:tensorflow: global_step = 0
# INFO:tensorflow: loss = 0.4474482
# FP16 Tensorflow Transformer MRPC result
# INFO:tensorflow: eval_accuracy = 0.875
# INFO:tensorflow: eval_loss = 0.44760832
# INFO:tensorflow: global_step = 0
# INFO:tensorflow: loss = 0.44760215
# FP16 Faster Transformer MRPC result
# INFO:tensorflow: eval_accuracy = 0.875
# INFO:tensorflow: eval_loss = 0.44731623
# INFO:tensorflow: global_step = 0
# INFO:tensorflow: loss = 0.44728807
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import random
bert_submodule = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bert')
sys.path.insert(0, bert_submodule)
import tensorflow as tf
# import run_classifier as rc
import run_squad as rs
import fast_infer_util as fiu
import my_modeling
flags = tf.flags
FLAGS = flags.FLAGS
# replace transformer implementation
my_modeling.transformer_model = fiu.fast_transformer_model_trans
# replace the model to support fp16 data type
rs.create_model = fiu.create_model_squad
# replace the input function to drop remainder
rs.file_based_input_fn_builder = fiu.file_based_input_fn_builder_drop
def get_act_seq_len(examples, tokenizer, max_seq_length,
doc_stride, max_query_length):
act_seq_len = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = rs.collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = rs._check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
act_seq_len.append(len(input_ids))
return act_seq_len
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
bert_config = rs.modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
rs.validate_flags_or_throw(bert_config)
tf.gfile.MakeDirs(FLAGS.output_dir)
tokenizer = rs.tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = rs.read_squad_examples(
input_file=FLAGS.train_file, is_training=True)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
# Pre-shuffle the input to avoid having to make a very large shuffle
# buffer in the `input_fn`.
rng = random.Random(12345)
rng.shuffle(train_examples)
model_fn = rs.model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
# We write to a temporary file to avoid storing very large constant tensors
# in memory.
train_writer = rs.FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "train.tf_record"),
is_training=True)
rs.convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature)
train_writer.close()
tf.logging.info("***** Running training *****")
tf.logging.info(" Num orig examples = %d", len(train_examples))
tf.logging.info(" Num split examples = %d", train_writer.num_features)
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
del train_examples
train_input_fn = rs.input_fn_builder(
input_file=train_writer.filename,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_predict:
eval_examples = rs.read_squad_examples(
input_file=FLAGS.predict_file, is_training=False)
act_seq_len = get_act_seq_len(eval_examples, tokenizer, FLAGS.max_seq_length,
FLAGS.doc_stride, FLAGS.max_query_length)
eval_writer = rs.FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
rs.convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature)
eval_writer.close()
tf.logging.info("***** Running predictions *****")
tf.logging.info(" Num orig examples = %d", len(eval_examples))
tf.logging.info(" Num split examples = %d", len(eval_features))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
all_results = []
predict_input_fn = rs.input_fn_builder(
input_file=eval_writer.filename,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
# If running eval on the TPU, you will need to specify the number of
# steps.
for idx, result in enumerate(estimator.predict(
predict_input_fn, yield_single_examples=True)):
if len(all_results) % 1000 == 0:
tf.logging.info("Processing example: %d" % (len(all_results)))
unique_id = int(result["unique_ids"])
start_logits = [float(x) for x in result["start_logits"].flat]
end_logits = [float(x) for x in result["end_logits"].flat]
all_results.append(
rs.RawResult(
unique_id=unique_id,
start_logits=start_logits[:act_seq_len[idx]],
end_logits=end_logits[:act_seq_len[idx]]))
output_prediction_file = os.path.join(FLAGS.output_dir, "predictions.json")
output_nbest_file = os.path.join(FLAGS.output_dir, "nbest_predictions.json")
output_null_log_odds_file = os.path.join(FLAGS.output_dir, "null_odds.json")
rs.write_predictions(eval_examples, eval_features, all_results,
FLAGS.n_best_size, FLAGS.max_answer_length,
FLAGS.do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file)
if __name__ == "__main__":
# flags.mark_flag_as_required("data_dir")
# flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
flags.DEFINE_string("floatx", None, "float32 or float16")
flags.mark_flag_as_required("floatx")
flags.DEFINE_bool("remove_padding", False, "Remove padding or Not")
flags.DEFINE_integer("int8_mode", 0, "whether use int8 or not; and how to use int8")
tf.app.run()
| FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/run_squad_wrap.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
import tensorflow as tf
import os
import sys
from my_modeling import *
build_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../lib')
bert_op_module = tf.load_op_library(
os.path.join('./lib/libtf_bert.so'))
ACTIVATION_AMAX_NUM = 72
INT8O_GEMM_NUM = 8
TRT_AMAX_NUM = 3
SCALE_RESERVE_NUM = 21
def file_based_input_fn_builder_drop(input_file, seq_length, is_training,
drop_remainder):
""" Re-implementation of file_based_input_fn_builder function from modeling.py from Google's BERT repository https://github.com/google-research/bert
with drop_remainder=True.
"""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
# FASTINFER: drop remainder always
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=True))
return d
return input_fn
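# Usage sketch (not part of the original utility, added for illustration): how the
# returned input_fn is typically consumed outside of an Estimator. The record file
# name, sequence length and batch size below are assumptions.
def _example_consume_input_fn(record_file="eval.tf_record", seq_length=128, batch_size=8):
    """Builds the dataset and fetches one decoded, fixed-size batch."""
    input_fn = file_based_input_fn_builder_drop(
        input_file=record_file, seq_length=seq_length,
        is_training=False, drop_remainder=True)
    dataset = input_fn({"batch_size": batch_size})
    features = dataset.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        # Every feature tensor has a static batch dimension because remainders are dropped.
        return sess.run(features)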
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
dtype=tf.flags.FLAGS.floatx,
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels],
dtype=tf.flags.FLAGS.floatx,
initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.flags.FLAGS.floatx)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def create_model_squad(bert_config, is_training, input_ids, input_mask, segment_ids,
use_one_hot_embeddings):
"""Creates a classification model."""
model = BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
final_hidden = model.get_sequence_output()
final_hidden_shape = get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/squad/output_weights", [2, hidden_size],
dtype=tf.flags.FLAGS.floatx,
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/squad/output_bias", [2],
dtype=tf.flags.FLAGS.floatx,
initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden,
[batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0)
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
return (start_logits, end_logits)
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
def fast_transformer_model_trans(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False,
sequence_length=None):
""" Re-implementation of transformer_model function from modeling.py from Google's BERT repository https://github.com/google-research/bert
using FasterTransformer Tensorflow op.
Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(
attention_output, hidden_dropout_prob)
attention_output = layer_norm(
attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
# amaxList
if tf.flags.FLAGS.int8_mode != 0:
amaxList = tf.get_variable(name="amaxList", shape=[ACTIVATION_AMAX_NUM + 9*hidden_size + INT8O_GEMM_NUM + TRT_AMAX_NUM + SCALE_RESERVE_NUM], dtype=tf.float32)
# FASTINFER: fast transformer encoder inference
inputs = input_tensor
int8_mode = tf.flags.FLAGS.int8_mode
remove_padding = tf.flags.FLAGS.remove_padding
graph = tf.get_default_graph()
if int8_mode == 0:
outputs = bert_op_module.bert(
inputs,
inputs,
sequence_length,
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/query/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/query/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/key/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/key/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/value/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/value/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/output/dense/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/output/dense/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/output/LayerNorm/beta:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/output/LayerNorm/gamma:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/intermediate/dense/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/intermediate/dense/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/output/dense/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/output/dense/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/output/LayerNorm/beta:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/output/LayerNorm/gamma:0' % id) for id in range(num_hidden_layers)],
head_num=num_attention_heads, size_per_head=attention_head_size,
inter_size = intermediate_size,
num_layer = num_hidden_layers, remove_padding=remove_padding,
q_scaling = 1.0)
else:
outputs = bert_op_module.bert_int8(
inputs,
inputs,
sequence_length,
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/query/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/query/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/key/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/key/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/value/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/self/value/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/output/dense/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/output/dense/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/output/LayerNorm/beta:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/attention/output/LayerNorm/gamma:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/intermediate/dense/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/intermediate/dense/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/output/dense/kernel:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/output/dense/bias:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/output/LayerNorm/beta:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/output/LayerNorm/gamma:0' % id) for id in range(num_hidden_layers)],
[graph.get_tensor_by_name('bert/encoder/layer_%d/amaxList:0' % id) for id in range(num_hidden_layers)],
head_num=num_attention_heads,
size_per_head=attention_head_size,
inter_size = intermediate_size,
num_layer = num_hidden_layers,
int8_mode = int8_mode,
remove_padding=remove_padding,
q_scaling = 1.0)
# return outputs
print(do_return_all_layers)
final_output = reshape_from_matrix(outputs, input_shape)
print(final_output)
if do_return_all_layers:
return [final_output]
else:
return final_output
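# Illustrative sketch (an assumption, not part of the original file): this op-backed
# encoder looks its weights up by absolute name ("bert/encoder/layer_*/..."), so it
# must be called under the same variable scopes that the standard BertModel opens.
# The hyper-parameters below are BERT-base values used only for demonstration, and
# the calling script is expected to define the int8_mode and remove_padding flags,
# as the run_*_wrap.py and profile_*.py entry points do.
def _example_fast_transformer_call(input_tensor, attention_mask, sequence_length):
    with tf.variable_scope("bert"):
        with tf.variable_scope("encoder"):
            return fast_transformer_model_trans(
                input_tensor=input_tensor,
                attention_mask=attention_mask,
                hidden_size=768,
                num_hidden_layers=12,
                num_attention_heads=12,
                intermediate_size=3072,
                sequence_length=sequence_length)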
| FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/fast_infer_util.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage example
# python ckpt_type_convert.py --init_checkpoint=mrpc_output/model.ckpt-343 --fp16_checkpoint=mrpc_output/fp16_model.ckpt
import numpy as np
import os
import tensorflow as tf
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.ops import io_ops
from tensorflow.python.training.saver import BaseSaverBuilder
def checkpoint_dtype_cast(in_checkpoint_file, out_checkpoint_file):
var_list = checkpoint_utils.list_variables(in_checkpoint_file)
def init_graph():
for name, shape in var_list:
var = checkpoint_utils.load_variable(in_checkpoint_file, name)
if "quant" in name or "amaxList" in name:
recon_dtype = var.dtype
else:
recon_dtype = tf.float16 if var.dtype == np.float32 else var.dtype
tf.get_variable(name, shape=shape, dtype=recon_dtype)
init_graph()
saver = tf.train.Saver(builder=CastFromFloat32SaverBuilder())
with tf.Session() as sess:
saver.restore(sess, in_checkpoint_file)
saver.save(sess, 'tmp-ckpt/tmp.ckpt')
tf.reset_default_graph()
init_graph()
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, 'tmp-ckpt/tmp.ckpt')
saver.save(sess, out_checkpoint_file)
os.system("rm tmp-ckpt -r")
class CastFromFloat32SaverBuilder(BaseSaverBuilder):
# Based on tensorflow.python.training.saver.BulkSaverBuilder.bulk_restore
def bulk_restore(self, filename_tensor, saveables, preferred_shard,
restore_sequentially):
restore_specs = []
for saveable in saveables:
for spec in saveable.specs:
restore_specs.append((spec.name, spec.slice_spec, spec.dtype))
names, slices, dtypes = zip(*restore_specs)
restore_dtypes = [tf.float32 if dtype.base_dtype==tf.float16 else dtype for dtype in dtypes]
# print info
for i in range(len(restore_specs)):
print(names[i], 'from', restore_dtypes[i], 'to', dtypes[i].base_dtype)
with tf.device("cpu:0"):
restored = io_ops.restore_v2(
filename_tensor, names, slices, restore_dtypes)
return [tf.cast(r, dt.base_dtype) for r, dt in zip(restored, dtypes)]
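# Minimal sketch (an assumption, not part of the original script): restoring an fp32
# checkpoint directly into a graph whose variables were declared as fp16. The caller
# is expected to have defined the target fp16 variables already, as init_graph inside
# checkpoint_dtype_cast does.
def _example_cast_restore(fp32_checkpoint_file):
    saver = tf.train.Saver(builder=CastFromFloat32SaverBuilder())
    with tf.Session() as sess:
        # Values are read as fp32 from disk and cast to each variable's declared dtype.
        saver.restore(sess, fp32_checkpoint_file)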
if __name__ == '__main__':
tf.flags.DEFINE_string("fp16_checkpoint", None, "fp16 checkpoint file")
tf.flags.DEFINE_string("init_checkpoint", None, "initial checkpoint file")
checkpoint_dtype_cast(tf.flags.FLAGS.init_checkpoint, tf.flags.FLAGS.fp16_checkpoint)
| FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/ckpt_type_convert.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage example
# export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
# python profile_transformer_inference.py --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt --tf_profile=false --output_dir=mrpc_output --profiling_output_file=time_elapsed --xla=false --floatx=float32
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import device_lib
import time
import contextlib
from tensorflow.python.client import timeline
import os
import tensorflow as tf
import fast_infer_util as fiu
import numpy as np
import profile_util
import sys
import my_modeling
bert_submodule = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bert')
sys.path.insert(0, bert_submodule)
import run_classifier
import optimization
flags = tf.flags
FLAGS = flags.FLAGS
# stacked transformer encoders
class TransformerModel(object):
def __init__(self,
config,
is_training,
input_tensor,
attention_mask,
transformer_model_fn,
scope=None,
sequence_length=None):
config = my_modeling.copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = my_modeling.get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("encoder"):
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model_fn(
input_tensor=input_tensor,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=my_modeling.get_activation(
config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True,
sequence_length=sequence_length)
self.sequence_output = self.all_encoder_layers[-1]
with tf.variable_scope("pooler"):
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=my_modeling.create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
return self.sequence_output
def model_fn_builder(bert_config, transformer_model_fn):
def model_fn(input_tensor, attention_mask, sequence_length=None): # pylint: disable=unused-argument
model = TransformerModel(
config=bert_config,
is_training=False,
input_tensor=input_tensor,
attention_mask=attention_mask,
transformer_model_fn=transformer_model_fn,
sequence_length=sequence_length)
seq_output = model.get_sequence_output()
return seq_output
return model_fn
def profile_model(config, jit_xla, num_iter):
# initialize data
input_data = np.random.randn(
FLAGS.predict_batch_size, FLAGS.max_seq_length, config.hidden_size)
if FLAGS.remove_padding:
sequence_length = (np.ones(FLAGS.predict_batch_size)*FLAGS.max_seq_length/2).astype(np.int32)
else:
sequence_length = (np.ones(FLAGS.predict_batch_size)*FLAGS.max_seq_length).astype(np.int32)
attention_mask = np.zeros((FLAGS.predict_batch_size, FLAGS.max_seq_length))
for i in range(len(sequence_length)):
attention_mask[i, 0:sequence_length[i]] = 1
attention_mask = np.repeat(
attention_mask[:, np.newaxis, :], FLAGS.max_seq_length, axis=1)
model_fn_tf = model_fn_builder(config, my_modeling.transformer_model)
model_fn_ft = model_fn_builder(config, fiu.fast_transformer_model_trans)
def graph_fn_builder(model_fn):
def graph_fn():
input_tensor = tf.constant(input_data, dtype=FLAGS.floatx)
mask_tensor = tf.constant(attention_mask, dtype=FLAGS.floatx)
output_var = model_fn(input_tensor, mask_tensor, sequence_length)
# for saving memcopy time
return tf.reduce_mean(output_var)
return graph_fn
if FLAGS.tf_profile:
tf.logging.info("***** Running tensorflow transformer*****")
p1 = profile_util.Profiler(os.path.join(
FLAGS.output_dir, 'prof/bert_origin'))
t1, r1 = profile_util.run_profile(graph_fn_builder(
model_fn_tf), jit_xla, num_iter, p1, init_checkpoint=FLAGS.init_checkpoint)
tf.reset_default_graph()
tf.logging.info("***** Running fast transformer*****")
p2 = profile_util.Profiler(os.path.join(
FLAGS.output_dir, 'prof/bert_fastinfer'))
t2, r2 = profile_util.run_profile(graph_fn_builder(
model_fn_ft), jit_xla, num_iter, p2, init_checkpoint=FLAGS.init_checkpoint)
else:
tf.logging.info("***** Running tensorflow transformer*****")
t1, r1 = profile_util.run_profile(graph_fn_builder(
model_fn_tf), jit_xla, num_iter, check_result=False, init_checkpoint=FLAGS.init_checkpoint)
tf.reset_default_graph()
tf.logging.info("***** Running fast transformer*****")
t2, r2 = profile_util.run_profile(graph_fn_builder(
model_fn_ft), jit_xla, num_iter, check_result=False, init_checkpoint=FLAGS.init_checkpoint)
# check errors
print('average time (seconds) elapsed original tensorflow: {} sec'.format(t1))
print('average time (seconds) elapsed fast transformer: {} sec'.format(t2))
if len(r1) + len(r2) > 0:
check_res = np.asarray([np.allclose(
r1[i], r2[i], atol=1e-4, rtol=0) for i in range(num_iter)])
if check_res.all():
print('Pass')
print(np.mean(r1))
print(np.mean(r2))
else:
for i in np.where(np.logical_not(check_res))[0]:
diff = np.fabs(r1[i] - r2[i])
idx = np.unravel_index(diff.argmax(), diff.shape)
print('Failed iter:', i, "max diff:",
diff[idx], idx, r1[i][idx], r2[i][idx])
return t1, t2
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
batch_size = [8]
seq_length = [128]
num_hidden_layers = [12]
attention_heads_num_size = [(12, 64)]
num_iter = 20
interval = 0
# collect results of both original bert and fast transformer
jit_xla = tf.OptimizerOptions.ON_1 if FLAGS.xla else 0
config = my_modeling.BertConfig(vocab_size=0)
tf.gfile.MakeDirs(FLAGS.output_dir)
local_device_protos = device_lib.list_local_devices()
with open(os.path.join(FLAGS.output_dir, FLAGS.profiling_output_file), 'w') as f:
for x in local_device_protos:
if x.device_type == 'GPU':
f.write(x.physical_device_desc + '\n')
f.write(str(FLAGS.floatx) + '\t' + 'XLA: ' + str(FLAGS.xla) + '\n')
f.write('batch_size\tseq_length\thidden_layers\tattention_heads\tattention_head_size\tTensorflow\tFasterTransformer\n')
for bs in batch_size:
FLAGS.predict_batch_size = bs
for sl in seq_length:
FLAGS.max_seq_length = sl
for hidden_layers in num_hidden_layers:
config.num_hidden_layers = hidden_layers
for head_num, head_size in attention_heads_num_size:
config.num_attention_heads = head_num
config.hidden_size = head_num * head_size
time.sleep(interval)
t1, t2 = profile_model(config, jit_xla, num_iter)
tmp = [FLAGS.predict_batch_size, FLAGS.max_seq_length, hidden_layers, head_num, head_size,
'{:.6}'.format(t1), '{:.6}'.format(t2)]
f.write('\t'.join([str(x) for x in tmp]) + '\n')
if __name__ == "__main__":
flags.mark_flag_as_required("output_dir")
flags.DEFINE_string("profiling_output_file", None,
"The output file for profiling results.")
flags.mark_flag_as_required("profiling_output_file")
flags.DEFINE_string("floatx", "float32", "float32 or float16")
flags.mark_flag_as_required("floatx")
flags.DEFINE_bool("xla", False, "whether to turn on XLA")
flags.mark_flag_as_required("xla")
flags.DEFINE_bool("tf_profile", False,
"whether to use tensorflow profiling")
flags.DEFINE_bool("remove_padding", False, "Whether remove the padding of sentences")
flags.DEFINE_integer("int8_mode", 0, "whether use int8 or not; and how to use int8")
flags.DEFINE_bool("allow_gemm_test", False, "whether allow gemm test inside FT.")
tf.app.run()
| FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/profile_transformer_inference.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage example
# export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
# export GLUE_DIR=/path/to/glue
# python run_classifier_wrap.py --floatx=float16 --task_name=MRPC --do_eval=true --data_dir=$GLUE_DIR/MRPC --vocab_file=$BERT_BASE_DIR/vocab.txt --bert_config_file=$BERT_BASE_DIR/bert_config.json --init_checkpoint=mrpc_output/fp16_model.ckpt --max_seq_length=128 --eval_batch_size=8 --output_dir=mrpc_output
# FP32 Tensorflow Transformer MRPC result
# INFO:tensorflow: eval_accuracy = 0.877451
# INFO:tensorflow: eval_loss = 0.44744828
# INFO:tensorflow: global_step = 0
# INFO:tensorflow: loss = 0.44744828
# FP32 Faster Transformer MRPC result
# INFO:tensorflow: eval_accuracy = 0.877451
# INFO:tensorflow: eval_loss = 0.4474482
# INFO:tensorflow: global_step = 0
# INFO:tensorflow: loss = 0.4474482
# FP16 Tensorflow Transformer MRPC result
# INFO:tensorflow: eval_accuracy = 0.875
# INFO:tensorflow: eval_loss = 0.44760832
# INFO:tensorflow: global_step = 0
# INFO:tensorflow: loss = 0.44760215
# FP16 Faster Transformer MRPC result
# INFO:tensorflow: eval_accuracy = 0.875
# INFO:tensorflow: eval_loss = 0.44731623
# INFO:tensorflow: global_step = 0
# INFO:tensorflow: loss = 0.44728807
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
bert_submodule = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bert')
sys.path.insert(0, bert_submodule)
import tensorflow as tf
import run_classifier as rc
import fast_infer_util as fiu
import my_modeling
flags = tf.flags
FLAGS = flags.FLAGS
# replace transformer implementation
my_modeling.transformer_model = fiu.fast_transformer_model_trans
# replace the model to support fp16 data type
rc.create_model = fiu.create_model
# replace the input function to drop remainder
rc.file_based_input_fn_builder = fiu.file_based_input_fn_builder_drop
main = rc.main
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
flags.DEFINE_string("floatx", None, "float32 or float16")
flags.mark_flag_as_required("floatx")
flags.DEFINE_bool("remove_padding", False, "Whether remove the padding of sentences")
flags.DEFINE_integer("int8_mode", 0, "whether use int8 or not; and how to use int8")
flags.DEFINE_bool("allow_gemm_test", False, "whether allow gemm test inside FT.")
tf.app.run()
| FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/run_classifier_wrap.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage example
# export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
# export GLUE_DIR=/path/to/glue
# python profile_bert_inference.py --task_name=MRPC --data_dir=$GLUE_DIR/MRPC --vocab_file=$BERT_BASE_DIR/vocab.txt --bert_config_file=$BERT_BASE_DIR/bert_config.json --predict_batch_size=8 --max_seq_length=128 --output_dir=mrpc_output --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt --tf_profile=true --profiling_output_file=time_elapsed --xla=false --floatx=float32
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import fast_infer_util as fiu
import profile_util
import tensorflow as tf
import os
from tensorflow.python.client import timeline
import contextlib
import time
from tensorflow.python.client import device_lib
import my_modeling
bert_submodule = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bert')
sys.path.insert(0, bert_submodule)
import tokenization
import run_classifier as rc
flags = tf.flags
FLAGS = flags.FLAGS
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids):
"""Creates a classification model."""
model = my_modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=False)
seq_output = model.get_sequence_output()
return seq_output
def model_fn_builder(bert_config):
def model_fn(features):
# print features
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" %
(name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
fetches = create_model(
bert_config, False, input_ids, input_mask, segment_ids)
# # fetch mrpc logits for prediction
# num_labels = 2 # for mrpc
# _, _, fetches, _ = fiu.create_model(bert_config, False, input_ids, input_mask, segment_ids, label_ids,
# num_labels, False)
return fetches
return model_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
num_iter = 20
jit_xla = tf.OptimizerOptions.ON_1 if FLAGS.xla else 0
processors = {
"cola": rc.ColaProcessor,
"mnli": rc.MnliProcessor,
"mrpc": rc.MrpcProcessor,
"xnli": rc.XnliProcessor,
}
# sanity check
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
bert_config = my_modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
# prepare data
processor = processors[task_name]()
label_list = processor.get_labels()
predict_examples = processor.get_test_examples(FLAGS.data_dir)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
rc.file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
# get model function and input function
# drop_remainder option should be turned on for fast transformer inference
drop_remainder = True
predict_input_fn = rc.file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=drop_remainder)
def graph_fn():
model_fn = model_fn_builder(bert_config=bert_config)
dataset = predict_input_fn({'batch_size': FLAGS.predict_batch_size})
next_item = dataset.make_one_shot_iterator().get_next()
output_var = model_fn(next_item)
return output_var
if FLAGS.tf_profile:
tf.logging.info("***** Running tensorflow transformer*****")
p1 = profile_util.Profiler(os.path.join(
FLAGS.output_dir, 'prof/bert_origin'))
t1, r1 = profile_util.run_profile(
graph_fn, jit_xla, num_iter, p1, init_checkpoint=FLAGS.init_checkpoint)
tf.reset_default_graph()
my_modeling.transformer_model = fiu.fast_transformer_model_trans
tf.logging.info("***** Running fast transformer*****")
p2 = profile_util.Profiler(os.path.join(
FLAGS.output_dir, 'prof/bert_fastinfer'))
t2, r2 = profile_util.run_profile(
graph_fn, jit_xla, num_iter, p2, init_checkpoint=FLAGS.init_checkpoint)
else:
tf.logging.info("***** Running tensorflow transformer*****")
t1, r1 = profile_util.run_profile(
graph_fn, jit_xla, num_iter, check_result=False, init_checkpoint=FLAGS.init_checkpoint)
tf.reset_default_graph()
my_modeling.transformer_model = fiu.fast_transformer_model_trans
tf.logging.info("***** Running fast transformer*****")
t2, r2 = profile_util.run_profile(
graph_fn, jit_xla, num_iter, check_result=False, init_checkpoint=FLAGS.init_checkpoint)
print('average time (seconds) elapsed original tensorflow:', t1)
print('average time (seconds) elapsed fast transformer:', t2)
if len(r1) + len(r2) > 0:
check_res = np.asarray([np.allclose(
r1[i], r2[i], atol=1e-4, rtol=0) for i in range(num_iter)])
if check_res.all():
print('Pass')
print(np.mean(r1))
print(np.mean(r2))
else:
for i in np.where(np.logical_not(check_res))[0]:
diff = np.fabs(r1[i] - r2[i])
idx = np.unravel_index(diff.argmax(), diff.shape)
print('Failed iter:', i, "max diff:",
diff[idx], idx, r1[i][idx], r2[i][idx])
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
flags.DEFINE_string("profiling_output_file", None,
"The output file for profiling results.")
flags.mark_flag_as_required("profiling_output_file")
flags.DEFINE_string("floatx", "float32", "float32 or float16")
flags.mark_flag_as_required("floatx")
flags.DEFINE_bool("xla", False, "whether to turn on XLA")
flags.mark_flag_as_required("xla")
flags.DEFINE_bool("tf_profile", False,
"whether to use tensorflow profiling")
flags.DEFINE_bool("remove_padding", False, "Whether remove the padding of sentences")
flags.DEFINE_integer("int8_mode", 0, "whether use int8 or not; and how to use int8")
flags.DEFINE_bool("allow_gemm_test", False, "whether allow gemm test inside FT.")
tf.app.run()
| FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/profile_bert_inference.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage example
# python ckpt_quantization.py --init_checkpoint=squad_model/QAT_noresidualQuant/model.ckpt-5474 --quantized_checkpoint=squad_model/QAT_noresidualQuant_quantized/model.ckpt
import tensorflow as tf
import numpy as np
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.ops import io_ops
from tensorflow.python.training.saver import BaseSaverBuilder
import os
import re
build_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../lib')
transformer_op_module = tf.load_op_library(
os.path.join('./lib/libtf_bert.so'))
ACTIVATION_AMAX_NUM = 72
INT8O_GEMM_NUM = 8
TRT_FUSED_MHA_AMAX_NUM = 3
SCALE_RESERVE_NUM = 21
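# Layout note (illustrative, added here for clarity): checkpoint_quantization below
# sizes each layer's flat amaxList as ACTIVATION_AMAX_NUM + 9 * hidden_size
# + INT8O_GEMM_NUM + TRT_FUSED_MHA_AMAX_NUM + SCALE_RESERVE_NUM. The helper only
# restates that arithmetic; the BERT-base hidden size of 768 is an assumed example.
def _example_amax_list_size(hidden_size=768):
    """Illustrative only: per-layer amaxList length, e.g. 72 + 9*768 + 8 + 3 + 21 = 7016."""
    return (ACTIVATION_AMAX_NUM + 9 * hidden_size + INT8O_GEMM_NUM
            + TRT_FUSED_MHA_AMAX_NUM + SCALE_RESERVE_NUM)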
def checkpoint_quantization(in_checkpoint_file, out_checkpoint_file, per_channel_quantization):
var_list = checkpoint_utils.list_variables(tf.flags.FLAGS.init_checkpoint)
def init_graph():
restore_vars = []
layer_num = 0
        regex = re.compile(r'layer_\d+')
amaxTotalNum = 0
for name, shape in var_list:
var = checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name)
if "intermediate/dense/kernel" in name and amaxTotalNum == 0:
amaxTotalNum = ACTIVATION_AMAX_NUM + 9*shape[0] + INT8O_GEMM_NUM + TRT_FUSED_MHA_AMAX_NUM + SCALE_RESERVE_NUM
print(amaxTotalNum, shape[0])
recon_dtype = var.dtype
restore_vars.append(tf.get_variable(name, shape=shape, dtype=var.dtype))
tmp = regex.findall(name)
if len(tmp) < 1:
continue
num_tmp = int(tmp[0].replace("layer_", ""))
if layer_num < num_tmp:
layer_num = num_tmp
layer_num = layer_num + 1
#add new var for amax
for i in range(layer_num):
tf.get_variable("bert/encoder/layer_{}/amaxList".format(i), shape=[amaxTotalNum], dtype=tf.float32)
return layer_num, amaxTotalNum, restore_vars
layer_num, amaxTotalNum, restore_vars = init_graph()
restorer = tf.train.Saver(restore_vars)
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
restorer.restore(sess, in_checkpoint_file)
kernel_name_list = ["attention/self/query", "attention/self/key", "attention/self/value", "attention/output/dense", "intermediate/dense", "output/dense"]
#input_scale, 0
amax_name_list = ["attention/self/query/input_quantizer",
#Q_aftergemm_scale, 1
"attention/self/query/aftergemm_quantizer",
#Qbias_scale, 2
"attention/self/matmul_q_input_quantizer",
#K_aftergemm_scale, 3
"attention/self/key/aftergemm_quantizer",
#Kbias_scale, 4
"attention/self/matmul_k_input_quantizer",
#V_aftergemm_scale, 5
"attention/self/value/aftergemm_quantizer",
#Vbias_scale, 6
"attention/self/matmul_v_input_quantizer",
#bmm1_scale, 7
"attention/self/softmax_input_quantizer",
#Softmax_scale, 8
"attention/self/matmul_a_input_quantizer",
#bmm2_scale, 9
"attention/output/dense/input_quantizer",
#Proj_aftergemm_scale, 10
"attention/output/dense/aftergemm_quantizer",
#ProjBiasNorm_scale, 11
"intermediate/dense/input_quantizer",
#FC1_aftergemm_scale, 12
"intermediate/dense/aftergemm_quantizer",
#F1Bias_scale, 13
"output/dense/input_quantizer",
#FC2_aftergemm_scale, 14
"output/dense/aftergemm_quantizer",
#F2Bias_scale, 15
"special_F2Bias_scale",
]
int8O_gemm_weight_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
#Q_aftergemm
int8O_gemm_weight_list = ["attention/self/query",
#K_aftergemm
"attention/self/key",
#V_aftergemm
"attention/self/value",
#bmm1_aftergemm
"attention/self/matmul_k_input_quantizer",
#bmm2_aftergemm
"attention/self/matmul_v_input_quantizer",
#Proj_aftergemm
"attention/output/dense",
#FC1_aftergemm
"intermediate/dense",
#FC2_aftergemm
"output/dense"]
int8O_gemm_input_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
#Q_aftergemm
int8O_gemm_input_list = ["attention/self/query/input_quantizer",
#K_aftergemm
"attention/self/key/input_quantizer",
#V_aftergemm
"attention/self/value/input_quantizer",
#bmm1_aftergemm
"attention/self/matmul_q_input_quantizer",
#bmm2_aftergemm
"attention/self/matmul_a_input_quantizer",
#Proj_aftergemm
"attention/output/dense/input_quantizer",
#FC1_aftergemm
"intermediate/dense/input_quantizer",
#FC2_aftergemm
"output/dense/input_quantizer"]
int8O_gemm_output_amax_list = [0 for i in range(INT8O_GEMM_NUM)]
#Q_aftergemm
int8O_gemm_output_list = ["attention/self/query/aftergemm_quantizer",
#K_aftergemm
"attention/self/key/aftergemm_quantizer",
#V_aftergemm
"attention/self/value/aftergemm_quantizer",
#bmm1_aftergemm
"attention/self/softmax_input_quantizer",
#bmm2_aftergemm
"attention/output/dense/input_quantizer",
#Proj_aftergemm
"attention/output/dense/aftergemm_quantizer",
#FC1_aftergemm
"intermediate/dense/aftergemm_quantizer",
#FC2_aftergemm
"output/dense/aftergemm_quantizer"]
factor = 1000000.0
for i in range(layer_num):
amaxList = np.zeros([amaxTotalNum])
amax_id = 0
for amax_name in amax_name_list:
if amax_name == "special_F2Bias_scale":
if i != layer_num - 1:
name = "bert/encoder/layer_{}/{}/quant_max:0".format(i+1, amax_name_list[0])
quant_max = checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name)
name = "bert/encoder/layer_{}/{}/quant_min:0".format(i+1, amax_name_list[0])
quant_min = checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name)
if abs(quant_max) > abs(quant_min):
amax = abs(quant_max)#int(abs(quant_max)*factor)/factor
else:
amax = abs(quant_min)#int(abs(quant_min)*factor)/factor
else:
#not used, placeholder
amax = 1.0
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
continue
name = "bert/encoder/layer_{}/{}/quant_max:0".format(i, amax_name)
quant_max = checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name)
name = "bert/encoder/layer_{}/{}/quant_min:0".format(i, amax_name)
quant_min = checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name)
if abs(quant_max) > abs(quant_min):
amax = abs(quant_max)#int(abs(quant_max)*factor)/factor
else:
amax = abs(quant_min)#int(abs(quant_min)*factor)/factor
if amax_name in int8O_gemm_input_list:
int8O_gemm_input_amax_list[int8O_gemm_input_list.index(amax_name)] = amax
if amax_name == "attention/self/query/input_quantizer":
int8O_gemm_input_amax_list[int8O_gemm_input_list.index("attention/self/key/input_quantizer")] = amax
int8O_gemm_input_amax_list[int8O_gemm_input_list.index("attention/self/value/input_quantizer")] = amax
if amax_name in int8O_gemm_output_list:
int8O_gemm_output_amax_list[int8O_gemm_output_list.index(amax_name)] = amax
if amax_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(amax_name)] = amax
amaxList[amax_id] = amax
amax_id += 1
amaxList[amax_id] = amax/127.0
amax_id += 1
amaxList[amax_id] = amax/127.0/127.0
amax_id += 1
amaxList[amax_id] = 127.0/amax
amax_id += 1
print("done process layer_{} activation amax".format(i))
#kernel amax starts from ACTIVATION_AMAX_NUM
amax_id = ACTIVATION_AMAX_NUM
for kernel_id, kernel_name in enumerate(kernel_name_list):
kernel = tf.get_default_graph().get_tensor_by_name("bert/encoder/layer_{}/{}/kernel:0".format(i, kernel_name))
name = "bert/encoder/layer_{}/{}/kernel_quantizer/quant_max:0".format(i, kernel_name)
quant_max2 = tf.convert_to_tensor(checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name))
name = "bert/encoder/layer_{}/{}/kernel_quantizer/quant_min:0".format(i, kernel_name)
quant_min2 = tf.convert_to_tensor(checkpoint_utils.load_variable(tf.flags.FLAGS.init_checkpoint, name))
kernel_processed, quant_max_processed = transformer_op_module.weight_quantize(kernel, quant_max2, quant_min2, per_channel_quantization = per_channel_quantization)
kernel_processed_, quant_max_processed_ = sess.run([kernel_processed, quant_max_processed])
sess.run(tf.assign(kernel, kernel_processed_))
if kernel_name in int8O_gemm_weight_list:
int8O_gemm_weight_amax_list[int8O_gemm_weight_list.index(kernel_name)] = quant_max_processed_[0]
for e in quant_max_processed_:
amaxList[amax_id] = e
amax_id += 1
#for int8O gemm deQuant
for j in range(INT8O_GEMM_NUM):
amaxList[amax_id] = (int8O_gemm_input_amax_list[j]*int8O_gemm_weight_amax_list[j])/(127.0*int8O_gemm_output_amax_list[j])
amax_id += 1
#for trt fused MHA amax
#### QKV_addBias_amax
amaxList[amax_id] = np.maximum(np.maximum(amaxList[8],amaxList[16]), amaxList[24])
amax_id += 1
#### softmax amax
amaxList[amax_id] = amaxList[32]
amax_id += 1
#### bmm2 amax
amaxList[amax_id] = amaxList[36]
amax_id += 1
amaxL = tf.get_default_graph().get_tensor_by_name("bert/encoder/layer_{}/amaxList:0".format(i))
sess.run(tf.assign(amaxL, amaxList))
print("done process layer_{} kernel weight".format(i))
saver.save(sess, out_checkpoint_file)
if __name__ == '__main__':
tf.flags.DEFINE_string("quantized_checkpoint", None, "quantized checkpoint file")
tf.flags.DEFINE_string("init_checkpoint", None, "initial checkpoint file")
tf.flags.DEFINE_integer("int8_mode", 1, "int8 mode in FasterTransformer, default as 1")
if tf.flags.FLAGS.int8_mode == 1:
per_channel_quantization = True
elif tf.flags.FLAGS.int8_mode == 2 or tf.flags.FLAGS.int8_mode == 3:
per_channel_quantization = False
else:
raise ValueError("wrong int8_mode argument")
quantized_checkpoint_folder = "/".join(tf.flags.FLAGS.quantized_checkpoint.split("/")[:-1])
if not os.path.exists(quantized_checkpoint_folder):
os.system("mkdir -p " + quantized_checkpoint_folder)
checkpoint_quantization(tf.flags.FLAGS.init_checkpoint, tf.flags.FLAGS.quantized_checkpoint, per_channel_quantization)
| FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/ckpt_quantization.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Official evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
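# Worked example (illustrative only, not part of the official script): with prediction
# "a cat sat" and ground truth "the cat sat down", normalization drops the articles,
# the shared tokens are {"cat", "sat"}, so precision = 2/2, recall = 2/3 and F1 = 0.8.
def _example_f1_score():
    assert abs(f1_score("a cat sat", "the cat sat down") - 0.8) < 1e-6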
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
if __name__ == '__main__':
expected_version = '1.1'
parser = argparse.ArgumentParser(
description='Evaluation for SQuAD ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (dataset_json['version'] != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
| FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/squad_evaluate-v1.1.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is mostly the same as bert/modeling.py from Google's BERT repository https://github.com/google-research/bert
# with configurable float types by setting tf.flags.FLAGS.floatx
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class BertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
sequence_length = tf.reduce_sum(input_mask, axis=1)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True,
sequence_length=sequence_length)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
      to the final hidden layer of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
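# Hedged usage sketch (added for illustration; not part of the original file).
# `BertConfig` is assumed to be the configuration class defined earlier in this
# module, and the token ids below are made-up placeholders. Building the graph
# also relies on the module-level `floatx` flag used by create_initializer().
#
#   config = BertConfig(vocab_size=32000, hidden_size=768,
#                       num_hidden_layers=12, num_attention_heads=12,
#                       intermediate_size=3072)
#   input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
#   input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
#   token_type_ids = tf.constant([[0, 0, 1], [0, 0, 0]])
#   model = BertModel(config=config, is_training=False, input_ids=input_ids,
#                     input_mask=input_mask, token_type_ids=token_type_ids)
#   pooled_output = model.get_pooled_output()      # [batch_size, hidden_size]
#   sequence_output = model.get_sequence_output()  # [batch_size, seq_length, hidden_size]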
def gelu(x):
"""Gaussian Error Linear Unit.
  This is a smoother version of the ReLU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
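# Illustrative check (an added sketch, not from the original file): the
# tanh-based approximation above can be compared against the exact GELU,
# x * 0.5 * (1 + erf(x / sqrt(2))); the two agree to within roughly 1e-3.
#
#   x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
#   approx = gelu(x)
#   exact = x * 0.5 * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))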
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
  # We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range, dtype=tf.flags.FLAGS.floatx)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.gather()`.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
dtype=tf.flags.FLAGS.floatx,
initializer=create_initializer(initializer_range))
flat_input_ids = tf.reshape(input_ids, [-1])
if use_one_hot_embeddings:
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.gather(embedding_table, flat_input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if "
                       "`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
dtype=tf.flags.FLAGS.floatx,
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size, dtype=tf.flags.FLAGS.floatx)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
dtype=tf.flags.FLAGS.floatx,
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
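# Worked illustration of the position-embedding slice above (hypothetical sizes,
# added for clarity): with max_position_embeddings=512, width=768 and an input
# of seq_length=128,
#
#   position_embeddings = tf.slice(full_position_embeddings, [0, 0], [128, -1])
#
# keeps only the first 128 rows of the [512, 768] table; the reshape to
# [1, 128, 768] then lets the result broadcast across the batch dimension when
# it is added to `output`.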
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.flags.FLAGS.floatx)
  # We don't assume that `from_tensor` is a mask (although it could be). We
  # don't actually care if we attend *from* padding tokens (only *to* padding
  # tokens), so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.flags.FLAGS.floatx)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
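# Worked example (added for illustration): for a batch of one sequence with
# input_mask = [1, 1, 0], `to_mask` is reshaped to [1, 1, 3] and multiplied by
# `broadcast_ones` of shape [1, 3, 1], giving
#
#   mask = [[[1., 1., 0.],
#            [1., 1., 0.],
#            [1., 1., 0.]]]
#
# i.e. every query position may attend to the two real tokens but not to the
# padded third position.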
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
  This is an implementation of multi-headed attention based on "Attention
  Is All You Need". If `from_tensor` and `to_tensor` are the same, then
  this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
  In practice, the multi-headed attention is done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(size_per_head))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.flags.FLAGS.floatx)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
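# Hedged usage sketch (shapes are assumptions added here, not from the original
# file): self-attention over a [batch_size=2, seq_length=4, width=768] tensor
# with 12 heads of 64 units each returns a [2, 4, 768] context tensor.
#
#   attention_mask = create_attention_mask_from_input_mask(layer_input, input_mask)
#   context = attention_layer(from_tensor=layer_input,
#                             to_tensor=layer_input,
#                             attention_mask=attention_mask,
#                             num_attention_heads=12,
#                             size_per_head=64)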
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False,
sequence_length=None):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
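# Hedged usage sketch (added for illustration): running a BERT-Base sized
# encoder over an embedded input. `embedding_output` is assumed to be a
# [batch_size, seq_length, 768] tensor such as the one produced by
# embedding_postprocessor above.
#
#   all_layers = transformer_model(
#       input_tensor=embedding_output,
#       attention_mask=attention_mask,
#       hidden_size=768,
#       num_hidden_layers=12,
#       num_attention_heads=12,
#       intermediate_size=3072,
#       do_return_all_layers=True,
#       sequence_length=tf.reduce_sum(input_mask, axis=1))
#   sequence_output = all_layers[-1]  # [batch_size, seq_length, 768]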
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
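# Illustrative example (added, not from the original file): for a placeholder
# with a dynamic batch dimension,
#
#   x = tf.placeholder(tf.float32, shape=[None, 128, 768])
#   shape = get_shape_list(x, expected_rank=3)
#
# `shape` is [<scalar int32 Tensor>, 128, 768]: the dynamic batch size comes
# back as a tf.Tensor resolved at run time, while the static dimensions are
# plain Python integers.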
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/my_modeling.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Official evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
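# Worked example (added for illustration): with prediction "The cat sat." and
# ground truth "cat sat down", normalize_answer() drops the article and the
# punctuation, leaving token bags {cat, sat} and {cat, sat, down}. Then
# num_same = 2, precision = 2/2 = 1.0, recall = 2/3, and
# f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.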
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
if __name__ == '__main__':
expected_version = '1.1'
parser = argparse.ArgumentParser(
description='Evaluation for SQuAD ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (dataset_json['version'] != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
| FasterTransformer-main | examples/tensorflow/bert/tensorflow_bert/squad_evaluate_v1_1.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import json
import math
import re
import six
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.python.ops import init_ops
import numpy
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import nn
def fused_layer_norm(inputs,
center=True,
scale=True,
activation_fn=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
begin_norm_axis=1,
begin_params_axis=-1,
scope=None,
use_fused_batch_norm=False):
with tf.variable_scope(
scope, 'LayerNorm', [inputs], reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_shape = inputs.shape
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
if begin_norm_axis < 0:
begin_norm_axis = inputs_rank + begin_norm_axis
if begin_params_axis >= inputs_rank or begin_norm_axis >= inputs_rank:
raise ValueError('begin_params_axis (%d) and begin_norm_axis (%d) '
'must be < rank(inputs) (%d)' %
(begin_params_axis, begin_norm_axis, inputs_rank))
params_shape = inputs_shape[begin_params_axis:]
if not params_shape.is_fully_defined():
raise ValueError(
'Inputs %s: shape(inputs)[%s:] is not fully defined: %s' %
(inputs.name, begin_params_axis, inputs_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta = variables.model_variable(
'beta',
shape=params_shape,
dtype=dtype,
initializer=init_ops.zeros_initializer(),
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(
variables_collections, 'gamma')
gamma = variables.model_variable(
'gamma',
shape=params_shape,
dtype=dtype,
initializer=init_ops.ones_initializer(),
collections=gamma_collections,
trainable=trainable)
if use_fused_batch_norm:
# get static TensorShape if fully defined,
# otherwise retrieve shape tensor
norm_shape = inputs.shape[begin_norm_axis:]
if norm_shape.is_fully_defined():
bn_shape = [1, -1, 1, numpy.prod(norm_shape.as_list())]
else:
norm_shape = tf.shape(inputs)[begin_norm_axis:]
bn_shape = [1, -1, 1, tf.reduce_prod(norm_shape)]
if inputs.get_shape().is_fully_defined():
outputs_shape = inputs.get_shape()
else:
outputs_shape = tf.shape(inputs)
inputs = array_ops.reshape(inputs, bn_shape)
if inputs.get_shape().is_fully_defined():
# static inputs TensorShape fully defined after reshape.
ones = array_ops.ones(inputs.get_shape()[1], dtype=dtypes.float32)
zeros = array_ops.zeros(inputs.get_shape()[1], dtype=dtypes.float32)
else:
# static inputs TensorShape NOT fully defined after reshape.
# must use dynamic shape, which means these input tensors
# have to be created at runtime, which causes a slowdown.
scale_shape = tf.shape(inputs)[1]
ones = array_ops.ones(scale_shape, dtype=dtypes.float32)
zeros = array_ops.zeros(scale_shape, dtype=dtypes.float32)
outputs, mean, variance = nn.fused_batch_norm(
inputs,
ones, zeros,
epsilon=1e-4,
data_format="NCHW")
outputs = array_ops.reshape(outputs, outputs_shape)
if center and scale:
outputs = outputs * gamma + beta
elif center:
outputs = outputs + beta
elif scale:
outputs = outputs * gamma
else:
# Calculate the moments on the last axis (layer activations).
norm_axes = list(range(begin_norm_axis, inputs_rank))
mean, variance = nn.moments(inputs, norm_axes, keep_dims=True)
# Compute layer normalization using the batch_normalization function.
variance_epsilon = 1e-4
outputs = nn.batch_normalization(
inputs,
mean,
variance,
offset=beta,
scale=gamma,
variance_epsilon=variance_epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
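# Hedged usage note (added for illustration): fused_layer_norm mirrors the
# tf.contrib.layers.layer_norm signature and can be called the same way, e.g.
#
#   normalized = fused_layer_norm(inputs, begin_norm_axis=-1,
#                                 begin_params_axis=-1, scope="LayerNorm",
#                                 use_fused_batch_norm=True)
#
# With use_fused_batch_norm=True the statistics come from nn.fused_batch_norm
# over a [1, -1, 1, prod(norm_shape)] NCHW reshape, which is typically faster
# on GPU than the nn.moments / nn.batch_normalization path in the else branch.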
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/fused_layer_norm.py |
"""
Multiclass
from:
https://github.com/guillaumegenthial/tf_metrics/blob/master/tf_metrics/__init__.py
"""
__author__ = "Guillaume Genthial"
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.metrics_impl import _streaming_confusion_matrix
def precision(labels, predictions, num_classes, pos_indices=None,
weights=None, average='micro'):
"""Multi-class precision metric for Tensorflow
Parameters
----------
labels : Tensor of tf.int32 or tf.int64
The true labels
predictions : Tensor of tf.int32 or tf.int64
The predictions, same shape as labels
num_classes : int
The number of classes
pos_indices : list of int, optional
The indices of the positive classes, default is all
weights : Tensor of tf.int32, optional
Mask, must be of compatible shape with labels
average : str, optional
'micro': counts the total number of true positives, false
positives, and false negatives for the classes in
`pos_indices` and infer the metric from it.
'macro': will compute the metric separately for each class in
`pos_indices` and average. Will not account for class
imbalance.
'weighted': will compute the metric separately for each class in
`pos_indices` and perform a weighted average by the total
number of true labels for each class.
Returns
-------
tuple of (scalar float Tensor, update_op)
"""
cm, op = _streaming_confusion_matrix(
labels, predictions, num_classes, weights)
pr, _, _ = metrics_from_confusion_matrix(
cm, pos_indices, average=average)
op, _, _ = metrics_from_confusion_matrix(
op, pos_indices, average=average)
return (pr, op)
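# Hedged usage sketch (tensor values are assumptions added here): like the
# tf.metrics API, the function returns a (value, update_op) pair evaluated
# inside a session after initializing local variables.
#
#   labels = tf.constant([0, 1, 2, 1])
#   predictions = tf.constant([0, 1, 1, 1])
#   pr, pr_op = precision(labels, predictions, num_classes=3, average='macro')
#   with tf.Session() as sess:
#       sess.run(tf.local_variables_initializer())
#       sess.run(pr_op)
#       print(sess.run(pr))  # (1 + 2/3 + 0) / 3 ~= 0.56 for this toy batch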
def recall(labels, predictions, num_classes, pos_indices=None, weights=None,
average='micro'):
"""Multi-class recall metric for Tensorflow
Parameters
----------
labels : Tensor of tf.int32 or tf.int64
The true labels
predictions : Tensor of tf.int32 or tf.int64
The predictions, same shape as labels
num_classes : int
The number of classes
pos_indices : list of int, optional
The indices of the positive classes, default is all
weights : Tensor of tf.int32, optional
Mask, must be of compatible shape with labels
average : str, optional
'micro': counts the total number of true positives, false
positives, and false negatives for the classes in
`pos_indices` and infer the metric from it.
'macro': will compute the metric separately for each class in
`pos_indices` and average. Will not account for class
imbalance.
'weighted': will compute the metric separately for each class in
`pos_indices` and perform a weighted average by the total
number of true labels for each class.
Returns
-------
tuple of (scalar float Tensor, update_op)
"""
cm, op = _streaming_confusion_matrix(
labels, predictions, num_classes, weights)
_, re, _ = metrics_from_confusion_matrix(
cm, pos_indices, average=average)
_, op, _ = metrics_from_confusion_matrix(
op, pos_indices, average=average)
return (re, op)
def f1(labels, predictions, num_classes, pos_indices=None, weights=None,
average='micro'):
return fbeta(labels, predictions, num_classes, pos_indices, weights,
average)
def fbeta(labels, predictions, num_classes, pos_indices=None, weights=None,
average='micro', beta=1):
"""Multi-class fbeta metric for Tensorflow
Parameters
----------
labels : Tensor of tf.int32 or tf.int64
The true labels
predictions : Tensor of tf.int32 or tf.int64
The predictions, same shape as labels
num_classes : int
The number of classes
pos_indices : list of int, optional
The indices of the positive classes, default is all
weights : Tensor of tf.int32, optional
Mask, must be of compatible shape with labels
average : str, optional
'micro': counts the total number of true positives, false
positives, and false negatives for the classes in
`pos_indices` and infer the metric from it.
'macro': will compute the metric separately for each class in
`pos_indices` and average. Will not account for class
imbalance.
'weighted': will compute the metric separately for each class in
`pos_indices` and perform a weighted average by the total
number of true labels for each class.
beta : int, optional
Weight of precision in harmonic mean
Returns
-------
tuple of (scalar float Tensor, update_op)
"""
cm, op = _streaming_confusion_matrix(
labels, predictions, num_classes, weights)
_, _, fbeta = metrics_from_confusion_matrix(
cm, pos_indices, average=average, beta=beta)
_, _, op = metrics_from_confusion_matrix(
op, pos_indices, average=average, beta=beta)
return (fbeta, op)
def safe_div(numerator, denominator):
"""Safe division, return 0 if denominator is 0"""
numerator, denominator = tf.to_float(numerator), tf.to_float(denominator)
zeros = tf.zeros_like(numerator, dtype=numerator.dtype)
denominator_is_zero = tf.equal(denominator, zeros)
return tf.where(denominator_is_zero, zeros, numerator / denominator)
def pr_re_fbeta(cm, pos_indices, beta=1):
"""Uses a confusion matrix to compute precision, recall and fbeta"""
num_classes = cm.shape[0]
neg_indices = [i for i in range(num_classes) if i not in pos_indices]
cm_mask = np.ones([num_classes, num_classes])
cm_mask[neg_indices, neg_indices] = 0
diag_sum = tf.reduce_sum(tf.diag_part(cm * cm_mask))
cm_mask = np.ones([num_classes, num_classes])
cm_mask[:, neg_indices] = 0
tot_pred = tf.reduce_sum(cm * cm_mask)
cm_mask = np.ones([num_classes, num_classes])
cm_mask[neg_indices, :] = 0
tot_gold = tf.reduce_sum(cm * cm_mask)
pr = safe_div(diag_sum, tot_pred)
re = safe_div(diag_sum, tot_gold)
fbeta = safe_div((1. + beta**2) * pr * re, beta**2 * pr + re)
return pr, re, fbeta
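# Worked example (added for illustration): for a 3-class confusion matrix
#
#   cm = [[5, 1, 0],
#         [2, 3, 0],
#         [0, 0, 4]]
#
# and pos_indices=[0], the masking above gives diag_sum = 5, tot_pred = 7
# (column 0) and tot_gold = 6 (row 0), so precision = 5/7 and recall = 5/6.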
def metrics_from_confusion_matrix(cm, pos_indices=None, average='micro',
beta=1):
"""Precision, Recall and F1 from the confusion matrix
Parameters
----------
cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)
The streaming confusion matrix.
pos_indices : list of int, optional
The indices of the positive classes
beta : int, optional
Weight of precision in harmonic mean
average : str, optional
'micro', 'macro' or 'weighted'
"""
num_classes = cm.shape[0]
if pos_indices is None:
pos_indices = [i for i in range(num_classes)]
if average == 'micro':
return pr_re_fbeta(cm, pos_indices, beta)
elif average in {'macro', 'weighted'}:
precisions, recalls, fbetas, n_golds = [], [], [], []
for idx in pos_indices:
pr, re, fbeta = pr_re_fbeta(cm, [idx], beta)
precisions.append(pr)
recalls.append(re)
fbetas.append(fbeta)
cm_mask = np.zeros([num_classes, num_classes])
cm_mask[idx, :] = 1
n_golds.append(tf.to_float(tf.reduce_sum(cm * cm_mask)))
if average == 'macro':
pr = tf.reduce_mean(precisions)
re = tf.reduce_mean(recalls)
fbeta = tf.reduce_mean(fbetas)
return pr, re, fbeta
if average == 'weighted':
n_gold = tf.reduce_sum(n_golds)
pr_sum = sum(p * n for p, n in zip(precisions, n_golds))
pr = safe_div(pr_sum, n_gold)
re_sum = sum(r * n for r, n in zip(recalls, n_golds))
re = safe_div(re_sum, n_gold)
fbeta_sum = sum(f * n for f, n in zip(fbetas, n_golds))
fbeta = safe_div(fbeta_sum, n_gold)
return pr, re, fbeta
else:
raise NotImplementedError()
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/tf_metrics.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract pre-computed feature vectors from BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import collections
import json
import re
import modeling
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("input_file", None, "")
flags.DEFINE_string("output_file", None, "")
flags.DEFINE_string("layers", "-1,-2,-3,-4", "")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer("batch_size", 32, "Batch size for predictions.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string("master", None,
"If using a TPU, the address of the master.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"use_one_hot_embeddings", False,
"If True, tf.one_hot will be used for embedding lookups, otherwise "
"tf.nn.embedding_lookup will be used. On TPUs, this should be True "
"since it is much faster.")
class InputExample(object):
def __init__(self, unique_id, text_a, text_b):
self.unique_id = unique_id
self.text_a = text_a
self.text_b = text_b
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
def input_fn_builder(features, seq_length):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_unique_ids = []
all_input_ids = []
all_input_mask = []
all_input_type_ids = []
for feature in features:
all_unique_ids.append(feature.unique_id)
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_input_type_ids.append(feature.input_type_ids)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"unique_ids":
tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"input_type_ids":
tf.constant(
all_input_type_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
})
d = d.batch(batch_size=batch_size, drop_remainder=False)
return d
return input_fn
def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
input_type_ids = features["input_type_ids"]
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=input_type_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
if mode != tf.estimator.ModeKeys.PREDICT:
raise ValueError("Only PREDICT modes are supported: %s" % (mode))
tvars = tf.trainable_variables()
scaffold_fn = None
(assignment_map,
initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(
tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
all_layers = model.get_all_encoder_layers()
predictions = {
"unique_id": unique_ids,
}
for (i, layer_index) in enumerate(layer_indexes):
predictions["layer_output_%d" % i] = all_layers[layer_index]
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
return output_spec
return model_fn
def convert_examples_to_features(examples, seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("unique_id: %s" % (example.unique_id))
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
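# Worked example (added for illustration): with len(tokens_a) = 5,
# len(tokens_b) = 3 and max_length = 6, the loop pops from tokens_a twice (it is
# the longer list each time), leaving 3 + 3 = 6 tokens in total.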
def read_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with tf.io.gfile.GFile(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples
def main(_):
  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
layer_indexes = [int(x) for x in FLAGS.layers.split(",")]
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
master=FLAGS.master,
tpu_config=tf.contrib.tpu.TPUConfig(
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
examples = read_examples(FLAGS.input_file)
features = convert_examples_to_features(
examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer)
unique_id_to_feature = {}
for feature in features:
unique_id_to_feature[feature.unique_id] = feature
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
layer_indexes=layer_indexes,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
predict_batch_size=FLAGS.batch_size)
input_fn = input_fn_builder(
features=features, seq_length=FLAGS.max_seq_length)
with codecs.getwriter("utf-8")(tf.io.gfile.Open(FLAGS.output_file,
"w")) as writer:
for result in estimator.predict(input_fn, yield_single_examples=True):
unique_id = int(result["unique_id"])
feature = unique_id_to_feature[unique_id]
output_json = collections.OrderedDict()
output_json["linex_index"] = unique_id
all_features = []
for (i, token) in enumerate(feature.tokens):
all_layers = []
for (j, layer_index) in enumerate(layer_indexes):
layer_output = result["layer_output_%d" % j]
layers = collections.OrderedDict()
layers["index"] = layer_index
layers["values"] = [
round(float(x), 6) for x in layer_output[i:(i + 1)].flat
]
all_layers.append(layers)
features = collections.OrderedDict()
features["token"] = token
features["layers"] = all_layers
all_features.append(features)
output_json["features"] = all_features
writer.write(json.dumps(output_json) + "\n")
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("init_checkpoint")
flags.mark_flag_as_required("output_file")
tf.app.run()
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/extract_features.py |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from horovod.tensorflow.compression import Compression
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, hvd=None, manual_fp16=False, use_fp16=False, num_accumulation_steps=1,
optimizer_type="adam", allreduce_post_accumulation=False):
"""Creates an optimizer training op."""
global_step = tf.compat.v1.train.get_or_create_global_step()
# avoid step change in learning rate at end of warmup phase
if optimizer_type == "adam":
power = 1.0
decayed_learning_rate_at_crossover_point = init_lr * (
(1.0 - float(num_warmup_steps) / float(num_train_steps)) ** power)
else:
power = 0.5
decayed_learning_rate_at_crossover_point = init_lr
adjusted_init_lr = init_lr * (init_lr / decayed_learning_rate_at_crossover_point)
print('decayed_learning_rate_at_crossover_point = %e, adjusted_init_lr = %e' % (decayed_learning_rate_at_crossover_point, adjusted_init_lr))
learning_rate = tf.constant(value=adjusted_init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.compat.v1.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=power,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
if optimizer_type == "lamb":
print("Initializing LAMB Optimizer")
optimizer = LAMBOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
else:
print("Initializing ADAM Weight Decay Optimizer")
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if hvd is not None and (num_accumulation_steps == 1 or (not allreduce_post_accumulation)):
optimizer = hvd.DistributedOptimizer(optimizer, sparse_as_dense=True, compression=Compression.fp16 if use_fp16 or manual_fp16 else Compression.none)
if manual_fp16 or use_fp16:
loss_scale_manager = tf.contrib.mixed_precision.ExponentialUpdateLossScaleManager(init_loss_scale=2**32, incr_every_n_steps=1000, decr_every_n_nan_or_inf=2, decr_ratio=0.5)
optimizer = tf.contrib.mixed_precision.LossScaleOptimizer(optimizer, loss_scale_manager)
tvars = tf.trainable_variables()
grads_and_vars = optimizer.compute_gradients(loss * 1.0 / num_accumulation_steps, tvars)
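# Gradient accumulation path: the scaled gradients from above are clipped and summed into
# per-variable accum_vars; the optimizer is applied (and global_step advanced) only every
# num_accumulation_steps steps, and only if the accumulated batch stayed finite. Note that
# this branch all-reduces batch_finite with Horovod, so it assumes hvd is not None.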
if num_accumulation_steps > 1:
local_step = tf.get_variable(name="local_step", shape=[], dtype=tf.int32, trainable=False,
initializer=tf.zeros_initializer)
batch_finite = tf.get_variable(name="batch_finite", shape=[], dtype=tf.bool, trainable=False,
initializer=tf.ones_initializer)
accum_vars = [tf.get_variable(
name=tvar.name.split(":")[0] + "/accum",
shape=tvar.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer()) for tvar in tf.trainable_variables()]
reset_step = tf.cast(tf.math.equal(local_step % num_accumulation_steps, 0), dtype=tf.bool)
local_step = tf.cond(reset_step, lambda:local_step.assign(tf.ones_like(local_step)), lambda:local_step.assign_add(1))
grads_and_vars_and_accums = [(gv[0],gv[1],accum_vars[i]) for i, gv in enumerate(grads_and_vars) if gv[0] is not None]
grads, tvars, accum_vars = list(zip(*grads_and_vars_and_accums))
all_are_finite = tf.reduce_all([tf.reduce_all(tf.is_finite(g)) for g in grads]) if manual_fp16 or use_fp16 else tf.constant(True, dtype=tf.bool)
batch_finite = tf.cond(reset_step,
lambda: batch_finite.assign(tf.math.logical_and(tf.constant(True, dtype=tf.bool), all_are_finite)),
lambda:batch_finite.assign(tf.math.logical_and(batch_finite, all_are_finite)))
# This is how the model was pre-trained.
# ensure global norm is a finite number
# to prevent clip_by_global_norm from having a hissy fit.
(clipped_grads, _) = tf.clip_by_global_norm(
grads, clip_norm=1.0,
use_norm=tf.cond(
all_are_finite,
lambda: tf.global_norm(grads),
lambda: tf.constant(1.0)))
accum_vars = tf.cond(reset_step,
lambda: [accum_vars[i].assign(grad) for i, grad in enumerate(clipped_grads)],
lambda: [accum_vars[i].assign_add(grad) for i, grad in enumerate(clipped_grads)])
def update(accum_vars):
if allreduce_post_accumulation and hvd is not None:
accum_vars = [hvd.allreduce(tf.convert_to_tensor(accum_var), compression=Compression.fp16 if use_fp16 or manual_fp16 else Compression.none) if isinstance(accum_var, tf.IndexedSlices)
else hvd.allreduce(accum_var, compression=Compression.fp16 if use_fp16 or manual_fp16 else Compression.none) for accum_var in accum_vars]
return optimizer.apply_gradients(list(zip(accum_vars, tvars)), global_step=global_step)
update_step = tf.identity(tf.cast(tf.math.equal(local_step % num_accumulation_steps, 0), dtype=tf.bool), name="update_step")
update_op = tf.cond(update_step,
lambda: update(accum_vars), lambda: tf.no_op())
new_global_step = tf.cond(tf.math.logical_and(update_step, tf.cast(hvd.allreduce(tf.cast(batch_finite, tf.int32)), tf.bool)), lambda: global_step+1, lambda: global_step)
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(update_op, [global_step.assign(new_global_step)])
else:
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
grads, tvars = list(zip(*grads_and_vars))
all_are_finite = tf.reduce_all(
[tf.reduce_all(tf.is_finite(g)) for g in grads]) if use_fp16 or manual_fp16 else tf.constant(True, dtype=tf.bool)
# This is how the model was pre-trained.
# ensure global norm is a finite number
# to prevent clip_by_global_norm from having a hissy fit.
(clipped_grads, _) = tf.clip_by_global_norm(
grads, clip_norm=1.0,
use_norm=tf.cond(
all_are_finite,
lambda: tf.global_norm(grads),
lambda: tf.constant(1.0)))
train_op = optimizer.apply_gradients(
list(zip(clipped_grads, tvars)), global_step=global_step)
new_global_step = tf.cond(all_are_finite, lambda: global_step + 1, lambda: global_step)
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
class AdamWeightDecayOptimizer(tf.compat.v1.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = tf.identity(learning_rate, name='learning_rate')
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None,
manual_fp16=False):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
has_shadow = manual_fp16 and param.dtype.base_dtype != tf.float32
if has_shadow:
# create shadow fp32 weights for fp16 variable
param_fp32 = tf.get_variable(
name=param_name + "/shadow",
dtype=tf.float32,
trainable=False,
initializer=tf.cast(param.initialized_value(),tf.float32))
else:
param_fp32 = param
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
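# Note: unlike the LAMB path below, no bias correction is applied to m/v here,
# matching the original BERT AdamWeightDecayOptimizer.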
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param_fp32
update_with_lr = self.learning_rate * update
next_param = param_fp32 - update_with_lr
if has_shadow:
# cast shadow fp32 weights to fp16 and assign to trainable variable
param.assign(tf.cast(next_param, param.dtype.base_dtype))
assignments.extend(
[param_fp32.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
class LAMBOptimizer(tf.compat.v1.train.Optimizer):
"""A LAMB optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="LAMBOptimizer"):
"""Constructs a LAMBOptimizer."""
super(LAMBOptimizer, self).__init__(False, name)
self.learning_rate = tf.identity(learning_rate, name='learning_rate')
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step, name=None,
manual_fp16=False):
"""See base class."""
assignments = []
steps = tf.cast(global_step, tf.float32)
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
has_shadow = manual_fp16 and param.dtype.base_dtype != tf.float32
if has_shadow:
# create shadow fp32 weights for fp16 variable
param_fp32 = tf.get_variable(
name=param_name + "/shadow",
dtype=tf.float32,
trainable=False,
initializer=tf.cast(param.initialized_value(),tf.float32))
else:
param_fp32 = param
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# LAMB update
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
beta1_correction = (1 - self.beta_1 ** steps)
beta2_correction = (1 - self.beta_2 ** steps)
next_m_unbiased = next_m / beta1_correction
next_v_unbiased = next_v / beta2_correction
update = next_m_unbiased / (tf.sqrt(next_v_unbiased) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param_fp32
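# LAMB layer-wise adaptation: scale the update by the trust ratio ||w|| / ||update||
# (computed below), falling back to 1.0 whenever either norm is zero.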
w_norm = linalg_ops.norm(param, ord=2)
g_norm = linalg_ops.norm(update, ord=2)
ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(
math_ops.greater(g_norm, 0), (w_norm / g_norm), 1.0), 1.0)
update_with_lr = ratio * self.learning_rate * update
next_param = param_fp32 - update_with_lr
if has_shadow:
# cast shadow fp32 weights to fp16 and assign to trainable variable
param.assign(tf.cast(next_param, param.dtype.base_dtype))
assignments.extend(
[param_fp32.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
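# Hypothetical usage sketch (not part of the training scripts, shown for reference only):
#   loss = ...  # scalar training loss
#   train_op = create_optimizer(loss, init_lr=5e-5, num_train_steps=10000,
#                               num_warmup_steps=1000, optimizer_type="lamb")
# Running train_op once applies a (possibly accumulated) parameter update and,
# when the gradients are finite, advances the global step.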
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/optimization.py |
# coding=utf-8
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import, division, print_function
import collections
import json
import math
import os
import random
import shutil
import time
import pickle
import horovod.tensorflow as hvd
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.client import device_lib
import modeling
import optimization
import tokenization
from utils.create_squad_data import *
from utils.utils import LogEvalRunHook, LogTrainRunHook
from ft_tensorflow_quantization import get_calibrators, QuantDense, QuantDescriptor
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 8, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
"Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-6, "The initial learning rate for Adam.")
flags.DEFINE_bool("use_trt", False, "Whether to use TF-TRT")
flags.DEFINE_bool("horovod", False, "Whether to use Horovod for multi-gpu runs")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("num_accumulation_steps", 1,
"Number of accumulation steps before gradient update"
"Global batch size = num_accumulation_steps * train_batch_size")
flags.DEFINE_integer(
"n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer(
"max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_float(
"null_score_diff_threshold", 0.0,
"If null_score - best_non_null is greater than the threshold predict null.")
flags.DEFINE_bool("use_fp16", False, "Whether to use fp32 or fp16 arithmetic on GPU.")
flags.DEFINE_bool("use_xla", False, "Whether to enable XLA JIT compilation.")
flags.DEFINE_integer("num_eval_iterations", None,
"How many eval iterations to run - performs inference on subset")
# TRTIS Specific flags
flags.DEFINE_bool("export_trtis", False, "Whether to export saved model or run inference with TRTIS")
flags.DEFINE_string("trtis_model_name", "bert", "exports to appropriate directory for TRTIS")
flags.DEFINE_integer("trtis_model_version", 1, "exports to appropriate directory for TRTIS")
flags.DEFINE_string("trtis_server_url", "localhost:8001", "exports to appropriate directory for TRTIS")
flags.DEFINE_bool("trtis_model_overwrite", False, "If True, will overwrite an existing directory with the specified 'model_name' and 'version_name'")
flags.DEFINE_integer("trtis_max_batch_size", 8, "Specifies the 'max_batch_size' in the TRTIS model config. See the TRTIS documentation for more info.")
flags.DEFINE_float("trtis_dyn_batching_delay", 0, "Determines the dynamic_batching queue delay in milliseconds(ms) for the TRTIS model config. Use '0' or '-1' to specify static batching. See the TRTIS documentation for more info.")
flags.DEFINE_integer("trtis_engine_count", 1, "Specifies the 'instance_group' count value in the TRTIS model config. See the TRTIS documentation for more info.")
flags.DEFINE_bool("do_calib", False, "Whether to do calibration.")
flags.DEFINE_bool("if_quant", False, "Whether to quantize.")
flags.DEFINE_integer("calib_batch", 4, "Number of batches for calibration.")
flags.DEFINE_string("calib_method", "percentile", "calibration method [percentile, mse, max, entropy]")
flags.DEFINE_float("percentile", 99.99, "percentile for percentile calibrator")
flags.DEFINE_string("calibrator_file", "calibrators.pkl", "pickle file for calibrators")
flags.DEFINE_string("quant_mode", 'ft2', "predefined quantization mode, choices: ['ft1', 'ft2', 'ft3', 'trt']")
flags.DEFINE_bool("distillation", False, "Whether or not to use the techer-student model for finetuning (Knowledge distillation)")
flags.DEFINE_string("teacher", None, "teacher checkpoint file for distillation")
flags.DEFINE_float("distillation_loss_scale", 10000., "scale applied to distillation component of loss")
if FLAGS.quant_mode == 'ft1':
KERNEL_AXIS = 1
ACTIVATION_NARROW_RANGE = False
DISABLE_LIST = ['aftergemm', 'softmax_input', 'residual_input', 'local_input', 'final_input']
FUSE_QKV = False
elif FLAGS.quant_mode == 'ft2':
KERNEL_AXIS = None
ACTIVATION_NARROW_RANGE = False
DISABLE_LIST = ['local_input', 'softmax_input', 'final_input']
FUSE_QKV = True
elif FLAGS.quant_mode == 'ft3':
KERNEL_AXIS = None
ACTIVATION_NARROW_RANGE = False
DISABLE_LIST = ['local_input', 'final_input']
FUSE_QKV = True
elif FLAGS.quant_mode == 'trt':
# for demobert
KERNEL_AXIS = None
ACTIVATION_NARROW_RANGE = False
DISABLE_LIST = ['aftergemm', 'softmax_input']
FUSE_QKV = True
else:
raise ValueError("wrong argument value for 'quant_mode'")
input_desc = QuantDescriptor('input', narrow_range=ACTIVATION_NARROW_RANGE, disable_key_words=DISABLE_LIST)
kernel_desc = QuantDescriptor('kernel', axis=KERNEL_AXIS, disable_key_words=DISABLE_LIST)
QuantDense.set_default_quant_desc_input(input_desc)
QuantDense.set_default_quant_desc_kernel(kernel_desc)
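# The descriptors above become the process-wide defaults, so every QuantDense layer built
# afterwards (the BERT dense layers in modeling.py) picks up these input/kernel quantizer
# settings; quantizers whose names contain a DISABLE_LIST keyword are left disabled.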
class CalibrationHook(tf.train.SessionRunHook):
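# Calibration lifecycle of this hook:
#   begin()      - builds the per-tensor calibration-step ops and placeholder-fed assign ops
#                  for the quantizer min/max variables.
#   before_run() - fetches the calibration step on every batch so the collectors observe
#                  activation and weight statistics.
#   end()        - computes the ranges, optionally fuses the Q/K/V ranges, loads them into
#                  the graph and saves a calibrated checkpoint plus the pickled calibrators.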
def __init__(self, layer_num):
self.layer_num = layer_num
self.calibrator_lists = {}
def begin(self):
self.saver = tf.train.Saver()
tf.compat.v1.logging.info("initializing calibrators")
graph = tf.compat.v1.get_default_graph()
self.calibrator_lists['input'] = get_calibrators('input', collector_type='histogram')
self.calibrator_lists['kernel'] = get_calibrators('kernel', collector_type='max', axis=KERNEL_AXIS)
for k in ['input', 'kernel']:
tf.compat.v1.logging.info("There are {} calibrators in collection '{}'".format(len(self.calibrator_lists[k]), k))
self.calib_step = [
calibrator.calib_step_op(graph) for _, calib_list in self.calibrator_lists.items() for calibrator in calib_list]
self.placeholders = {}
self.load_min_op = {}
self.load_max_op = {}
self.calibrator_reverse_map = {}
for _, calib_list in self.calibrator_lists.items():
for i, calibrator in enumerate(calib_list):
if calibrator.tensor_name_prefix in self.placeholders:
raise ValueError("repeated name prefix")
self.placeholders[calibrator.tensor_name_prefix] = tf.placeholder(tf.float32)
self.load_min_op[calibrator.tensor_name_prefix] = tf.compat.v1.assign(graph.get_tensor_by_name(calibrator.quant_min_name),
self.placeholders[calibrator.tensor_name_prefix])
self.load_max_op[calibrator.tensor_name_prefix] = tf.compat.v1.assign(graph.get_tensor_by_name(calibrator.quant_max_name),
self.placeholders[calibrator.tensor_name_prefix])
self.calibrator_reverse_map[calibrator.tensor_name_prefix] = calibrator
def before_run(self, run_context):
tf.compat.v1.logging.info("registering calibration step")
return tf.estimator.SessionRunArgs(
fetches=self.calib_step)
def end(self, session):
tf.compat.v1.logging.info("computing calibration ranges")
if FLAGS.calib_method == 'max':
tf.compat.v1.logging.info("max calibration.")
for calibrator in self.calibrator_lists['input']:
calibrator.compute_range('max')
elif FLAGS.calib_method == 'percentile':
tf.compat.v1.logging.info("percentile calibration with value {}.".format(FLAGS.percentile))
for calibrator in self.calibrator_lists['input']:
calibrator.compute_range('percentile', percentile=FLAGS.percentile)
elif FLAGS.calib_method == 'mse':
tf.compat.v1.logging.info("mse calibration.")
for calibrator in self.calibrator_lists['input']:
calibrator.compute_range('mse')
elif FLAGS.calib_method == 'entropy':
tf.compat.v1.logging.info("entropy calibration.")
for calibrator in self.calibrator_lists['input']:
calibrator.compute_range('entropy')
else:
raise ValueError("Unsupported calibration method.")
for calibrator in self.calibrator_lists['kernel']:
calibrator.compute_range('max')
if FUSE_QKV:
tf.compat.v1.logging.info("fusing QKV")
for i in range(self.layer_num):
prefix = f"bert/encoder/layer_{i}/attention/self"
tf.compat.v1.logging.info(f"FUSE_QKV: {prefix:50}")
fuse_list = [self.calibrator_reverse_map[prefix + f"/{name}/kernel_quantizer"] for name in ['query', 'key', 'value']]
self.fuse3(*fuse_list)
fuse_list = [self.calibrator_reverse_map[prefix + f"/{name}/aftergemm_quantizer"] for name in ['query', 'key', 'value']]
self.fuse3(*fuse_list)
fuse_list = [self.calibrator_reverse_map[prefix + f"/matmul_{name}_input_quantizer"] for name in ['q', 'k', 'v']]
self.fuse3(*fuse_list)
tf.compat.v1.logging.info("loading calibration ranges")
session.run(self.load_min_op, {self.placeholders[calibrator.tensor_name_prefix]:calibrator.calib_min for _, calib_list in self.calibrator_lists.items() for calibrator in calib_list})
session.run(self.load_max_op, {self.placeholders[calibrator.tensor_name_prefix]:calibrator.calib_max for _, calib_list in self.calibrator_lists.items() for calibrator in calib_list})
tf.compat.v1.logging.info("saving calibrated model")
with open(os.path.join(FLAGS.output_dir, FLAGS.calibrator_file), 'wb') as f:
pickle.dump(self.calibrator_lists, f)
self.saver.save(session, os.path.join(FLAGS.output_dir, 'model.ckpt-calibrated'))
def fuse3(self, qq, qk, qv):
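# Q, K and V are given one shared quantization range (min of mins / max of maxes)
# so that a fused QKV GEMM can reuse a single scale.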
if not hasattr(qq, 'calib_min') or not hasattr(qk, 'calib_min') or not hasattr(qv, 'calib_min') or \
not hasattr(qq, 'calib_max') or not hasattr(qk, 'calib_max') or not hasattr(qv, 'calib_max'):
raise RuntimeError('missing min/max buffer, unable to fuse')
qmax = qq.calib_max
kmax = qk.calib_max
vmax = qv.calib_max
qmin = qq.calib_min
kmin = qk.calib_min
vmin = qv.calib_min
amax = max(qmax, kmax, vmax)
qq._calib_max = amax
qk._calib_max = amax
qv._calib_max = amax
amin = min(qmin, kmin, vmin)
qq._calib_min = amin
qk._calib_min = amin
qv._calib_min = amin
tf.compat.v1.logging.info(
f' q={qmin:7.4f}/{qmax:7.4f} k={kmin:7.4f}/{kmax:7.4f} v={vmin:7.4f}/{vmax:7.4f} -> {amin:7.4f}/{amax:7.4f}')
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
use_one_hot_embeddings, if_quant):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
compute_type=tf.float32,
if_quant=if_quant)
final_hidden = model.get_sequence_output()
final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/squad/output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden,
[batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0, name='unstack')
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
return (start_logits, end_logits)
def get_frozen_tftrt_model(bert_config, shape, use_one_hot_embeddings, init_checkpoint):
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True
output_node_names = ['unstack']
with tf.Session(config=tf_config) as tf_sess:
input_ids = tf.placeholder(tf.int32, shape, 'input_ids')
input_mask = tf.placeholder(tf.int32, shape, 'input_mask')
segment_ids = tf.placeholder(tf.int32, shape, 'segment_ids')
(start_logits, end_logits) = create_model(bert_config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
if_quant=False)
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf_sess.run(tf.global_variables_initializer())
print("LOADED!")
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
else:
init_string = ", *NOTTTTTTTTTTTTTTTTTTTTT"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
frozen_graph = tf.graph_util.convert_variables_to_constants(tf_sess,
tf_sess.graph.as_graph_def(), output_node_names)
num_nodes = len(frozen_graph.node)
print('Converting graph using TensorFlow-TensorRT...')
from tensorflow.python.compiler.tensorrt import trt_convert as trt
converter = trt.TrtGraphConverter(
input_graph_def=frozen_graph,
nodes_blacklist=output_node_names,
max_workspace_size_bytes=(4096 << 20) - 1000,
precision_mode = "FP16" if FLAGS.use_fp16 else "FP32",
minimum_segment_size=4,
is_dynamic_op=True,
maximum_cached_engines=1000
)
frozen_graph = converter.convert()
print('Total node count before and after TF-TRT conversion:',
num_nodes, '->', len(frozen_graph.node))
print('TRT node count:',
len([1 for n in frozen_graph.node if str(n.op) == 'TRTEngineOp']))
with tf.io.gfile.GFile("frozen_modelTRT.pb", "wb") as f:
f.write(frozen_graph.SerializeToString())
return frozen_graph
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps,
hvd=None, use_fp16=False, use_one_hot_embeddings=False):
"""Returns `model_fn` closure for Estimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for Estimator."""
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
if FLAGS.if_quant:
if_quant = True
else:
if_quant = False
if FLAGS.do_calib and (mode == tf.estimator.ModeKeys.TRAIN):
is_training = False
if_quant = False
if not is_training and FLAGS.use_trt:
trt_graph = get_frozen_tftrt_model(bert_config, input_ids.shape, use_one_hot_embeddings, init_checkpoint)
(start_logits, end_logits) = tf.import_graph_def(trt_graph,
input_map={'input_ids':input_ids, 'input_mask':input_mask, 'segment_ids':segment_ids},
return_elements=['unstack:0', 'unstack:1'],
name='')
predictions = {
"unique_ids": unique_ids,
"start_logits": start_logits,
"end_logits": end_logits,
}
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
return output_spec
if is_training and FLAGS.distillation:
tf.compat.v1.logging.info("initializing teacher model.")
with tf.variable_scope("teacher"):
(start_logits_teacher, end_logits_teacher) = create_model(
bert_config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
if_quant=False)
tvars = tf.trainable_variables()
initialized_variable_names_t = {}
if not FLAGS.teacher:
raise ValueError("no teacher checkpoint is supplied.")
if (hvd is None or hvd.rank() == 0):
(assignment_map_t, initialized_variable_names_t
) = modeling.get_assignment_map_from_checkpoint(tvars, FLAGS.teacher, "teacher/")
tf.train.init_from_checkpoint(FLAGS.teacher, assignment_map_t)
trainable_vars = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES)
del trainable_vars[:]
tf.compat.v1.logging.info("!!!!!!!!!!if_quant is {}!!!!!!!!!!".format(if_quant))
(start_logits, end_logits) = create_model(
bert_config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
if_quant=if_quant)
tvars = tf.trainable_variables()
qvars = tf.get_collection("quantization_variables")
initialized_variable_names = {}
if init_checkpoint and (hvd is None or hvd.rank() == 0):
tf.compat.v1.logging.info("restore from checkpoint: " + init_checkpoint)
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
(assignment_map_q, initialized_variable_names_q
) = modeling.get_assignment_map_from_checkpoint(qvars, init_checkpoint, allow_shape_mismatch=True)
assignment_map.update(assignment_map_q)
initialized_variable_names.update(initialized_variable_names_q)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" %d name = %s, shape = %s%s", 0 if hvd is None else hvd.rank(), var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
seq_length = modeling.get_shape_list(input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
def fixprob(att, T=3):
att = tf.nn.softmax(att/T, axis=-1) + 1e-9
return att
def kl_loss(x, y):
x = fixprob(x)
y = fixprob(y)
X = tf.distributions.Categorical(probs=x)
Y = tf.distributions.Categorical(probs=y)
return tf.math.reduce_mean(tf.distributions.kl_divergence(X, Y))
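# The SQuAD loss below averages the start- and end-position cross entropy; with
# --distillation it additionally pulls the student's start/end logits toward the
# teacher's via the temperature-softened KL term, scaled by distillation_loss_scale.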
start_positions = features["start_positions"]
end_positions = features["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2.0
if FLAGS.distillation:
dloss = kl_loss(start_logits, start_logits_teacher) + kl_loss(end_logits, end_logits_teacher)
total_loss = total_loss + dloss * FLAGS.distillation_loss_scale
if FLAGS.do_calib:
global_step = tf.compat.v1.train.get_or_create_global_step()
new_global_step = global_step + 1
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(tf.no_op(), [global_step.assign(new_global_step)])
else:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, hvd, False, use_fp16, FLAGS.num_accumulation_steps)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"unique_ids": unique_ids,
"start_logits": start_logits,
"end_logits": end_logits,
}
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
else:
raise ValueError(
"Only TRAIN and PREDICT modes are supported: %s" % (mode))
return output_spec
return model_fn
def input_fn_builder(input_file, batch_size, seq_length, is_training, drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
name_to_features = {
"unique_ids": tf.io.FixedLenFeature([], tf.int64),
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
}
if is_training:
name_to_features["start_positions"] = tf.io.FixedLenFeature([], tf.int64)
name_to_features["end_positions"] = tf.io.FixedLenFeature([], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn():
"""The actual input function."""
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.TFRecordDataset(input_file, num_parallel_reads=4)
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.apply(tf.data.experimental.ignore_errors())
d = d.shuffle(buffer_size=100)
d = d.repeat()
else:
d = tf.data.TFRecordDataset(input_file)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file):
"""Write final predictions to the json file and log-odds of null if needed."""
tf.compat.v1.logging.info("Writing predictions to: %s" % (output_prediction_file))
tf.compat.v1.logging.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if FLAGS.version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if FLAGS.version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if FLAGS.version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not FLAGS.version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > FLAGS.null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with tf.io.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.io.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if FLAGS.version_2_with_negative:
with tf.io.gfile.GFile(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if FLAGS.verbose_logging:
tf.compat.v1.logging.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def validate_flags_or_throw(bert_config):
"""Validate the input FLAGS or throw an exception."""
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_calib and not FLAGS.do_predict and not FLAGS.export_trtis:
raise ValueError("At least one of `do_train` or `do_predict` or `export_SavedModel` must be True.")
if FLAGS.do_train or FLAGS.do_calib:
if not FLAGS.train_file:
raise ValueError(
"If `do_train` or `do_calib` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError(
"The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
def export_model(estimator, export_dir, init_checkpoint):
"""Exports a checkpoint in SavedModel format in a directory structure compatible with TRTIS."""
def serving_input_fn():
label_ids = tf.placeholder(tf.int32, [None,], name='unique_ids')
input_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids')
input_mask = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_mask')
segment_ids = tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='segment_ids')
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'unique_ids': label_ids,
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids,
})()
return input_fn
saved_dir = estimator.export_savedmodel(
export_dir,
serving_input_fn,
assets_extra=None,
as_text=False,
checkpoint_path=init_checkpoint,
strip_default_attrs=False)
model_name = FLAGS.trtis_model_name
model_folder = export_dir + "/trtis_models/" + model_name
version_folder = model_folder + "/" + str(FLAGS.trtis_model_version)
final_model_folder = version_folder + "/model.savedmodel"
if not os.path.exists(version_folder):
os.makedirs(version_folder)
if (not os.path.exists(final_model_folder)):
os.rename(saved_dir, final_model_folder)
print("Model saved to dir", final_model_folder)
else:
if (FLAGS.trtis_model_overwrite):
shutil.rmtree(final_model_folder)
os.rename(saved_dir, final_model_folder)
print("WARNING: Existing model was overwritten. Model dir: {}".format(final_model_folder))
else:
print("ERROR: Could not save TRTIS model. Folder already exists. Use '--trtis_model_overwrite=True' if you would like to overwrite an existing model. Model dir: {}".format(final_model_folder))
return
# Now build the config for TRTIS. Check to make sure we can overwrite it, if it exists
config_filename = os.path.join(model_folder, "config.pbtxt")
if (os.path.exists(config_filename) and not FLAGS.trtis_model_overwrite):
print("ERROR: Could not save TRTIS model config. Config file already exists. Use '--trtis_model_overwrite=True' if you would like to overwrite an existing model config. Model config: {}".format(config_filename))
return
config_template = r"""
name: "{model_name}"
platform: "tensorflow_savedmodel"
max_batch_size: {max_batch_size}
input [
{{
name: "unique_ids"
data_type: TYPE_INT32
dims: [ 1 ]
reshape: {{ shape: [ ] }}
}},
{{
name: "segment_ids"
data_type: TYPE_INT32
dims: {seq_length}
}},
{{
name: "input_ids"
data_type: TYPE_INT32
dims: {seq_length}
}},
{{
name: "input_mask"
data_type: TYPE_INT32
dims: {seq_length}
}}
]
output [
{{
name: "end_logits"
data_type: TYPE_FP32
dims: {seq_length}
}},
{{
name: "start_logits"
data_type: TYPE_FP32
dims: {seq_length}
}}
]
{dynamic_batching}
instance_group [
{{
count: {engine_count}
kind: KIND_GPU
gpus: [{gpu_list}]
}}
]"""
batching_str = ""
max_batch_size = FLAGS.trtis_max_batch_size
if (FLAGS.trtis_dyn_batching_delay > 0):
# Use only full and half full batches
pref_batch_size = [int(max_batch_size / 2.0), max_batch_size]
batching_str = r"""
dynamic_batching {{
preferred_batch_size: [{0}]
max_queue_delay_microseconds: {1}
}}""".format(", ".join([str(x) for x in pref_batch_size]), int(FLAGS.trtis_dyn_batching_delay * 1000.0))
config_values = {
"model_name": model_name,
"max_batch_size": max_batch_size,
"seq_length": FLAGS.max_seq_length,
"dynamic_batching": batching_str,
"gpu_list": ", ".join([x.name.split(":")[-1] for x in device_lib.list_local_devices() if x.device_type == "GPU"]),
"engine_count": FLAGS.trtis_engine_count
}
with open(model_folder + "/config.pbtxt", "w") as file:
final_config_str = config_template.format_map(config_values)
file.write(final_config_str)
def main(_):
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
if FLAGS.horovod:
hvd.init()
if FLAGS.use_fp16:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
validate_flags_or_throw(bert_config)
tf.io.gfile.makedirs(FLAGS.output_dir)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
master_process = True
training_hooks = []
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps
hvd_rank = 0
config = tf.compat.v1.ConfigProto()
learning_rate = FLAGS.learning_rate
if FLAGS.horovod:
tf.compat.v1.logging.info("Multi-GPU training with TF Horovod")
tf.compat.v1.logging.info("hvd.size() = %d hvd.rank() = %d", hvd.size(), hvd.rank())
global_batch_size = FLAGS.train_batch_size * hvd.size() * FLAGS.num_accumulation_steps
learning_rate = learning_rate * hvd.size()
master_process = (hvd.rank() == 0)
hvd_rank = hvd.rank()
config.gpu_options.visible_device_list = str(hvd.local_rank())
if hvd.size() > 1:
training_hooks.append(hvd.BroadcastGlobalVariablesHook(0))
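# Each rank is pinned to its local GPU; the global batch size and the learning rate
# both scale linearly with hvd.size().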
if FLAGS.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir if master_process else None,
session_config=config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps if master_process else None,
keep_checkpoint_max=1)
if master_process:
tf.compat.v1.logging.info("***** Configuration *****")
for key in FLAGS.__flags.keys():
tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key)))
tf.compat.v1.logging.info("**************************")
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_calib:
training_hooks.append(CalibrationHook(bert_config.num_hidden_layers))
training_hooks.append(LogTrainRunHook(global_batch_size, hvd_rank, FLAGS.save_checkpoints_steps))
# Prepare Training Data
if FLAGS.do_train or (FLAGS.do_calib and master_process):
train_examples = read_squad_examples(
input_file=FLAGS.train_file, is_training=True,
version_2_with_negative=FLAGS.version_2_with_negative)
num_train_steps = int(
len(train_examples) / global_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
# Pre-shuffle the input to avoid having to make a very large shuffle
# buffer in the `input_fn`.
rng = random.Random(12345)
rng.shuffle(train_examples)
if FLAGS.do_calib:
num_train_steps = FLAGS.calib_batch
start_index = 0
if FLAGS.do_calib:
end_index = min(len(train_examples), num_train_steps * global_batch_size)
else:
end_index = len(train_examples)
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record")]
if FLAGS.horovod:
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record{}".format(i)) for i in range(hvd.size())]
num_examples_per_rank = len(train_examples) // hvd.size()
remainder = len(train_examples) % hvd.size()
if hvd.rank() < remainder:
start_index = hvd.rank() * (num_examples_per_rank+1)
end_index = start_index + num_examples_per_rank + 1
else:
start_index = hvd.rank() * num_examples_per_rank + remainder
end_index = start_index + (num_examples_per_rank)
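# Each rank converts and trains on its own contiguous slice of the shuffled examples;
# ranks with rank < remainder take one extra example so the whole set is covered.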
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
hvd=None if not FLAGS.horovod else hvd,
use_fp16=FLAGS.use_fp16)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
if FLAGS.do_train or (FLAGS.do_calib and master_process):
# We write to a temporary file to avoid storing very large constant tensors
# in memory.
train_writer = FeatureWriter(
filename=tmp_filenames[hvd_rank],
is_training=True)
convert_examples_to_features(
examples=train_examples[start_index:end_index],
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature,
verbose_logging=FLAGS.verbose_logging)
train_writer.close()
tf.compat.v1.logging.info("***** Running training *****")
tf.compat.v1.logging.info(" Num orig examples = %d", end_index - start_index)
tf.compat.v1.logging.info(" Num split examples = %d", train_writer.num_features)
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.compat.v1.logging.info(" Num steps = %d", num_train_steps)
tf.compat.v1.logging.info(" LR = %f", learning_rate)
del train_examples
train_input_fn = input_fn_builder(
input_file=tmp_filenames,
batch_size=FLAGS.train_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
hvd=None if not FLAGS.horovod else hvd)
train_start_time = time.time()
estimator.train(input_fn=train_input_fn, hooks=training_hooks, max_steps=num_train_steps)
train_time_elapsed = time.time() - train_start_time
train_time_wo_overhead = training_hooks[-1].total_time
avg_sentences_per_second = num_train_steps * global_batch_size * 1.0 / train_time_elapsed
ss_sentences_per_second = (num_train_steps - training_hooks[-1].skipped) * global_batch_size * 1.0 / train_time_wo_overhead
if master_process:
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Training Time = %0.2f for Sentences = %d", train_time_elapsed,
num_train_steps * global_batch_size)
tf.compat.v1.logging.info("Total Training Time W/O Overhead = %0.2f for Sentences = %d", train_time_wo_overhead,
(num_train_steps - training_hooks[-1].skipped) * global_batch_size)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) with overhead = %0.2f", avg_sentences_per_second)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
tf.compat.v1.logging.info("-----------------------------")
if FLAGS.export_trtis and master_process:
export_model(estimator, FLAGS.output_dir, FLAGS.init_checkpoint)
if FLAGS.do_predict and master_process:
eval_examples = read_squad_examples(
input_file=FLAGS.predict_file, is_training=False,
version_2_with_negative=FLAGS.version_2_with_negative)
# Perform evaluation on subset, useful for profiling
if FLAGS.num_eval_iterations is not None:
eval_examples = eval_examples[:FLAGS.num_eval_iterations*FLAGS.predict_batch_size]
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature,
verbose_logging=FLAGS.verbose_logging)
eval_writer.close()
tf.compat.v1.logging.info("***** Running predictions *****")
tf.compat.v1.logging.info(" Num orig examples = %d", len(eval_examples))
tf.compat.v1.logging.info(" Num split examples = %d", len(eval_features))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_input_fn = input_fn_builder(
input_file=eval_writer.filename,
batch_size=FLAGS.predict_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
all_results = []
eval_hooks = [LogEvalRunHook(FLAGS.predict_batch_size)]
eval_start_time = time.time()
for result in estimator.predict(
predict_input_fn, yield_single_examples=True, hooks=eval_hooks):
if len(all_results) % 1000 == 0:
tf.compat.v1.logging.info("Processing example: %d" % (len(all_results)))
unique_id = int(result["unique_ids"])
start_logits = [float(x) for x in result["start_logits"].flat]
end_logits = [float(x) for x in result["end_logits"].flat]
all_results.append(
RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
eval_time_elapsed = time.time() - eval_start_time
eval_time_wo_overhead = eval_hooks[-1].total_time
time_list = eval_hooks[-1].time_list
time_list.sort()
num_sentences = (eval_hooks[-1].count - eval_hooks[-1].skipped) * FLAGS.predict_batch_size
avg = np.mean(time_list)
cf_50 = max(time_list[:int(len(time_list) * 0.50)])
cf_90 = max(time_list[:int(len(time_list) * 0.90)])
cf_95 = max(time_list[:int(len(time_list) * 0.95)])
cf_99 = max(time_list[:int(len(time_list) * 0.99)])
cf_100 = max(time_list[:int(len(time_list) * 1)])
ss_sentences_per_second = num_sentences * 1.0 / eval_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", eval_time_elapsed,
eval_hooks[-1].count * FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", eval_time_wo_overhead,
(eval_hooks[-1].count - eval_hooks[-1].skipped) * FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Summary Inference Statistics")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.use_fp16 else "fp32")
tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000)
tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
tf.compat.v1.logging.info("-----------------------------")
output_prediction_file = os.path.join(FLAGS.output_dir, "predictions.json")
output_nbest_file = os.path.join(FLAGS.output_dir, "nbest_predictions.json")
output_null_log_odds_file = os.path.join(FLAGS.output_dir, "null_odds.json")
write_predictions(eval_examples, eval_features, all_results,
FLAGS.n_best_size, FLAGS.max_answer_length,
FLAGS.do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file)
if __name__ == "__main__":
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.compat.v1.app.run()
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/run_squad.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/__init__.py |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow as tf
import re
import os
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
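# Illustrative sketch (not part of the original file): a lowercased checkpoint
# name combined with --do_lower_case=False trips the heuristic check above.
# The checkpoint path below is a hypothetical example.
def _case_check_demo():
    try:
        validate_case_matches_checkpoint(
            do_lower_case=False,
            init_checkpoint="uncased_L-12_H-768_A-12/bert_model.ckpt")
    except ValueError:
        pass
    else:
        raise AssertionError("expected a ValueError for mismatched casing")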
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
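# Illustrative sketch (not part of the original file): round-tripping tokens
# and ids with the helpers above, using a toy two-entry vocabulary.
def _vocab_helpers_demo():
    toy_vocab = collections.OrderedDict([("hello", 0), ("world", 1)])
    inv_vocab = {v: k for k, v in toy_vocab.items()}
    tokens = whitespace_tokenize("  hello   world ")   # -> ["hello", "world"]
    ids = convert_by_vocab(toy_vocab, tokens)          # -> [0, 1]
    assert convert_by_vocab(inv_vocab, ids) == tokens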
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
@classmethod
def from_pretrained(cls, pretrained_model_name, do_lower_case=True):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
else:
vocab_file = pretrained_model_name
# redirect to the cache, if necessary
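# NOTE (editorial): `cached_path` and `logger` are not imported or defined in
# this module, so calling `from_pretrained` as written raises a NameError.
# They are presumably expected to come from a download/cache helper and a
# `logging` logger configured elsewhere.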
try:
resolved_vocab_file = cached_path(vocab_file)
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, do_lower_case)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
pretrained_model_name))
tokenizer = None
return tokenizer
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
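# Illustrative sketch (not part of the original file): the greedy
# longest-match-first loop above, run against a toy vocabulary.
def _wordpiece_tokenizer_demo():
    toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
    tokenizer = WordpieceTokenizer(vocab=toy_vocab)
    assert tokenizer.tokenize("unaffable") == ["un", "##aff", "##able"]
    # Tokens longer than max_input_chars_per_word fall back to the unk token.
    assert tokenizer.tokenize("a" * 200) == ["[UNK]"]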
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
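# Illustrative sketch (not part of the original file): how the three
# character-class helpers above classify a few common characters.
def _char_class_demo():
    assert _is_whitespace(" ") and _is_whitespace("\t")
    assert _is_control("\x00") and not _is_control("\t")
    assert _is_punctuation("$") and _is_punctuation(".")
    assert not _is_punctuation("a")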
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/tokenization.py |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run masked LM/next sentence masked_lm pre-training for BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import modeling
import optimization
import tensorflow as tf
import glob
from utils.utils import LogEvalRunHook
from tensorflow.core.protobuf import rewriter_config_pb2
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_files_dir", None,
"Directory with input files, comma separated or single directory.")
flags.DEFINE_string(
"eval_files_dir", None,
"Directory with eval files, comma separated or single directory. ")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string(
"optimizer_type", "lamb",
"Optimizer used for training - LAMB or ADAM")
flags.DEFINE_integer(
"max_seq_length", 512,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer(
"max_predictions_per_seq", 80,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("display_loss_steps", 10,
"How often to print loss")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_integer("num_accumulation_steps", 1,
"Number of accumulation steps before gradient update."
"Global batch size = num_accumulation_steps * train_batch_size")
flags.DEFINE_bool("allreduce_post_accumulation", False, "Whether to all reduce after accumulation of N steps or after each step")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the trainable parameters are printed")
flags.DEFINE_bool("horovod", False, "Whether to use Horovod for multi-gpu runs")
flags.DEFINE_bool("report_loss", True, "Whether to report total loss during training.")
flags.DEFINE_bool("manual_fp16", False, "Whether to use fp32 or fp16 arithmetic on GPU. "
"Manual casting is done instead of using AMP")
flags.DEFINE_bool("use_xla", False, "Whether to enable XLA JIT compilation.")
flags.DEFINE_bool("use_fp16", False, "Whether to enable AMP ops.")
# report samples/sec, total loss and learning rate during training
class _LogSessionRunHook(tf.estimator.SessionRunHook):
def __init__(self, global_batch_size, num_accumulation_steps, display_every=10, hvd_rank=-1):
self.global_batch_size = global_batch_size
self.display_every = display_every
self.hvd_rank = hvd_rank
self.num_accumulation_steps = num_accumulation_steps
def after_create_session(self, session, coord):
self.elapsed_secs = 0.
self.count = 0
self.all_count = 0
self.avg_loss = 0.0
def before_run(self, run_context):
self.t0 = time.time()
if self.num_accumulation_steps <= 1:
if FLAGS.manual_fp16 or FLAGS.use_fp16:
return tf.estimator.SessionRunArgs(
fetches=['step_update:0', 'total_loss:0',
'learning_rate:0', 'nsp_loss:0',
'mlm_loss:0', 'loss_scale:0'])
else:
return tf.estimator.SessionRunArgs(
fetches=['step_update:0', 'total_loss:0',
'learning_rate:0', 'nsp_loss:0',
'mlm_loss:0'])
else:
if FLAGS.manual_fp16 or FLAGS.use_fp16:
return tf.estimator.SessionRunArgs(
fetches=['step_update:0', 'update_step:0', 'total_loss:0',
'learning_rate:0', 'nsp_loss:0',
'mlm_loss:0', 'loss_scale:0'])
else:
return tf.estimator.SessionRunArgs(
fetches=['step_update:0', 'update_step:0', 'total_loss:0',
'learning_rate:0', 'nsp_loss:0',
'mlm_loss:0'])
def after_run(self, run_context, run_values):
self.elapsed_secs += time.time() - self.t0
if self.num_accumulation_steps <=1:
if FLAGS.manual_fp16 or FLAGS.use_fp16:
global_step, total_loss, lr, nsp_loss, mlm_loss, loss_scaler = run_values.results
else:
global_step, total_loss, lr, nsp_loss, mlm_loss = run_values. \
results
update_step = True
else:
if FLAGS.manual_fp16 or FLAGS.use_fp16:
global_step, update_step, total_loss, lr, nsp_loss, mlm_loss, loss_scaler = run_values.results
else:
global_step, update_step, total_loss, lr, nsp_loss, mlm_loss = run_values.\
results
print_step = global_step + 1 # One-based index for printing.
self.avg_loss += total_loss
self.all_count += 1
if update_step:
self.count += 1
if (print_step == 1 or print_step % self.display_every == 0):
dt = self.elapsed_secs / self.count
sent_per_sec = self.global_batch_size / dt
avg_loss_step = self.avg_loss / self.all_count
if self.hvd_rank >= 0:
if FLAGS.manual_fp16 or FLAGS.use_fp16:
print('Rank = %2d :: Step = %6i Throughput = %11.1f MLM Loss = %10.4e NSP Loss = %10.4e Loss = %6.3f Average Loss = %6.3f LR = %6.4e Loss scale = %6.4e' %
(self.hvd_rank, print_step, sent_per_sec, mlm_loss, nsp_loss, total_loss, avg_loss_step, lr, loss_scaler))
else:
print('Rank = %2d :: Step = %6i Throughput = %11.1f MLM Loss = %10.4e NSP Loss = %10.4e Loss = %6.3f Average Loss = %6.3f LR = %6.4e' %
(self.hvd_rank, print_step, sent_per_sec, mlm_loss, nsp_loss, total_loss, avg_loss_step, lr))
else:
if FLAGS.manual_fp16 or FLAGS.use_fp16:
print('Step = %6i Throughput = %11.1f MLM Loss = %10.4e NSP Loss = %10.4e Loss = %6.3f Average Loss = %6.3f LR = %6.4e Loss scale = %6.4e' %
(print_step, sent_per_sec, mlm_loss, nsp_loss, total_loss, avg_loss_step, lr, loss_scaler))
else:
print('Step = %6i Throughput = %11.1f MLM Loss = %10.4e NSP Loss = %10.4e Loss = %6.3f Average Loss = %6.3f LR = %6.4e' %
(print_step, sent_per_sec, mlm_loss, nsp_loss, total_loss, avg_loss_step, lr))
self.elapsed_secs = 0.
self.count = 0
self.avg_loss = 0.0
self.all_count = 0
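# Illustrative sketch (not part of the original file): the throughput number
# printed by the hook above is global_batch_size divided by the average time
# per update step.
def _throughput_formula_demo():
    global_batch_size, elapsed_secs, steps = 256, 2.0, 10
    sent_per_sec = global_batch_size / (elapsed_secs / steps)
    assert sent_per_sec == 1280.0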
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps,
use_one_hot_embeddings, hvd=None):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.compat.v1.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
next_sentence_labels = features["next_sentence_labels"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
compute_type=tf.float16 if FLAGS.manual_fp16 else tf.float32)
(masked_lm_loss,
masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
bert_config, model.get_sequence_output(), model.get_embedding_table(),
masked_lm_positions, masked_lm_ids,
masked_lm_weights)
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels)
masked_lm_loss = tf.identity(masked_lm_loss, name="mlm_loss")
next_sentence_loss = tf.identity(next_sentence_loss, name="nsp_loss")
total_loss = masked_lm_loss + next_sentence_loss
total_loss = tf.identity(total_loss, name='total_loss')
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint and (hvd is None or hvd.rank() == 0):
print("Loading checkpoint", init_checkpoint)
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" %d :: name = %s, shape = %s%s", 0 if hvd is None else hvd.rank(), var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
hvd, FLAGS.manual_fp16, FLAGS.use_fp16, FLAGS.num_accumulation_steps, FLAGS.optimizer_type, FLAGS.allreduce_post_accumulation)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metric_ops = metric_fn(
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels
)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(tf.cast(input_tensor, tf.float32), output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
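# Illustrative sketch (not part of the original file): the weighted mean
# computed above, where zero-weight entries (padding predictions) contribute
# to neither the numerator nor the denominator.
def _masked_lm_loss_demo():
    per_example_loss = [2.0, 4.0, 9.0]
    label_weights = [1.0, 1.0, 0.0]
    numerator = sum(w * l for w, l in zip(label_weights, per_example_loss))
    denominator = sum(label_weights) + 1e-5
    assert abs(numerator / denominator - 3.0) < 1e-4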
def get_next_sentence_output(bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(tf.cast(input_tensor, tf.float32), output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
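# Illustrative sketch (not part of the original file): the flat-offset
# arithmetic used by `gather_indexes` above. Row b, position p maps to flat
# index b * seq_length + p once the batch is flattened.
def _gather_indexes_offset_demo():
    batch_size, seq_length = 2, 3
    positions = [[0, 2], [1, 2]]
    assert len(positions) == batch_size
    flat_positions = [b * seq_length + p
                      for b, row in enumerate(positions) for p in row]
    assert flat_positions == [0, 2, 4, 5]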
def input_fn_builder(input_files,
batch_size,
max_seq_length,
max_predictions_per_seq,
is_training,
num_cpu_threads=4,
hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
def input_fn():
"""The actual input function."""
name_to_features = {
"input_ids":
tf.io.FixedLenFeature([max_seq_length], tf.int64),
"input_mask":
tf.io.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids":
tf.io.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions":
tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.io.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
# and we *don't* want to drop the remainder, otherwise we won't cover
# every sample.
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True if is_training else False))
return d
return input_fn
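# Illustrative sketch (not part of the original file): the builder only
# captures its arguments; no TFRecord files are opened until the Estimator
# invokes the returned closure. The file path below is hypothetical.
def _input_fn_builder_demo():
    input_fn = input_fn_builder(
        input_files=["/tmp/example.tfrecord"],
        batch_size=8,
        max_seq_length=128,
        max_predictions_per_seq=20,
        is_training=False)
    assert callable(input_fn)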
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def main(_):
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if FLAGS.use_fp16:
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
if FLAGS.horovod:
import horovod.tensorflow as hvd
hvd.init()
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
tf.io.gfile.makedirs(FLAGS.output_dir)
input_files = []
for input_file_dir in FLAGS.input_files_dir.split(","):
input_files.extend(tf.io.gfile.glob(os.path.join(input_file_dir, "*")))
if FLAGS.horovod and len(input_files) < hvd.size():
raise ValueError("Input Files must be sharded")
if FLAGS.use_fp16 and FLAGS.manual_fp16:
raise ValueError("AMP and Manual Mixed Precision Training are both activated! Error")
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
config = tf.compat.v1.ConfigProto()
if FLAGS.horovod:
config.gpu_options.visible_device_list = str(hvd.local_rank())
if hvd.rank() == 0:
tf.compat.v1.logging.info("***** Configuration *****")
for key in FLAGS.__flags.keys():
tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key)))
tf.compat.v1.logging.info("**************************")
# config.gpu_options.per_process_gpu_memory_fraction = 0.7
if FLAGS.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
config.graph_options.rewrite_options.memory_optimization = rewriter_config_pb2.RewriterConfig.NO_MEM_OPT
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir,
session_config=config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps if not FLAGS.horovod or hvd.rank() == 0 else None,
# This variable controls how often estimator reports examples/sec.
# Default value is every 100 steps.
# When --report_loss is True, we set to very large value to prevent
# default info reporting from estimator.
# Ideally we should set it to None, but that does not work.
log_step_count_steps=10000 if FLAGS.report_loss else 100)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate if not FLAGS.horovod else FLAGS.learning_rate*hvd.size(),
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps,
use_one_hot_embeddings=False,
hvd=None if not FLAGS.horovod else hvd)
training_hooks = []
if FLAGS.report_loss and (not FLAGS.horovod or hvd.rank() == 0):
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps if not FLAGS.horovod else FLAGS.train_batch_size * FLAGS.num_accumulation_steps * hvd.size()
training_hooks.append(_LogSessionRunHook(global_batch_size, FLAGS.num_accumulation_steps, FLAGS.display_loss_steps))
if FLAGS.horovod and hvd.size() > 1:
training_hooks.append(hvd.BroadcastGlobalVariablesHook(0))
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
if FLAGS.do_train:
tf.compat.v1.logging.info("***** Running training *****")
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
batch_size=FLAGS.train_batch_size,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True,
hvd=None if not FLAGS.horovod else hvd)
estimator.train(input_fn=train_input_fn, hooks=training_hooks, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval and (not FLAGS.horovod or hvd.rank() == 0):
tf.compat.v1.logging.info("***** Running evaluation *****")
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_files = []
for eval_file_dir in FLAGS.eval_files_dir.split(","):
eval_files.extend(tf.io.gfile.glob(os.path.join(eval_file_dir, "*")))
eval_input_fn = input_fn_builder(
input_files=eval_files,
batch_size=FLAGS.eval_batch_size,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False,
hvd=None if not FLAGS.horovod else hvd)
eval_hooks = [LogEvalRunHook(FLAGS.eval_batch_size)]
eval_start_time = time.time()
result = estimator.evaluate(
input_fn=eval_input_fn, steps=FLAGS.max_eval_steps, hooks=eval_hooks)
eval_time_elapsed = time.time() - eval_start_time
eval_time_wo_overhead = eval_hooks[-1].total_time
num_sentences = (eval_hooks[-1].count - eval_hooks[-1].skipped) * FLAGS.eval_batch_size
ss_sentences_per_second = num_sentences * 1.0 / eval_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", eval_time_elapsed,
eval_hooks[-1].count * FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", eval_time_wo_overhead,
(eval_hooks[-1].count - eval_hooks[-1].skipped) * FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Summary Inference Statistics on EVAL set")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.use_fp16 else "fp32")
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
tf.compat.v1.logging.info("-----------------------------")
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.io.gfile.GFile(output_eval_file, "w") as writer:
tf.compat.v1.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.compat.v1.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
flags.mark_flag_as_required("input_files_dir")
if FLAGS.do_eval:
flags.mark_flag_as_required("eval_files_dir")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
if FLAGS.use_xla and FLAGS.manual_fp16:
print('WARNING! Combining --use_xla with --manual_fp16 may prevent convergence.')
print(' This warning message will be removed when the underlying')
print(' issues have been fixed and you are running a TF version')
print(' that has that fix.')
tf.compat.v1.app.run()
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/run_pretraining.py |
# coding=utf-8
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
initializer=None, regularizer=None,
trainable=True,
*args, **kwargs):
"""Custom variable getter that forces trainable variables to be stored in
float32 precision and then casts them to the training precision.
"""
storage_dtype = tf.float32 if trainable else dtype
variable = getter(name, shape, dtype=storage_dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable,
*args, **kwargs)
if trainable and dtype != tf.float32:
variable = tf.cast(variable, dtype)
return variable
def get_custom_getter(compute_type):
return float32_variable_storage_getter if compute_type == tf.float16 else None
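# Illustrative sketch (not part of the original file), assuming TF1-style
# graph mode: the getter is passed as `custom_getter` to a variable scope so
# trainable variables are stored in fp32 but returned cast to the compute
# dtype (here fp16).
def _mixed_precision_scope_demo():
    with tf.compat.v1.variable_scope(
            "demo_scope", custom_getter=get_custom_getter(tf.float16)):
        w = tf.compat.v1.get_variable("w", shape=[4, 4], dtype=tf.float16)
    assert w.dtype == tf.float16  # cast view; the underlying storage is fp32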
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/gpu_environment.py |
# coding=utf-8
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
from gpu_environment import get_custom_getter
from ft_tensorflow_quantization import FakeQuantizer, QuantDense
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.io.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
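# Illustrative sketch (not part of the original file): building a small config
# from a Python dict, which is what `from_json_file` does after parsing the
# JSON text. Unspecified fields keep the defaults from `__init__`.
def _bert_config_demo():
    config = BertConfig.from_dict({
        "vocab_size": 32000,
        "hidden_size": 256,
        "num_hidden_layers": 4,
        "num_attention_heads": 4,
        "intermediate_size": 1024,
    })
    assert config.hidden_size == 256
    assert config.hidden_act == "gelu"          # default retained
    assert "vocab_size" in config.to_dict()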
class BertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None,
compute_type=tf.float32,
if_quant=False):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings. On the TPU,
it is much faster if this is True, on the CPU or GPU, it is faster if
this is False.
scope: (optional) variable scope. Defaults to "bert".
compute_type: (optional) either float32 or float16. Only applies to GPUs.
if_quant: (optional) bool. If True, insert fake-quantization ops for
quantization-aware training.
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert", custom_getter=get_custom_getter(compute_type)):
with tf.variable_scope("embeddings"):
# For good convergence with mixed precision training,
# it is important that the embedding codes remain fp32.
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob,
use_one_hot_embeddings=use_one_hot_embeddings)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=tf.saturate_cast(self.embedding_output, compute_type),
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True,
if_quant=if_quant)
# self.sequence_output = tf.cast(self.all_encoder_layers[-1], tf.float32)
final_input_quantizer = FakeQuantizer(QuantDense.default_quant_desc_input, 'final_input_quantizer', if_quant)
self.sequence_output = tf.cast(final_input_quantizer(self.all_encoder_layers[-1]), tf.float32)
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained.
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
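# Illustrative sketch (not part of the original file): the tanh approximation
# above stays within ~1e-3 of the exact erf-based GELU,
# 0.5 * x * (1 + erf(x / sqrt(2))), for moderate x.
def _gelu_demo():
    x = 1.0
    approx = 0.5 * x * (1.0 + math.tanh(
        math.sqrt(2.0 / math.pi) * (x + 0.044715 * x ** 3)))
    exact = 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))
    assert abs(approx - exact) < 1e-3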
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint, scope_prefix="", allow_shape_mismatch=False):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
scope_prefix_len = len(scope_prefix)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if scope_prefix + name not in name_to_variable:
if 'adam' not in name:
tf.compat.v1.logging.warn(name + " in the checkpoint is not used.")
continue
if allow_shape_mismatch and name_to_variable[scope_prefix + name].shape != var:
tf.compat.v1.logging.warn(name + " shape mismatch, skipped.")
continue
assignment_map[name] = scope_prefix + name
initialized_variable_names[scope_prefix + name] = 1
initialized_variable_names[scope_prefix + name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
if input_tensor.dtype == tf.float16:
try:
from fused_layer_norm import fused_layer_norm
return fused_layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name,
use_fused_batch_norm=True)
except ImportError:
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
else:
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.gather()`.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
flat_input_ids = tf.reshape(input_ids, [-1])
if use_one_hot_embeddings:
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.gather(embedding_table, flat_input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1,
use_one_hot_embeddings=False):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if "
                       "`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
if use_one_hot_embeddings:
# This vocab will be small so we always do one-hot here, since it is
# always faster for a small vocabulary.
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
else:
token_type_embeddings = tf.gather(token_type_table, flat_token_type_ids)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, width])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
to_mask = tf.cast(to_mask, dtype=tf.float32)
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.reshape(to_mask, [batch_size, 1, to_seq_length])
# The mask will be automatically broadcasted to
# [batch_size, from_seq_length, to_seq_length] when it is used in the
# attention layer.
return to_mask
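# Shape walk-through for create_attention_mask_from_input_mask above: a `to_mask`
# of shape [batch_size, to_seq_length] is cast to float32 and reshaped to
# [batch_size, 1, to_seq_length]; broadcasting inside the attention layer then
# expands it to [batch_size, from_seq_length, to_seq_length] without materializing
# the full mask here.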
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None,
if_quant=False):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
  In practice, the multi-headed attention is done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
# query_layer = tf.layers.dense(
# from_tensor_2d,
# num_attention_heads * size_per_head,
# activation=query_act,
# name="query",
# kernel_initializer=create_initializer(initializer_range))
query_dense = QuantDense(
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range),
if_quant=if_quant,
)
query_layer = query_dense(from_tensor_2d)
# `key_layer` = [B*T, N*H]
# key_layer = tf.layers.dense(
# to_tensor_2d,
# num_attention_heads * size_per_head,
# activation=key_act,
# name="key",
# kernel_initializer=create_initializer(initializer_range))
key_dense = QuantDense(
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range),
if_quant=if_quant,
)
key_layer = key_dense(to_tensor_2d)
# `value_layer` = [B*T, N*H]
# value_layer = tf.layers.dense(
# to_tensor_2d,
# num_attention_heads * size_per_head,
# activation=value_act,
# name="value",
# kernel_initializer=create_initializer(initializer_range))
value_dense = QuantDense(
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range),
if_quant=if_quant,
)
value_layer = value_dense(to_tensor_2d)
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
query_layer_quantizer = FakeQuantizer(QuantDense.default_quant_desc_input, 'matmul_q_input_quantizer', if_quant)
query_layer = query_layer_quantizer(query_layer)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
key_layer_quantizer = FakeQuantizer(QuantDense.default_quant_desc_input, 'matmul_k_input_quantizer', if_quant)
key_layer = key_layer_quantizer(key_layer)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores_quantizer = FakeQuantizer(QuantDense.default_quant_desc_input, 'softmax_input_quantizer', if_quant)
attention_scores = attention_scores_quantizer(attention_scores)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, attention_scores.dtype)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
value_layer_quantizer = FakeQuantizer(QuantDense.default_quant_desc_input, 'matmul_v_input_quantizer', if_quant)
value_layer = value_layer_quantizer(value_layer)
attention_probs_quantizer = FakeQuantizer(QuantDense.default_quant_desc_input, 'matmul_a_input_quantizer', if_quant)
attention_probs = attention_probs_quantizer(attention_probs)
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
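# Quantization points in attention_layer above: besides the QuantDense projections
# for query/key/value, FakeQuantizer nodes are applied to the inputs of the two
# batched matmuls (matmul_q/matmul_k before Q*K^T, matmul_a/matmul_v before
# probs*V) and to the softmax input, so the ranges feeding each GEMM can later be
# calibrated by the ft_tensorflow_quantization calibrators.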
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False,
if_quant=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length,
if_quant=if_quant)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
# attention_output = tf.layers.dense(
# attention_output,
# hidden_size,
# kernel_initializer=create_initializer(initializer_range))
attention_dense = QuantDense(
hidden_size,
kernel_initializer=create_initializer(initializer_range),
if_quant=if_quant,
)
attention_output = attention_dense(attention_output)
attention_output = dropout(attention_output, hidden_dropout_prob)
layer_input_quantizer = FakeQuantizer(QuantDense.default_quant_desc_input, 'add_residual_input_quantizer', if_quant)
layer_input = layer_input_quantizer(layer_input)
attention_output_quantizer = FakeQuantizer(QuantDense.default_quant_desc_input, 'add_local_input_quantizer', if_quant)
attention_output = attention_output_quantizer(attention_output)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
# intermediate_output = tf.layers.dense(
# attention_output,
# intermediate_size,
# activation=intermediate_act_fn,
# kernel_initializer=create_initializer(initializer_range))
intermediate_dense = QuantDense(
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range),
if_quant=if_quant,
)
intermediate_output = intermediate_dense(attention_output)
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
# layer_output = tf.layers.dense(
# intermediate_output,
# hidden_size,
# kernel_initializer=create_initializer(initializer_range))
layer_dense = QuantDense(
hidden_size,
kernel_initializer=create_initializer(initializer_range),
if_quant=if_quant,
)
layer_output = layer_dense(intermediate_output)
layer_output = dropout(layer_output, hidden_dropout_prob)
attention_output_quantizer_4 = FakeQuantizer(QuantDense.default_quant_desc_input, 'add_residual_input_quantizer', if_quant)
attention_output = attention_output_quantizer_4(attention_output)
layer_output_quantizer = FakeQuantizer(QuantDense.default_quant_desc_input, 'add_local_input_quantizer', if_quant)
layer_output = layer_output_quantizer(layer_output)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
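# Quantization points in transformer_model above: both operands of each residual
# add are fake-quantized (the shortcut through `add_residual_input_quantizer`, the
# freshly computed branch through `add_local_input_quantizer`) before layer_norm,
# mirroring the per-projection quantizers inside attention_layer.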
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
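# Example for get_shape_list above: given a placeholder of static shape
# [None, 128, 768], the returned list is [tf.shape(tensor)[0], 128, 768], i.e. the
# dynamic batch dimension stays a scalar tensor while the known dimensions are
# plain Python ints, so callers can mix them freely in reshape targets.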
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/modeling.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
initializer=None, regularizer=None,
trainable=True,
*args, **kwargs):
"""Custom variable getter that forces trainable variables to be stored in
float32 precision and then casts them to the training precision.
"""
storage_dtype = tf.float32 if trainable else dtype
variable = getter(name, shape, dtype=storage_dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable,
*args, **kwargs)
if trainable and dtype != tf.float32:
variable = tf.cast(variable, dtype)
return variable
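if __name__ == "__main__":
    # Minimal usage sketch, assuming the TF1 graph-mode environment this repo
    # targets; scope and variable names here are only illustrative. The getter
    # stores trainable variables in float32 and hands back a cast to the
    # requested compute dtype.
    with tf.variable_scope("mixed_precision_demo",
                           custom_getter=float32_variable_storage_getter):
        weight = tf.get_variable("weight", shape=[16, 16], dtype=tf.float16)
    print(weight.dtype)  # float16 view; the underlying storage variable is float32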
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/fp16_utils.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Setup script"""
from setuptools import setup, find_packages
setup(name="TensorFlow_FastTransformer_Quantization",
      package_dir={'ft_tensorflow_quantization': 'ft_tensorflow_quantization'},
version="0.1.0",
description="TensorFlow FasterTransformer Quantization",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
zip_safe=False)
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/setup.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""TensorFlow Quantization"""
from ft_tensorflow_quantization.python.ops.fake_quantize import *
from ft_tensorflow_quantization.python.layers.tensor_quantizer import *
from ft_tensorflow_quantization.python.layers.dense import *
from ft_tensorflow_quantization.python.calib.max import *
from ft_tensorflow_quantization.python.calib.histogram import *
from ft_tensorflow_quantization.python.calib.calibrator import *
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/__init__.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/__init__.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Max collector for calibrations"""
import numpy as np
__all__ = ["MaxCollector"]
class MaxCollector():
"""Collecting min/max values
Args:
axis: None or integer. axis which will have its own max for computing scaling factor.
If None, collect per tensor min/max. Default None
track_minmax: A boolean. If true, track all min/max it sees in addition to the returned calib_min/calib_max.
Default False
"""
def __init__(self, axis=None, track_minmax=False):
self._axis = axis
self._track_minmax = track_minmax
self._calib_min = None
self._calib_max = None
if self._track_minmax:
self._min_list = []
self._max_list = []
def collect(self, x_np):
"""Collect min/max values
Args:
x_np: A numpy array to be processed.
Raises:
RuntimeError: when the input shape changed
"""
if self._axis is None:
reduce_axis = None
else:
reduce_axis = []
axis = self._axis + len(x_np.shape) if self._axis < 0 else self._axis
for i in range(len(x_np.shape)):
if i != axis:
reduce_axis.append(i)
reduce_axis = tuple(reduce_axis)
local_min = np.min(x_np, axis=reduce_axis)
local_max = np.max(x_np, axis=reduce_axis)
if self._calib_min is None and self._calib_max is None:
self._calib_min = local_min
self._calib_max = local_max
else:
if local_min.shape != self._calib_min.shape or local_max.shape != self._calib_max.shape:
raise RuntimeError("quant min/max shape changed!")
self._calib_min = np.minimum(self._calib_min, local_min)
self._calib_max = np.maximum(self._calib_max, local_max)
if self._track_minmax:
self._min_list.append(local_min)
self._max_list.append(local_max)
def reset(self):
"""Reset the collected values"""
self._calib_min = None
self._calib_max = None
if self._track_minmax:
self._min_list = []
self._max_list = []
# pylint:disable=missing-docstring
@property
def calib_min(self):
return self._calib_min
@property
def calib_max(self):
return self._calib_max
@property
def min_list(self):
return self._min_list
@property
def max_list(self):
return self._max_list
def __str__(self):
return self.__repr__()
def __repr__(self):
s = "MaxCollector("
s += "axis={_axis}"
s += " track_minmax={_track_minmax}"
s += " calib_min={_calib_min}"
s += " calib_max={_calib_max}"
if self._track_minmax:
s += " min_list={_min_list}"
s += " max_list={_max_list}"
s += ")"
return s.format(**self.__dict__)
# pylint:enable=missing-docstring
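if __name__ == "__main__":
  # Minimal usage sketch (illustrative data): feed a few batches to a MaxCollector
  # and read back the running per-tensor range; axis=1 would instead keep one
  # min/max entry per column of the 8x16 inputs.
  collector = MaxCollector(axis=None, track_minmax=True)
  for _ in range(3):
    collector.collect(np.random.randn(8, 16).astype(np.float32))
  print("calib_min:", collector.calib_min)
  print("calib_max:", collector.calib_max)
  print("per-step maxima tracked:", len(collector.max_list))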
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/calib/max.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Histogram collector"""
import numpy as np
__all__ = ["HistogramCollector"]
class HistogramCollector():
"""Collecting histograms and do calibration
Args:
num_bins: An integer. Number of histograms bins. Default 2048
grow_method: A string. Method to grow histogram, `append` or `stretch`. Default `append`.
If 'stretch', increase the size of the last bin to capture outliers.
If 'append', add more bins of the same size.
    skip_zeros: A boolean. If True, skip zeros when collecting data. Default False
affine: A boolean. If True, collect histogram for affine quantization. Default False.
Raises:
ValueError: If invalid grow_method is given.
"""
def __init__(self, num_bins=2048, grow_method='append', skip_zeros=False, affine=False):
self._num_bins = num_bins
if grow_method not in ['stretch', 'append']:
raise ValueError("grow_method must be one of 'stretch', 'append'")
self._grow_method = grow_method
self._skip_zeros = skip_zeros
self._affine = affine
self._calib_bin_edges = None
self._calib_hist = None
def collect(self, x_np):
"""Collect histogram
Args:
x_np: A numpy array to be processed.
"""
if self._skip_zeros:
x_np = x_np[np.where(x_np != 0)]
if not self._affine:
x_np = np.abs(x_np)
else:
raise NotImplementedError("No affine support for now.")
temp_max = np.max(x_np)
if self._calib_bin_edges is None and self._calib_hist is None:
# first time it uses num_bins to compute histogram.
width = temp_max / self._num_bins
self._calib_bin_edges = np.arange(0, temp_max + width, width)
self._calib_hist, self._calib_bin_edges = np.histogram(x_np, bins=self._calib_bin_edges)
else:
width = self._calib_bin_edges[1] - self._calib_bin_edges[0]
if temp_max > self._calib_bin_edges[-1]:
if self._grow_method == 'append':
# increase the number of bins
self._calib_bin_edges = np.arange(self._calib_bin_edges[0], temp_max + width, width)
elif self._grow_method == 'stretch':
# stretch the last bin edge to capture the new range
self._calib_bin_edges[-1] = temp_max
else:
raise ValueError("unknown grow_method '{}'".format(self._grow_method))
hist, self._calib_bin_edges = np.histogram(x_np, bins=self._calib_bin_edges)
hist[:len(self._calib_hist)] += self._calib_hist
self._calib_hist = hist
def reset(self):
"""Reset the collected histogram"""
self._calib_bin_edges = None
self._calib_hist = None
# pylint:disable=missing-docstring
@property
def calib_bin_edges(self):
return self._calib_bin_edges
@property
def calib_hist(self):
return self._calib_hist
@property
def affine(self):
return self._affine
def __str__(self):
return self.__repr__()
def __repr__(self):
s = "HistogramCollector("
s += "num_bins={_num_bins}"
s += " grow_method={_grow_method}"
s += " skip_zeros={_skip_zeros}"
s += " affine={_affine}"
s += " calib_bin_edges={_calib_bin_edges}"
s += " calib_hist={_calib_hist})"
return s.format(**self.__dict__)
# pylint:enable=missing-docstring
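if __name__ == "__main__":
  # Minimal usage sketch (illustrative data): accumulate an absolute-value histogram
  # over two batches; if the second batch has a larger maximum, grow_method='append'
  # adds extra bins of the same width instead of stretching the last bin.
  collector = HistogramCollector(num_bins=128, grow_method='append')
  collector.collect(np.random.randn(1024).astype(np.float32))
  collector.collect(5.0 * np.random.randn(1024).astype(np.float32))
  print("bins:", len(collector.calib_hist), "last edge:", collector.calib_bin_edges[-1])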
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/calib/histogram.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/calib/__init__.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""calibrator class"""
from collections import Counter
import numpy as np
from scipy.stats import entropy
import tensorflow as tf
from ft_tensorflow_quantization.python.calib.max import MaxCollector
from ft_tensorflow_quantization.python.calib.histogram import HistogramCollector
from ft_tensorflow_quantization.python.utils.utils import scaled_quant_np
__all__ = ["Calibrator", "get_calibrators"]
class Calibrator():
"""A calibrator that wraps up a collector and relevant tensors and does calibration
Args:
tensor_name_prefix: A string. The common name prefix of `quant_min`, `quant_max`, and `calib_tensor`.
collector: :func:`MaxCollector <quantization.MaxCollector>`
or :func:`HistogramCollector <quantization.HistogramCollector>`.
    quant_min_name: The name of the corresponding `quant_min` tensor in the graph.
    quant_max_name: The name of the corresponding `quant_max` tensor in the graph.
    calib_tensor_name: The name of the tensor to be calibrated.
Attributes:
- tensor_name_prefix: Read-only property for the common name prefix of
`quant_min`, `quant_max`, and `calib_tensor`.
- calib_min: Read-only property for the min value the calibrator collected/computed.
- calib_max: Read-only property for the max value the calibrator collected/computed.
- quant_min_name: Read-only property for the name of `quant_min` tensor in the fakequant node in the graph.
- quant_max_name: Read-only property for the name of `quant_max` tensor in the fakequant node in the graph.
"""
def __init__(self, tensor_name_prefix, collector, quant_min_name, quant_max_name, calib_tensor_name):
self._tensor_name_prefix = tensor_name_prefix
self._collector = collector
self._quant_min_name = quant_min_name
self._quant_max_name = quant_max_name
self._calib_tensor_name = calib_tensor_name
self._calib_min = None
self._calib_max = None
def calib_step_op(self, graph):
"""get the op for one step of calibration
Args:
graph: The being executed TensorFlow Graph.
Returns:
A wrapped TensorFlow op of `tf.py_function` for one calib step.
"""
return tf.py_function(self._collector.collect, inp=[graph.get_tensor_by_name(self._calib_tensor_name)], Tout=[])
def compute_range(self, calibration_method, **kwargs):
"""calculate min/max values from collector
if :func:`MaxCollector <quantization.MaxCollector>` is used, kwargs should be None.
if :func:`HistogramCollector <quantization.HistogramCollector>` is used,
there should be `calibration_method` in kwargs and other corresponding arguments.
Args:
calibration_method: A string indicates the calibration method.
One of `["max", "percentile", "mse", "entropy"]`.
Keyword Arguments:
percentile: A float. Set range to p-th percentile of collected data. `0 <= p <= 100`.
Only needed when `calibration_method == "percentile"`.
start_bin: An integer. Histogram bin to start sweep. Default 128.
Only needed when `calibration_method == "mse"` or `calibration_method == "entropy"`.
stride: An integer. Stride of histogram bins swept. Default 1.
Only needed when `calibration_method == "mse"` or `calibration_method == "entropy"`.
num_bits: An integer. Number of bits of quantization. Default 8.
Only needed when `calibration_method == "mse"` or `calibration_method == "entropy"`.
unsigned: A boolean. using unsigned quantization. Default False.
Only needed when `calibration_method == "mse"` or `calibration_method == "entropy"`.
Raises:
ValueError: Wrong arguments is provided.
RuntimeError: compute range before collecting.
"""
if calibration_method not in ["max", "percentile", "mse", "entropy"]:
raise ValueError('calibration_method should be one of ["max", "percentile", "mse", "entropy"]')
if isinstance(self._collector, MaxCollector):
assert calibration_method == "max"
if self._collector.calib_min is None or self._collector.calib_max is None:
raise RuntimeError("The collector have not collected anything, cannot compute the range.")
if kwargs:
raise ValueError("Unexpected keys: {}".format(kwargs.keys()))
self._calib_min, self._calib_max = self._collector.calib_min, self._collector.calib_max
elif isinstance(self._collector, HistogramCollector):
if self._collector.calib_bin_edges is None or self._collector.calib_hist is None:
raise RuntimeError("The collector have not collected anything, cannot compute the range.")
if calibration_method == 'percentile':
percentile = kwargs.pop('percentile', None)
if percentile is None:
raise ValueError("A percentile value should be provided")
if kwargs:
raise ValueError("Unexpected keys: {}".format(kwargs.keys()))
self._calib_min, self._calib_max = self._compute_percentile_range(percentile)
elif calibration_method in ['mse', 'entropy']:
start_bin = kwargs.pop('start_bin', 128)
stride = kwargs.pop('stride', 1)
num_bits = kwargs.pop('num_bits', 8)
unsigned = kwargs.pop('unsigned', False)
if kwargs:
raise ValueError("Unexpected keys: {}".format(kwargs.keys()))
if calibration_method == 'mse':
self._calib_min, self._calib_max = self._compute_mse_range(start_bin, stride, num_bits, unsigned)
else:
self._calib_min, self._calib_max = self._compute_entropy_range(start_bin, stride, num_bits, unsigned)
else:
raise ValueError("calibration_method must be one of ['percentile', 'mse', 'entropy']")
def _compute_percentile_range(self, percentile):
"""compute min/max value with percentile method and return a tuple of (min, max)
Choose min/max to clip the top P percentile of data
"""
if percentile < 0 or percentile > 100:
raise ValueError("Invalid percentile. Must be in range 0 <= percentile <= 100.")
if not self._collector.affine:
total = self._collector.calib_hist.sum()
cdf = np.cumsum(self._collector.calib_hist / total)
idx = np.searchsorted(cdf, percentile / 100)
calib_max = self._collector.calib_bin_edges[idx]
result = -calib_max.astype('float32'), calib_max.astype('float32')
else:
raise NotImplementedError("No affine support for now.")
return result
def _compute_mse_range(self, start_bin, stride, num_bits, unsigned):
"""compute min/max value that minimizes MSE of the collected histogram
and return a tuple of (min, max)
"""
if not self._collector.affine:
centers = (self._collector.calib_bin_edges[1:] + self._collector.calib_bin_edges[:-1]) / 2
mses = []
arguments = []
for i in range(start_bin, len(centers), stride):
amax = centers[i]
quant_centers = scaled_quant_np(centers, amax, num_bits, axis=None, unsigned=unsigned)
mse = ((quant_centers - centers)**2 * self._collector.calib_hist).mean()
mses.append(mse)
arguments.append(i)
argmin = np.argmin(mses)
calib_max = centers[arguments[argmin]]
result = -calib_max.astype('float32'), calib_max.astype('float32')
else:
raise NotImplementedError("No affine support for now.")
return result
def _compute_entropy_range(self, start_bin, stride, num_bits, unsigned):
"""compute min/max value that minimizes KL-Divergence of the collected histogram
and return a tuple of (min, max)
"""
def _normalize_distr(distr):
summ = np.sum(distr)
if summ != 0:
distr = distr / summ
if not self._collector.affine:
bins = self._collector.calib_hist[:]
bins[0] = bins[1]
total_data = np.sum(bins)
divergences = []
arguments = []
# we are quantizing to 128 values + sign if num_bits=8
nbins = 1 << (num_bits - 1 + int(unsigned))
stop = len(bins)
new_density_counts = np.zeros(nbins, dtype=np.float64)
for i in range(start_bin, stop + 1, stride):
new_density_counts.fill(0)
space = np.linspace(0, i, num=nbins + 1)
digitized_space = np.digitize(range(i), space) - 1
digitized_space[bins[:i] == 0] = -1
for idx, digitized in enumerate(digitized_space):
if digitized != -1:
new_density_counts[digitized] += bins[idx]
counter = Counter(digitized_space)
for key, val in counter.items():
if key != -1:
new_density_counts[key] = new_density_counts[key] / val
new_density = np.zeros(i, dtype=np.float64)
for idx, digitized in enumerate(digitized_space):
if digitized != -1:
new_density[idx] = new_density_counts[digitized]
total_counts_new = np.sum(new_density) + np.sum(bins[i:])
_normalize_distr(new_density)
reference_density = np.array(bins[:len(digitized_space)])
reference_density[-1] += np.sum(bins[i:])
total_counts_old = np.sum(reference_density)
if round(total_counts_new) != total_data or round(total_counts_old) != total_data:
raise RuntimeError("Count mismatch! total_counts_new={}, total_counts_old={}, total_data={}".format(
total_counts_new, total_counts_old, total_data))
_normalize_distr(reference_density)
ent = entropy(reference_density, new_density)
divergences.append(ent)
arguments.append(i)
divergences = np.array(divergences)
last_argmin = len(divergences) - 1 - np.argmin(divergences[::-1])
calib_max = self._collector.calib_bin_edges[last_argmin * stride + start_bin]
result = -calib_max.astype('float32'), calib_max.astype('float32')
else:
raise NotImplementedError("No affine support for now.")
return result
def load_range(self, sess):
"""load min/max values to the graph
Args:
sess: A TensorFlow Session.
"""
if self._calib_min is None or self._calib_max is None:
raise RuntimeError("load_range should be called after compute_range")
sess.run(tf.compat.v1.assign(sess.graph.get_tensor_by_name(self._quant_min_name), tf.constant(self._calib_min)))
sess.run(tf.compat.v1.assign(sess.graph.get_tensor_by_name(self._quant_max_name), tf.constant(self._calib_max)))
def compute_and_load_range(self, sess, **compute_range_args):
"""wraps :func:`compute_range <quantization.Calibrator.compute_range>`
and :func:`load_range <quantization.Calibrator.load_range>` for convenience"""
self.compute_range(**compute_range_args)
self.load_range(sess)
# pylint:disable=missing-docstring
@property
def tensor_name_prefix(self):
return self._tensor_name_prefix
@property
def calib_min(self):
if self._calib_min is None:
raise RuntimeError("Accessing calib_min need compute_range called first.")
return self._calib_min
@property
def calib_max(self):
if self._calib_max is None:
raise RuntimeError("Accessing calib_max need compute_range called first.")
return self._calib_max
@property
def quant_min_name(self):
return self._quant_min_name
@property
def quant_max_name(self):
return self._quant_max_name
def __str__(self):
return self.__repr__()
def __repr__(self):
return "Calibrator({})".format(self._tensor_name_prefix)
# pylint:enable=missing-docstring
def get_calibrators(collection_name_prefix,
graph=None,
collector_type='max',
**collector_args):
"""Prepare collector and relevant tensors for calibration and return a list of calibrators.
Args:
    collection_name_prefix: A string. Determines which collections the tensors are fetched from.
      Must match the prefix used by the corresponding FakeQuantizers.
    graph: An instance of `tf.Graph`. If None, the default graph is used. Default None.
    collector_type: A string. Which collector to use. One of `["max", "histogram"]`. Default `"max"`.
    Collector arguments can be passed via `collector_args`.
If :func:`MaxCollector <quantization.MaxCollector>` is used,
only `axis` and `track_minmax` can be passed to collector_args.
If :func:`HistogramCollector <quantization.HistogramCollector>` is used,
only `num_bins`, `grow_method`, `skip_zeros` and `affine` can be passed.
For details of these arguments, please refer to the docs of :func:`MaxCollector <quantization.MaxCollector>`
or :func:`HistogramCollector <quantization.HistogramCollector>`.
  Returns:
A list of calibrators. Each calibrator processes tensors
in a corresponding :func:`FakeQuantizer <quantization.FakeQuantizer>`.
"""
if graph is None:
graph = tf.compat.v1.get_default_graph()
qmin_collection = graph.get_collection(collection_name_prefix + '_quant_min')
qmax_collection = graph.get_collection(collection_name_prefix + '_quant_max')
calib_tensor_collection = graph.get_collection(collection_name_prefix + '_calib_tensor')
collection_size = len(calib_tensor_collection)
assert len(qmin_collection) == collection_size
assert len(qmax_collection) == collection_size
def get_name_prefix(tensor_name):
tensor_name = tensor_name.split('/')
prefix = '/'.join(tensor_name[:-1])
return prefix
def verify_collector_args(collector_args, acceptable_args, collector_name):
for k, _ in collector_args.items():
if k not in acceptable_args:
raise ValueError("Wrong arguments {} for {} collector, only {} are supported.".format(
k, collector_name, acceptable_args))
if collector_type == 'max':
verify_collector_args(collector_args, ['axis', 'track_minmax'], collector_type)
collector_class = MaxCollector
elif collector_type == 'histogram':
verify_collector_args(collector_args, ['num_bins', 'grow_method', 'skip_zeros', 'affine'], collector_type)
collector_class = HistogramCollector
else:
raise ValueError("collector_type must be one of ['max', 'histogram']")
result = []
for i in range(collection_size):
name_prefix = get_name_prefix(calib_tensor_collection[i])
assert get_name_prefix(qmin_collection[i]) == name_prefix
assert get_name_prefix(qmax_collection[i]) == name_prefix
calibrator = Calibrator(name_prefix, collector_class(**collector_args), qmin_collection[i], qmax_collection[i],
calib_tensor_collection[i])
result.append(calibrator)
return result
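if __name__ == "__main__":
  # Minimal numpy sketch (illustrative data) of the percentile rule used by
  # Calibrator._compute_percentile_range: walk the CDF of the collected |x|
  # histogram until it reaches p/100 and use that bin edge symmetrically.
  data = np.abs(np.random.randn(100000).astype(np.float32))
  hist, edges = np.histogram(data, bins=2048)
  cdf = np.cumsum(hist / hist.sum())
  idx = np.searchsorted(cdf, 99.9 / 100)
  print("99.9th-percentile clip range: (-{0:.4f}, {0:.4f})".format(edges[idx]))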
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/calib/calibrator.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Quantized Dense Layer"""
import tensorflow as tf
from tensorflow.python.ops import standard_ops
from tensorflow.python.eager import context
from tensorflow.python.ops import math_ops
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import nn
from ft_tensorflow_quantization.python.layers.tensor_quantizer import QuantDescriptor, FakeQuantizer
from ft_tensorflow_quantization.python.layers.utils import QuantMixin, pop_quant_desc_in_kwargs
__all__ = ["Dense", "QuantDense"]
# TensorFlow use cls.__name__ as default scope name, so keep the name `Dense` for checkpoint restoring
# there is an alias `QuantDense` below
class Dense(tf.layers.Dense, QuantMixin):
"""Quantized version of tf.layers.Dense
Apply quantized dense to the incoming data, `y = dequant(quant(x)quant(W) + b)`.
  Quantization descriptors are passed in via kwargs. If not present, `default_quant_desc_input` and
`default_quant_desc_kernel` are used.
Args:
if_quant: A boolean. Whether do quantization. If False, behavior like the original Dense. Default False.
others: the same as tf.layers.Dense
Keyword Arguments:
quant_desc_input: An instance of :func:`QuantDescriptor <quantization.QuantDescriptor>`.
Quantization descriptor of input.
    quant_desc_kernel: An instance of :func:`QuantDescriptor <quantization.QuantDescriptor>`.
Quantization descriptor of kernel.
Raises:
ValueError: If unsupported arguments are passed in.
"""
default_quant_desc_input = QuantDescriptor('input')
default_quant_desc_kernel = QuantDescriptor('kernel', axis=1)
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
if_quant=False,
**kwargs):
self.quant_desc_input, self.quant_desc_kernel = pop_quant_desc_in_kwargs(self.__class__, **kwargs)
self.if_quant = if_quant
super().__init__(units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
def build(self, input_shape):
self.kernel_quantizer = FakeQuantizer(self.quant_desc_kernel, 'kernel_quantizer', self.if_quant)
self.input_quantizer = FakeQuantizer(self.quant_desc_input, 'input_quantizer', self.if_quant)
self.aftergemm_quantizer = FakeQuantizer(self.quant_desc_input, 'aftergemm_quantizer', self.if_quant)
super().build(input_shape)
def call(self, inputs):
"""Forward pass, modified from `tf.layers.Dense.call`"""
rank = len(inputs.shape)
kernel = self.kernel_quantizer(self.kernel)
inputs = self.input_quantizer(inputs)
if rank > 2:
# Broadcasting is required for the inputs.
outputs = standard_ops.tensordot(inputs, kernel, [[rank - 1], [0]])
# Reshape the output back to the original ndim of the input.
if not context.executing_eagerly():
shape = inputs.shape.as_list()
output_shape = shape[:-1] + [self.units]
outputs.set_shape(output_shape)
else:
inputs = math_ops.cast(inputs, self._compute_dtype)
if K.is_sparse(inputs):
outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, kernel)
else:
outputs = gen_math_ops.mat_mul(inputs, kernel)
outputs = self.aftergemm_quantizer(outputs)
if self.use_bias:
outputs = nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
QuantDense = Dense
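if __name__ == "__main__":
  # Usage sketch, assuming the TF1 graph-mode environment this layer targets; the
  # tensor and layer names are only illustrative. QuantDense is a drop-in
  # replacement for tf.layers.dense that fake-quantizes the input, the kernel, and
  # the post-GEMM output when if_quant=True.
  hidden_states = tf.placeholder(tf.float32, shape=[None, 768], name="hidden_states")
  quant_ffn = QuantDense(3072, activation=tf.nn.relu, name="quant_ffn", if_quant=True)
  outputs = quant_ffn(hidden_states)
  print(outputs)  # [None, 3072] tensor with fake-quant nodes inserted around the matmul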
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/layers/dense.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/layers/__init__.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Some helper functions for implementing quantized layers"""
import copy
from ft_tensorflow_quantization.python.layers.tensor_quantizer import QuantDescriptor
class QuantMixin():
"""Mixin class for adding basic quantization logic to quantized modules"""
default_quant_desc_input = QuantDescriptor('input')
default_quant_desc_kernel = QuantDescriptor('kernel', axis=-1)
@classmethod
def set_default_quant_desc_input(cls, value):
"""
Args:
value: An instance of :func:`QuantDescriptor <quantization.QuantDescriptor>`
"""
if not isinstance(value, QuantDescriptor):
raise ValueError("{} is not an instance of QuantDescriptor!")
cls.default_quant_desc_input = copy.deepcopy(value)
@classmethod
def set_default_quant_desc_kernel(cls, value):
"""
Args:
value: An instance of :func:`QuantDescriptor <quantization.QuantDescriptor>`
"""
if not isinstance(value, QuantDescriptor):
raise ValueError("{} is not an instance of QuantDescriptor!")
cls.default_quant_desc_kernel = copy.deepcopy(value)
def pop_quant_desc_in_kwargs(quant_cls, **kwargs):
"""Pop quant descriptors in kwargs
If there is no descriptor in kwargs, the default one in quant_cls will be used
Arguments:
quant_cls: A class that has default quantization descriptors
Keyword Arguments:
quant_desc_input: An instance of QuantDescriptor. Quantization descriptor of input.
quant_desc_kernel: An instance of QuantDescriptor. Quantization descriptor of kernel.
"""
quant_desc_input = kwargs.pop('quant_desc_input', quant_cls.default_quant_desc_input)
quant_desc_kernel = kwargs.pop('quant_desc_kernel', quant_cls.default_quant_desc_kernel)
# base layers may use kwargs, so do not check if anything is left in **kwargs
return quant_desc_input, quant_desc_kernel
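if __name__ == "__main__":
  # Minimal usage sketch; `_DemoQuantLayer` is only an illustrative stand-in for a
  # quantized layer class. Override the class-level default input descriptor, then
  # resolve per-instance descriptors the same way the quantized layers do.
  class _DemoQuantLayer(QuantMixin):
    pass
  _DemoQuantLayer.set_default_quant_desc_input(QuantDescriptor('input', num_bits=4))
  desc_input, desc_kernel = pop_quant_desc_in_kwargs(_DemoQuantLayer)
  print(desc_input.num_bits, desc_kernel.axis)  # 4 and -1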
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/layers/utils.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Tensor quantizer"""
import re
import tensorflow as tf
from ft_tensorflow_quantization.python.ops.fake_quantize import fake_quantize
__all__ = ["QuantDescriptor", "FakeQuantizer"]
class QuantDescriptor():
"""Supportive descriptor of quantization. Describe how a tensor should be quantized.
Args:
collection_name_prefix: A string. Determines which collection the relevant tensors are put into.
num_bits: An integer. Number of bits of quantization. It is used to calculate scaling factor. Default 8.
Keyword Arguments:
axis: None or integer. The axis which will have its own max for computing the scaling factor.
If None (the default), use a per-tensor scale.
Must be in the range `[-rank(input_tensor), rank(input_tensor)]`.
e.g. For a KCRS weight tensor, `axis=0` will yield per channel scaling.
Default None.
unsigned: A Boolean. If True, use unsigned. Default False.
affine: A Boolean. If True, use affine quantization. Default False.
narrow_range: A Boolean. If True, use narrow range. Default True.
disable_key_words: A list of strings indicating which quantizers to disable.
Raises:
TypeError: Wrong argument type.
ValueError: Wrong argument value.
Attributes:
- collection_name_prefix: read-only property.
- num_bits: read-only property.
- unsigned: read-only property.
- affine: read-only property.
- narrow_range: read-only property.
- axis: read-only property.
- disable_key_words: read-only property.
"""
def __init__(self, collection_name_prefix, num_bits=8, **kwargs):
if not isinstance(num_bits, int):
raise TypeError("num_bits must be an integer, not {}.".format(type(num_bits)))
if num_bits <= 0:
raise ValueError("num_bits must be > 0, not {}.".format(num_bits))
self._num_bits = num_bits
self._unsigned = kwargs.pop('unsigned', False)
self._affine = kwargs.pop('affine', False)
self._narrow_range = kwargs.pop('narrow_range', True)
self._axis = kwargs.pop('axis', None)
self._collection_name_prefix = collection_name_prefix
if not isinstance(self._collection_name_prefix, str):
raise TypeError("collection_name_prefix must be a string, not {}.".format(type(self._collection_name_prefix)))
self._disable_key_words = kwargs.pop('disable_key_words', [])
if kwargs:
raise TypeError("Unrecognized keys: {}".format(kwargs.keys()))
# pylint:disable=missing-docstring
@property
def collection_name_prefix(self):
return self._collection_name_prefix
@property
def num_bits(self):
return self._num_bits
@property
def unsigned(self):
return self._unsigned
@property
def affine(self):
return self._affine
@property
def narrow_range(self):
return self._narrow_range
@property
def axis(self):
return self._axis
@property
def disable_key_words(self):
return self._disable_key_words
def __str__(self):
return self.__repr__()
def __repr__(self):
s = "QuantDescriptor("
s += "num_bits={_num_bits}"
s += " unsigned={_unsigned}"
s += " affine={_affine}"
s += " axis={_axis}"
s += " collection_name_prefix='{_collection_name_prefix}'"
s += ")"
return s.format(**self.__dict__)
# pylint:enable=missing-docstring
class FakeQuantizer():
"""Fake Tensor quantizer module
This module quantizes a tensor and wraps the related variables. It can also collect relevant tensors for calibration.
Args:
quant_desc: An instance of :func:`QuantDescriptor <quantization.QuantDescriptor>`.
scope_name: A string. Indicates the name of the quantizer.
if_quant: A boolean. Determines whether to do quantization. Default True.
If False, quantization will be disabled.
This quantizer will always set collections for calibration.
Raises:
TypeError: when wrong type of `quant_desc`.
"""
def __init__(self, quant_desc: QuantDescriptor, scope_name="tensor_quantizer", if_quant=True):
if not isinstance(quant_desc, QuantDescriptor):
raise TypeError("quant_desc should be a QuantDescriptor")
self._num_bits = quant_desc.num_bits
self._axis = quant_desc.axis
self._unsigned = quant_desc.unsigned
self._affine = quant_desc.affine
self._narrow_range = quant_desc.narrow_range
self._collection_name_prefix = quant_desc.collection_name_prefix
self._scope_name = scope_name
self._disable_key_words = quant_desc._disable_key_words
self._if_quant = if_quant
self._quant_min = None
self._quant_max = None
# pylint:disable=missing-docstring
@property
def quant_min(self):
return self._quant_min
@property
def quant_max(self):
return self._quant_max
# pylint:enable=missing-docstring
def __call__(self, inputs):
if self._axis is None:
quant_shape = tuple()
else:
quant_shape = (inputs.shape.as_list()[self._axis],)
with tf.compat.v1.variable_scope(None, default_name=self._scope_name):
self._quant_min = tf.compat.v1.get_variable("quant_min", shape=quant_shape, trainable=False)
self._quant_max = tf.compat.v1.get_variable("quant_max", shape=quant_shape, trainable=False)
# add tensors to the `quantization_variables` collection to make initializing from a checkpoint convenient
tf.compat.v1.add_to_collection('quantization_variables', self._quant_min)
tf.compat.v1.add_to_collection('quantization_variables', self._quant_max)
# add tensor name to collections for calibration
tf.compat.v1.add_to_collection(self._collection_name_prefix + '_quant_min', self._quant_min.name)
tf.compat.v1.add_to_collection(self._collection_name_prefix + '_quant_max', self._quant_max.name)
# use identity to put the input tensor into a unified name scope for calibration
tensor_for_calib = tf.identity(inputs, name="tensor_for_calib")
tf.compat.v1.add_to_collection(self._collection_name_prefix + '_calib_tensor', tensor_for_calib.name)
name_prefix = '/'.join(tensor_for_calib.name.split('/')[:-1])
for key in self._disable_key_words:
if re.search(key, name_prefix):
if self._if_quant:
tf.compat.v1.logging.info(f"turn off quanitzer: {name_prefix}")
self._if_quant = False
if self._if_quant:
outputs = fake_quantize(inputs, self._quant_min, self._quant_max, self._num_bits, self._axis, self._unsigned,
self._affine, self._narrow_range)
else:
outputs = inputs
return outputs
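# --- Hedged usage sketch (added for illustration; not part of the original API) ---
# A minimal example of wiring a QuantDescriptor into a FakeQuantizer and applying it to
# an activation tensor. It assumes a TF1-style graph (placeholders, variable scopes);
# the shape and scope name below are illustrative only.
def _example_fake_quantizer_usage():
  """Build a per-tensor 8-bit fake quantizer and apply it to a placeholder."""
  desc = QuantDescriptor('input', num_bits=8)  # axis=None -> per-tensor scaling
  quantizer = FakeQuantizer(desc, scope_name="example_quantizer")
  x = tf.compat.v1.placeholder(tf.float32, shape=[None, 128], name="activations")
  x_q = quantizer(x)  # creates quant_min/quant_max variables and registers calibration collections
  return x_q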
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/layers/tensor_quantizer.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/utils/__init__.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""utilities"""
import numpy as np
def expand_dims(x, v, axis):
if axis < 0:
axis += len(x.shape)
for i in range(len(x.shape)):
if i != axis:
v = np.expand_dims(v, i)
return v
def scaled_quant_np(x, amax, num_bits=8, axis=None, unsigned=False):
"""Scaled quantize x using numpy."""
if axis is not None:
amax = expand_dims(x, amax, axis)
quant_bound = 2.0**(num_bits - 1 + int(unsigned)) - 1
quant_scale = quant_bound / amax
x_q = np.round(np.clip(x, -amax, amax) * quant_scale)
x_q /= quant_scale
return x_q
def affine_quant_np(x, qmin, qmax, num_bits=8, axis=None, unsigned=False):
"""Affine quantize x using numpy."""
if axis is not None:
qmin = expand_dims(x, qmin, axis)
qmax = expand_dims(x, qmax, axis)
if unsigned:
min_bound = 0
max_bound = 2.0**num_bits - 1.0
else:
min_bound = -2.0**(num_bits - 1)
max_bound = 2.0**(num_bits - 1) - 1.0
step_size = (qmax - qmin) / (2.0**num_bits - 1.0)
quant_zero = np.round(qmin / step_size) - min_bound
x_q = np.round(x / step_size) - quant_zero
x_q = np.clip(x_q, min_bound, max_bound)
x_q = (x_q + quant_zero) * step_size
return x_q
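# --- Hedged worked example (added for illustration; not part of the original API) ---
# A small sanity check of the reference implementations above. With num_bits=8, scaled
# quantization snaps values to multiples of amax/127 (symmetric around zero), while
# affine quantization uses a step of (qmax - qmin)/255 with a zero point.
def _example_reference_quantization():
  x = np.array([-1.0, -0.5, 0.0, 0.5, 1.0], dtype=np.float32)
  x_scaled = scaled_quant_np(x, amax=1.0, num_bits=8)              # symmetric, per tensor
  x_affine = affine_quant_np(x, qmin=-1.0, qmax=1.0, num_bits=8)   # asymmetric grid
  return x_scaled, x_affine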
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/utils/utils.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Basic tensor quantization functions"""
import tensorflow as tf
__all__ = ["fake_quantize"]
def fake_quantize(inputs, quant_min=None, quant_max=None, num_bits=8, axis=None, unsigned=False, affine=False, narrow_range=True):
"""Universal tensor fake quantization function
Args:
inputs: A Tensor of dtype float32.
quant_min: Scalar (0-d Tensor), 1-d Tensor or None
quant_max: Scalar (0-d Tensor), 1-d Tensor or None.
num_bits: An integer used to calculate the scaling factor, `scale = (2^(num_bits-1) - 1) / max`.
Effectively, it indicates how many integer bits are used to represent the value.
axis: Integer or None. If specified, quant_min and quant_max must be vectors and will be broadcast against inputs.
Default None, which indicates per-tensor quantization.
unsigned: A boolean. If True, use unsigned int8. Default False.
affine: A boolean. If True, use affine quantization. Default False.
narrow_range: A boolean. If True, use narrow range. Default True.
Returns:
outputs: A Tensor with same type as inputs
Raises:
TypeError: Wrong input types.
ValueError: Wrong input values.
"""
if not tf.is_tensor(inputs):
raise TypeError("inputs should be a Tensor")
if not isinstance(num_bits, int):
raise TypeError("num_bits should be an integer")
if num_bits <= 0:
raise ValueError("num_bits should > 0")
if quant_max is None and quant_min is None:
raise NotImplementedError("dynamic quantization is not supported yet")
if quant_min is not None and quant_max is not None:
if not tf.is_tensor(quant_max) or not tf.is_tensor(quant_min):
raise TypeError("quant_min and quant_max should be Scalar (0-d Tensor), 1-d Tensor or None")
if quant_max.shape != quant_min.shape:
raise ValueError("shape mismatch between quant_min and quant_max")
if len(quant_max.shape) == 0:
if axis is not None:
raise ValueError("quan_min/quant_max is a Scalar, support per tensor quantization, axis must be None")
elif len(quant_max.shape) == 1:
if axis is None:
raise ValueError("quan_min/quant_max is a Tensor, support per axis quantization, axis must be set")
if not isinstance(axis, int):
raise TypeError("axis should be an integer")
if not -len(inputs.shape) <= axis < len(inputs.shape):
raise ValueError("invalid axis {} for inputs with dimentsion {}".format(axis, len(inputs.shape)))
else:
raise ValueError("quant_min and quant_max should be Scalar (0-d Tensor), 1-d Tensor or None")
else:
raise ValueError("one of quant_min and quant_max is None")
# broadcast explicitly for per-axis quantization
if axis is not None:
if axis < 0:
axis += len(inputs.shape)
for i in range(len(inputs.shape)):
if i != axis:
quant_min = tf.expand_dims(quant_min, i)
quant_max = tf.expand_dims(quant_max, i)
epsilon = 1. / (1 << 24) # Minimum fp16 representable
@tf.custom_gradient
def fake_quantize_core(inputs, quant_min, quant_max):
def _scaled_fake_quantize(inputs, quant_min, quant_max):
# TODO(haow): Add check for negative values in inputs if unsigned
bound = 2.0**(num_bits - 1 + int(unsigned)) - 1.0
if unsigned:
min_bound = 0
elif narrow_range:
min_bound = -bound
else:
min_bound = -bound - 1
quant_amax = tf.maximum(tf.abs(quant_min), tf.abs(quant_max))
scale = bound / quant_amax
# Treat quant_amax smaller than the minimum representable value of fp16 as 0.
# Values quantized with quant_amax=0 should all be 0, thus set scale to 1
scale = tf.compat.v2.where(tf.math.less_equal(quant_amax, epsilon), tf.constant(1.), scale)
quantized = tf.clip_by_value(tf.math.round(inputs * scale), min_bound, bound)
outputs = quantized / scale
return outputs
def _affine_fake_quantize(inputs, quant_min, quant_max):
if unsigned:
min_bound = 0
max_bound = 2.0**num_bits - 1.0
zero_point = 2.0**(num_bits - 1)
else:
min_bound = -2.0**(num_bits - 1)
max_bound = 2.0**(num_bits - 1) - 1.0
zero_point = 0.0
step_size = (quant_max - quant_min) / (2.0**num_bits - 1.0)
# guard against step_size being too small; this handling may need improvement
quant_zero = tf.compat.v2.where(tf.math.less_equal(step_size, epsilon), tf.constant(zero_point),
tf.math.round(quant_min / step_size) - min_bound)
quantized = tf.compat.v2.where(
tf.math.less_equal(step_size, epsilon), quant_zero,
tf.clip_by_value(tf.math.round(inputs / step_size) - quant_zero, min_bound, max_bound))
outputs = tf.compat.v2.where(tf.math.less_equal(step_size, epsilon), quant_max,
(quantized + quant_zero) * step_size)
return outputs
if not affine:
outputs = _scaled_fake_quantize(inputs, quant_min, quant_max)
else:
outputs = _affine_fake_quantize(inputs, quant_min, quant_max)
def grad(grad_outputs):
# Boundary check is exclusive in case we'll need to support inplace
if not affine:
pass_condition = tf.math.less(tf.abs(inputs), tf.maximum(tf.abs(quant_min), tf.abs(quant_max)))
else:
pass_condition = tf.math.logical_and(tf.math.greater(inputs, quant_min), tf.math.less(inputs, quant_max))
grad_inputs = tf.compat.v2.where(pass_condition, grad_outputs, tf.constant(0.))
return grad_inputs, None, None
return outputs, grad
return fake_quantize_core(inputs, quant_min, quant_max)
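# --- Hedged usage sketch (added for illustration; not part of the original API) ---
# Per-tensor fake quantization of a constant with fixed calibration bounds. Values
# outside [-1, 1] are clipped to the 8-bit grid, and the custom gradient above passes
# gradients only where the input lies inside the quantization range.
def _example_fake_quantize_usage():
  x = tf.constant([[-2.0, -0.5, 0.0, 0.5, 2.0]], dtype=tf.float32)
  quant_min = tf.constant(-1.0)  # 0-d tensors -> per-tensor quantization (axis stays None)
  quant_max = tf.constant(1.0)
  return fake_quantize(x, quant_min, quant_max, num_bits=8)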
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/ops/fake_quantize.py |
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/ft-tensorflow-quantization/ft_tensorflow_quantization/python/ops/__init__.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import time
# report latency and throughput during eval
class LogEvalRunHook(tf.estimator.SessionRunHook):
def __init__(self, global_batch_size, hvd_rank=-1):
self.global_batch_size = global_batch_size
self.hvd_rank = hvd_rank
self.total_time = 0.0
self.count = 0
self.skipped = 0
self.time_list = []
def before_run(self, run_context):
self.t0 = time.time()
def after_run(self, run_context, run_values):
elapsed_secs = time.time() - self.t0
self.count += 1
# Remove the first 2 (arbitrary) startup iterations from perf evaluations
if self.count <= 2:
print("Skipping time record for ", self.count, " due to overhead")
self.skipped += 1
else:
self.time_list.append(elapsed_secs)
self.total_time += elapsed_secs
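# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The hooks in this file are meant to be passed to tf.estimator calls; the estimator,
# input_fn, and batch size below are placeholders:
#   eval_hook = LogEvalRunHook(global_batch_size=32)
#   estimator.evaluate(input_fn=eval_input_fn, hooks=[eval_hook])
#   throughput = (eval_hook.count - eval_hook.skipped) * 32 / eval_hook.total_time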
# report throughput during training
class LogTrainRunHook(tf.estimator.SessionRunHook):
def __init__(self, global_batch_size, hvd_rank=-1, save_checkpoints_steps=1000):
self.global_batch_size = global_batch_size
self.hvd_rank = hvd_rank
self.save_checkpoints_steps = save_checkpoints_steps
self.total_time = 0.0
self.count = 0 # Holds number of iterations, including skipped iterations for fp16 loss scaling
def after_create_session(self, session, coord):
self.init_global_step = session.run(tf.train.get_global_step())
def before_run(self, run_context):
self.t0 = time.time()
return tf.estimator.SessionRunArgs(
fetches=['step_update:0'])
def after_run(self, run_context, run_values):
elapsed_secs = time.time() - self.t0
self.global_step = run_values.results[0]
self.count += 1
# Remove the first step plus the first two steps after every checkpoint save
if (self.global_step - self.init_global_step) % self.save_checkpoints_steps <= 1:
print("Skipping time record for ", self.global_step, " due to checkpoint-saving/warmup overhead")
else:
self.total_time += elapsed_secs
def end(self, session):
num_global_steps = self.global_step - self.init_global_step
self.skipped = (num_global_steps // self.save_checkpoints_steps) * 2 + \
min(2, num_global_steps % self.save_checkpoints_steps) - 1 | FasterTransformer-main | examples/tensorflow/bert/bert-quantization/utils/utils.py |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm TF examples for BERT."""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import random
from io import open
import h5py
import tensorflow as tf
import numpy as np
from tqdm import tqdm, trange
from tokenization import BertTokenizer
import tokenization as tokenization
import random
import collections
class TrainingInstance(object):
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
is_random_next):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
def __str__(self):
s = ""
s += "tokens: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens]))
s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
s += "is_random_next: %s\n" % self.is_random_next
s += "masked_lm_positions: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_positions]))
s += "masked_lm_labels: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.masked_lm_labels]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
def write_instance_to_example_files(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_files, output_formats="tfrecord"):
"""Create TF example files from `TrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
if 'hdf5' in output_formats:
features_hdf5 = collections.OrderedDict()
num_instances = len(instances)
features_hdf5["input_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features_hdf5["input_mask"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features_hdf5["segment_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
features_hdf5["masked_lm_positions"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
features_hdf5["masked_lm_ids"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
features_hdf5["next_sentence_labels"] = np.zeros(num_instances, dtype="int32")
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(input_mask)
features["segment_ids"] = create_int_feature(segment_ids)
features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
features["next_sentence_labels"] = create_int_feature([next_sentence_label])
if 'tfrecord' in output_formats:
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
if 'hdf5' in output_formats:
features_hdf5["input_ids"][inst_index] = input_ids
features_hdf5["input_mask"][inst_index] = input_mask
features_hdf5["segment_ids"][inst_index] = segment_ids
features_hdf5["masked_lm_positions"][inst_index] = masked_lm_positions
features_hdf5["masked_lm_ids"][inst_index] = masked_lm_ids
features_hdf5["next_sentence_labels"][inst_index] = next_sentence_label
if 'tfrecord' not in output_formats and 'hdf5' not in output_formats:
assert False, 'Either empty output_formats list or unsupported type specified. Try: tfrecord or hdf5'
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.compat.v1.logging.info(
"%s: %s" % (feature_name, " ".join([str(x) for x in values])))
for writer in writers:
writer.close()
if 'hdf5' in output_formats:
f = h5py.File(output_file, 'w')
f.create_dataset("input_ids", data=features_hdf5["input_ids"], dtype='i4', compression='gzip')
f.create_dataset("input_mask", data=features_hdf5["input_mask"], dtype='i1', compression='gzip')
f.create_dataset("segment_ids", data=features_hdf5["segment_ids"], dtype='i1', compression='gzip')
f.create_dataset("masked_lm_positions", data=features_hdf5["masked_lm_positions"], dtype='i4', compression='gzip')
f.create_dataset("masked_lm_ids", data=features_hdf5["masked_lm_ids"], dtype='i4', compression='gzip')
f.create_dataset("next_sentence_labels", data=features_hdf5["next_sentence_labels"], dtype='i1', compression='gzip')
f.flush()
f.close()
tf.compat.v1.logging.info("Wrote %d total instances", total_written)
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def create_float_feature(values):
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return feature
def create_training_instances(input_files, tokenizer, max_seq_length,
dupe_factor, short_seq_prob, masked_lm_prob,
max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
print("creating instance from {}".format(input_file))
with open(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
return instances
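# --- Hedged input example (added for illustration; not part of the original file) ---
# Per the format described in create_training_instances, a raw-text corpus looks like
# this (one sentence per line, blank lines separating documents):
#
#   The cat sat on the mat.
#   It then fell asleep in the sun.
#
#   BERT is pretrained with a masked language modeling objective.
#   It also uses a next sentence prediction task.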
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = rng.randint(0, len(all_documents) - 1)
if random_document_index != document_index:
break
#If picked random document is the same as the current document
if random_document_index == document_index:
is_random_next = False
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
cand_indexes.append(i)
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--vocab_file",
default=None,
type=str,
required=True,
help="The vocabulary the BERT model will train on.")
parser.add_argument("--input_file",
default=None,
type=str,
required=True,
help="The input train corpus. can be directory with .txt files or a path to a single file")
parser.add_argument("--output_file",
default=None,
type=str,
required=True,
help="The output file where the model checkpoints will be written.")
## Other parameters
# int
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--dupe_factor",
default=10,
type=int,
help="Number of times to duplicate the input data (with different masks).")
parser.add_argument("--max_predictions_per_seq",
default=20,
type=int,
help="Maximum sequence length.")
# floats
parser.add_argument("--masked_lm_prob",
default=0.15,
type=float,
help="Masked LM probability.")
parser.add_argument("--short_seq_prob",
default=0.1,
type=float,
help="Probability to create a sequence shorter than maximum sequence length")
parser.add_argument("--do_lower_case",
action='store_true',
default=True,
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument('--random_seed',
type=int,
default=12345,
help="random seed for initialization")
args = parser.parse_args()
tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case)
input_files = []
if os.path.isfile(args.input_file):
input_files.append(args.input_file)
elif os.path.isdir(args.input_file):
input_files = [os.path.join(args.input_file, f) for f in os.listdir(args.input_file) if
(os.path.isfile(os.path.join(args.input_file, f)) and f.endswith('.txt'))]
else:
raise ValueError("{} is not a valid path".format(args.input_file))
rng = random.Random(args.random_seed)
instances = create_training_instances(
input_files, tokenizer, args.max_seq_length, args.dupe_factor,
args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq,
rng)
output_files = args.output_file.split(",")
print("*** Writing to output files ***")
for output_file in output_files:
print(output_file)
write_instance_to_example_files(instances, tokenizer, args.max_seq_length,
args.max_predictions_per_seq, output_files)
if __name__ == "__main__":
main()
| FasterTransformer-main | examples/tensorflow/bert/bert-quantization/utils/create_pretraining_data.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os
import random
import modeling
import optimization
import tokenization
import six
import tensorflow as tf
import horovod.tensorflow as hvd
import time
import csv
flags = tf.flags
FLAGS = None
def extract_flags():
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
return flags.FLAGS
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer, verbose_logging=False):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5 and verbose_logging:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("guid: %s" % (example.guid))
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.compat.v1.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.compat.v1.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer, FLAGS.verbose_logging)
features.append(feature)
return features
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.compat.v1.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def main():
  FLAGS = extract_flags()
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tf.gfile.MakeDirs(FLAGS.data_dir + "final_tfrecords_sharded")
train_examples = processor.get_train_examples(FLAGS.data_dir)
train_file = os.path.join(FLAGS.data_dir, "final_tfrecords_sharded/" + task_name + "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.data_dir, "final_tfrecords_sharded/" + task_name + "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
predict_examples = processor.get_test_examples(FLAGS.data_dir)
predict_file = os.path.join(FLAGS.data_dir, "final_tfrecords_sharded/" + task_name + "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
if __name__ == "__main__":
main() | FasterTransformer-main | examples/tensorflow/bert/bert-quantization/utils/create_glue_data.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os
import random
import modeling
import optimization
import tokenization
import six
import tensorflow as tf
import horovod.tensorflow as hvd
import time
flags = tf.flags
FLAGS = None
def extract_flags():
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"squad_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.mark_flag_as_required("train_file")
flags.mark_flag_as_required("predict_file")
flags.mark_flag_as_required("squad_dir")
flags.mark_flag_as_required("vocab_file")
return flags.FLAGS
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training, version_2_with_negative=False):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
            end_position = char_to_word_offset[answer_offset + answer_length - 1]
            # Only add answers where the text can be exactly recovered from the
            # document. If this CAN'T be done, it is likely due to weird Unicode
            # issues, so we just skip the example.
            #
            # Note that this means that, in training mode, not every example is
            # guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
              tf.compat.v1.logging.warning("Could not find answer: '%s' vs. '%s'",
                                           actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
  # Because of the sliding window approach taken to scoring documents, a single
  # token can appear in multiple doc spans. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
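# A worked example (illustrative only, reusing the span example from the comment
# above). With
#   doc_spans = [_DocSpan(start=0, length=5),   # Span A: "the man went to the"
#                _DocSpan(start=3, length=5),   # Span B: "to the store and bought"
#                _DocSpan(start=6, length=5)]   # Span C: "and bought a gallon of"
# and position = 7 (the token "bought"), Span B scores min(4, 0) + 0.01 * 5 = 0.05
# while Span C scores min(1, 3) + 0.01 * 5 = 1.05, so
# _check_is_max_context(doc_spans, cur_span_index=2, position=7) returns True and
# _check_is_max_context(doc_spans, cur_span_index=1, position=7) returns False.
# (_DocSpan here stands for any object with `start` and `length` attributes, such
# as the namedtuple defined in convert_examples_to_features below.)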
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
  # Question: What country is the top exporter of electronics?
  # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
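# A worked example (the tokenization below is assumed, following the comment
# above). If the sub-tokens of the context are
#   all_doc_tokens = ["the", "leader", "was", "john", "smith",
#                     "(", "1895", "-", "1943", ")", "."]
# and the whitespace token "(1895-1943)." covers sub-token indices 5..10, then
# calling _improve_answer_span(all_doc_tokens, 5, 10, tokenizer, "1895") with a
# tokenizer that maps "1895" to ["1895"] finds the exact match at index 6 and
# returns (6, 6) instead of the looser span (5, 10).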
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn, verbose_logging=False):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
    # of up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
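    # Illustrative example (assumed sizes): with len(all_doc_tokens) == 250,
    # max_tokens_for_doc == 125 and doc_stride == 64, the loop above yields
    # DocSpan(start=0, length=125), DocSpan(start=64, length=125) and
    # DocSpan(start=128, length=122), i.e. overlapping windows that advance by
    # min(length, doc_stride) until the final token is covered.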
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if verbose_logging and example_index < 20:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("unique_id: %s" % (unique_id))
tf.compat.v1.logging.info("example_index: %s" % (example_index))
tf.compat.v1.logging.info("doc_span_index: %s" % (doc_span_index))
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
tf.compat.v1.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
tf.compat.v1.logging.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
tf.compat.v1.logging.info("start_position: %d" % (start_position))
tf.compat.v1.logging.info("end_position: %d" % (end_position))
tf.compat.v1.logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output_fn(feature)
unique_id += 1
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.python_io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
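# A minimal sketch (assuming the feature names above and a known max_seq_length)
# of how the serialized records could be parsed back with the TF1 API:
#
#   name_to_features = {
#       "unique_ids": tf.FixedLenFeature([], tf.int64),
#       "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
#       "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
#       "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
#   }
#   parsed = tf.parse_single_example(serialized_record, name_to_features)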
def main():
FLAGS = extract_flags()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tf.gfile.MakeDirs(FLAGS.squad_dir + "/final_tfrecords_sharded")
  # We write the features to TFRecord files on disk to avoid storing very large
  # constant tensors in memory.
train_examples = read_squad_examples(
input_file=FLAGS.train_file, is_training=True,
version_2_with_negative=FLAGS.version_2_with_negative)
train_writer = FeatureWriter(
filename=os.path.join(FLAGS.squad_dir, "final_tfrecords_sharded/train.tf_record"),
is_training=True)
convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature,
verbose_logging=FLAGS.verbose_logging)
train_writer.close()
eval_examples = read_squad_examples(
input_file=FLAGS.predict_file, is_training=False,
version_2_with_negative=FLAGS.version_2_with_negative)
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.squad_dir, "final_tfrecords_sharded/eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature,
verbose_logging=FLAGS.verbose_logging)
eval_writer.close()
if __name__ == "__main__":
main() | FasterTransformer-main | examples/tensorflow/bert/bert-quantization/utils/create_squad_data.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage example
# python ./runData.py -i $data_file -o $output_file -j $json_file -m $ckpt_file -b $batch_size -l $seq_len -f $use_float16 -n $index
import getopt
import modeling
import numpy as np
from tensorflow.python.client import timeline
import tensorflow as tf
from datetime import datetime
import json
import sys
import absl.logging as _logging # pylint: disable=unused-import
from absl import flags
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def getInput(input_file, batch_size, i):
data = np.load(input_file)
arr_input_ids = data["input_ids:0"]
arr_input_mask = data["input_mask:0"]
arr_segment_ids = data["segment_ids:0"]
input_ids = np.transpose(arr_input_ids[i * batch_size:(i + 1) * batch_size, :])
input_mask = np.transpose(arr_input_mask[i * batch_size:(i + 1) * batch_size, :])
segment_ids = np.transpose(arr_segment_ids[i * batch_size:(i + 1) * batch_size, :])
print("Get input batch {}, {}, {}".format(input_ids.dtype, input_mask.dtype, segment_ids.dtype))
data.close()
return input_ids, input_mask, segment_ids
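# Note: the arrays in the .npz file are assumed to be stored as
# [num_samples, seq_len]; slicing one batch and transposing gives
# [seq_len, batch_size] arrays, matching the placeholder shapes defined in
# __main__ below. For example, with batch_size == 8 and seq_len == 128:
#   input_ids, input_mask, segment_ids = getInput("./data.npz", 8, 0)
#   assert input_ids.shape == (128, 8)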
def getJson(json_file):
json_f = open(json_file)
data = json.load(json_f)
n_token = data["n_token"]
untie_r = data["untie_r"]
ff_activation = data["ff_activation"]
d_inner = data["d_inner"]
d_head = data["d_head"]
n_head = data["n_head"]
d_model = data["d_model"]
n_head = data["n_head"]
n_layer = data["n_layer"]
json_f.close()
return n_token, untie_r, ff_activation, d_inner, d_head, n_head, d_model, n_head, n_layer
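# For reference, the fields read above come from the XLNet config file; for the
# base-sized checkpoint the values are typically along the lines of (assumed,
# check the actual xlnet_config.json):
#   {"n_token": 32000, "untie_r": true, "ff_activation": "gelu", "d_inner": 3072,
#    "d_head": 64, "n_head": 12, "d_model": 768, "n_layer": 12}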
def runTest(json_file, seq_len, batch_size, input_ids, input_mask, segment_ids, use_float16):
# Acquire network settings
    n_token, untie_r, ff_activation, d_inner, d_head, n_head, d_model, n_layer = getJson(json_file)
# Set Running parameters
attn_type = "bi" # attn_type="uni"
bi_data = False
dropout = 0.1
dropatt = 0.1
is_training = False
reuse = False
use_tpu = False
mem_len = None
reuse_len = None
initializer = tf.initializers.random_normal(
stddev=0.02,
seed=None)
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
output, arr_output = modeling.transformer_xl(
input_ids, n_token, n_layer, d_model, n_head,
d_head, d_inner, dropout, dropatt, attn_type,
bi_data, initializer, is_training, mem_len, untie_r=untie_r,
ff_activation=ff_activation, input_mask=input_mask, seg_id=segment_ids, use_float16=use_float16,
use_tpu=use_tpu, reuse_len=reuse_len)
return output, arr_output, n_layer
def usage():
print(" -i input_file")
print(" -o output_file")
print(" -j json_file")
print(" -m model_file")
print(" -l max_seq_length")
print(" -b batch_size")
print(" -o output_file")
print(" -f use_float16")
print(" -n index of the inputdata batch")
print(" -h output help info")
print("Example: python runData.py -i ./data.npz -o output.npz -j xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
-m xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt -b 8 -l 128 -n 12")
if __name__ == "__main__":
    # Init parameters
seq_len = 128
batch_size = 8
use_float16 = False
input_file = "./data.npz"
index = 0
json_file = "../../../Data/xlnet_cased_L-12_H-768_A-12/xlnet_config.json"
model_file = "../../../Data/xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt"
output_file = "./output.npz"
    # Set parameters
opts, args = getopt.getopt(sys.argv[1:], "hi:j:m:b:l:f:o:n:")
for op, value in opts:
if op == "-i":
input_file = value
elif op == "-o":
output_file = value
elif op == "-j":
json_file = value
elif op == "-m":
model_file = value
elif op == "-b":
batch_size = int(value)
elif op == "-l":
seq_len = int(value)
elif op == "-f":
use_float16 = bool(int(value))
elif op == "-n":
index = int(value)
elif op == "-h":
usage()
sys.exit()
print("USE FLOAT 16: ", str(use_float16))
# Get Input Value
input_ids, input_mask, segment_ids = getInput(input_file, batch_size, index)
tf.reset_default_graph()
# Set input
t_input_ids = tf.placeholder(tf.int32, shape=[seq_len, batch_size])
t_input_mask = tf.placeholder(tf.float32, shape=[seq_len, batch_size])
t_segment_ids = tf.placeholder(tf.int32, shape=[seq_len, batch_size])
# Define Output
output, arr_output, n_layer = runTest(json_file, seq_len, batch_size, t_input_ids, t_input_mask, t_segment_ids,
use_float16)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
with tf.Session() as sess:
# Restore variables from disk.
saver.restore(sess, model_file)
# Run
arr_output = sess.run(arr_output, feed_dict={t_input_ids: input_ids,
t_input_mask: input_mask, t_segment_ids: segment_ids})
# Save result
data = {}
i = 0
data["attn_mask"] = arr_output[0].transpose((2, 0, 1, 3))
data["output_h"] = arr_output[1].transpose((1, 0, 2))
data["seg_mat"] = arr_output[2].transpose((2, 0, 1, 3))
data["pos_emb"] = arr_output[3].transpose((1, 0, 2))
arr_output = arr_output[4:]
for r in arr_output:
print(r.shape)
CACHE_NUM = 8
for i in range(n_layer):
data["layer_{}_q_head_h".format(i)] = arr_output[i * CACHE_NUM].transpose((1, 0, 2, 3))
data["layer_{}_k_head_h".format((i))] = arr_output[i * CACHE_NUM + 1].transpose((1, 0, 2, 3))
data["layer_{}_v_head_h".format((i))] = arr_output[i * CACHE_NUM + 2].transpose((1, 0, 2, 3))
data["layer_{}_k_head_r".format((i))] = arr_output[i * CACHE_NUM + 3].transpose((1, 0, 2, 3))
data["layer_{}_attn_vec".format((i))] = arr_output[i * CACHE_NUM + 4].transpose((1, 0, 2, 3))
data["layer_{}_attn_output".format((i))] = arr_output[i * CACHE_NUM + 5].transpose((1, 0, 2))
data["layer_{}_layer_1".format((i))] = arr_output[i * CACHE_NUM + 6].transpose((1, 0, 2))
data["layer_{}".format((i))] = arr_output[i * CACHE_NUM + 7].transpose((1, 0, 2))
np.savez(output_file, **data)
| FasterTransformer-main | examples/tensorflow/xlnet/runData.py |