diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/LICENSE b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..fb9ed3577fd6f2d97e52e06a8fc80fd50ea1cc69 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/LICENSE @@ -0,0 +1,203 @@ +Copyright (c) 2021 Habana Labs, Ltd. an Intel Company + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/bert_loss.png b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/bert_loss.png new file mode 100644 index 0000000000000000000000000000000000000000..e9f96e802ec9f7261d18cb09f16a09f7fbf4bad2 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/bert_loss.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6b8d1935b2fd6bba39446fa0f2f332fcd4dd390301fd830c51c3a75edfbc175 +size 23703 diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/bf16_config/bert.json b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/bf16_config/bert.json new file mode 100644 index 0000000000000000000000000000000000000000..e66b63df0d4ba1af88dd540f07085f897509790f --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/bf16_config/bert.json @@ -0,0 +1,60 @@ +{ + "allowlist": [ + "_ScopedAllocatorSplit", + "_ScopedAllocatorConcat", + "_ScopedAllocator", + "BatchMatMul", + "BatchMatMulV2", + "BiasAdd", + "BiasAddGrad", + "EuclideanNorm", + "Exp", + "HabanaDropout", + "HabanaDropoutGrad", + "HabanaDropoutStateful", + "HabanaGelu", + "HabanaGeluGrad", + "HabanaLayerNorm", + "HabanaLayerNormV2", + "HabanaLayerNormGrad", + "HabanaLayerNormGradV2", + "HabanaMaskedSoftmax", + "HabanaSoftmaxGrad", + "HabanaLogSoftmaxGrad", + "HorovodAllgather", + "HorovodAllreduce", + "L2Loss", + "Log", + "LogSoftmax", + "MatMul", + "Softmax", + "Sum", + "Tanh", + "TanhGrad" + ], + "conditional_list": [ + "Add", + "AddV2", + "AddN", + "ExpandDims", + "Identity", + "Neg", + "Reshape", + "Slice", + "Split", + "StridedSliceGrad", + "Transpose" + ], + "strict_conditional_list": [], + "non_convertible_exceptions": [ + [".*KEEP_FP32_PRECISION.*", ""] + ], + "convertible_exceptions": [ + ["bert/encoder/layer_[0-9]+/attention/self/add", "AddV2"], + ["bert/encoder/layer_[0-9]+/attention/self/Mul", "Mul"], + ["clip_by_global_norm/mul", "Mul"], + ["global_norm/mul", "Mul"], + ["global_norm/global_norm", "Sqrt"], + [".*FORCE_BF16_PRECISION.*", ""] + ] +} diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data/sample_text.txt b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data/sample_text.txt new file mode 100644 index 0000000000000000000000000000000000000000..a42812060c576bae870eb29b1ac083fda0d239d3 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data/sample_text.txt @@ -0,0 +1,33 @@ +This text is included to make sure Unicode is handled properly: 力加勝北区ᴵᴺᵀᵃছজটডণত +Text should be one-sentence-per-line, with empty lines between documents. +This sample text is public domain and was randomly selected from Project Guttenberg. 
+ +The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors. +Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity. +Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them. +"Cass" Beard had risen early that morning, but not with a view to discovery. +A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets. +The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency. +This was nearly opposite. +Mr. Cassius crossed the highway, and stopped suddenly. +Something glittered in the nearest red pool before him. +Gold, surely! +But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring. +Looking at it more attentively, he saw that it bore the inscription, "May to Cass." +Like most of his fellow gold-seekers, Cass was superstitious. + +The fountain of classic wisdom, Hypatia herself. +As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge. +From my youth I felt in me a soul above the matter-entangled herd. +She revealed to me the glorious fact, that I am a spark of Divinity itself. +A fallen star, I am, sir!' continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's. +There is a philosophic pleasure in opening one's treasures to the modest young. +Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street. +Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide; +but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind. +Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now. 
+His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert; +while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts. +At last they reached the quay at the opposite end of the street; +and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers. +He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him. diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/BooksDownloader.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/BooksDownloader.py new file mode 100644 index 0000000000000000000000000000000000000000..7b21eba9e0e355f71327bf2268ab01998cdb6661 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/BooksDownloader.py @@ -0,0 +1,38 @@ +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +############################################################################### +# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company +# +# Changes: +# - Modified path hard-coded for Nvidia container +############################################################################### + +import subprocess +import sys + +class BooksDownloader: + def __init__(self, save_path): + self.save_path = save_path + pass + + + def download(self): + import os + working_dir = os.getcwd() + args = '--list ' + working_dir + '/bookcorpus/url_list.jsonl --out' + bookscorpus_download_command = f'{sys.executable} ' + working_dir + '/bookcorpus/download_files.py ' + args + bookscorpus_download_command += ' ' + self.save_path + '/bookscorpus' + bookscorpus_download_command += ' --trash-bad-count' + print("Downloading BookCorpus command: ", bookscorpus_download_command) + bookscorpus_download_process = subprocess.run(bookscorpus_download_command, shell=True, check=True) \ No newline at end of file diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/BookscorpusTextFormatting.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/BookscorpusTextFormatting.py new file mode 100644 index 0000000000000000000000000000000000000000..22e48d4b2e12867bc455bd964833d39f0f34a26e --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/BookscorpusTextFormatting.py @@ -0,0 +1,32 @@ +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import os + +class BookscorpusTextFormatting: + def __init__(self, books_path, output_filename, recursive = False): + self.books_path = books_path + self.recursive = recursive + self.output_filename = output_filename + + + # This puts one book per line + def merge(self): + with open(self.output_filename, mode='w', newline='\n') as ofile: + for filename in glob.glob(self.books_path + '/' + '*.txt', recursive=True): + with open(filename, mode='r', encoding='utf-8-sig', newline='\n') as file: + for line in file: + if line.strip() != '': + ofile.write(line.strip() + ' ') + ofile.write("\n\n") \ No newline at end of file diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/Downloader.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/Downloader.py new file mode 100644 index 0000000000000000000000000000000000000000..75db77fd23f3b34c1979deb5bcc28dffb85ca343 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/Downloader.py @@ -0,0 +1,68 @@ +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +############################################################################### +# Copyright (C) 2020-2021 Habana Labs, Ltd. 
an Intel Company +# +# Changes: +# - Removed downloading datasets that are not related to BERT pretrain +############################################################################### + +from GooglePretrainedWeightDownloader import GooglePretrainedWeightDownloader +from WikiDownloader import WikiDownloader +from BooksDownloader import BooksDownloader + +class Downloader: + def __init__(self, dataset_name, save_path): + self.dataset_name = dataset_name + self.save_path = save_path + + + def download(self): + if self.dataset_name == 'bookscorpus': + self.download_bookscorpus() + + elif self.dataset_name == 'wikicorpus_en': + self.download_wikicorpus('en') + + elif self.dataset_name == 'wikicorpus_zh': + self.download_wikicorpus('zh') + + elif self.dataset_name == 'google_pretrained_weights': + self.download_google_pretrained_weights() + + elif self.dataset_name == 'all': + self.download_bookscorpus() + self.download_wikicorpus('en') + self.download_wikicorpus('zh') + self.download_google_pretrained_weights() + + else: + print(self.dataset_name) + assert False, 'Unknown dataset_name provided to downloader' + + + def download_bookscorpus(self): + downloader = BooksDownloader(self.save_path) + downloader.download() + + + def download_wikicorpus(self, language): + downloader = WikiDownloader(language, self.save_path) + downloader.download() + + + def download_google_pretrained_weights(self): + downloader = GooglePretrainedWeightDownloader(self.save_path) + downloader.download() + diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/GooglePretrainedWeightDownloader.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/GooglePretrainedWeightDownloader.py new file mode 100644 index 0000000000000000000000000000000000000000..bb0684d34b5c9b9a2ef24080af819cf013f63639 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/GooglePretrainedWeightDownloader.py @@ -0,0 +1,158 @@ +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
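# A minimal sketch of how the Downloader dispatcher defined above can be driven.
# It is illustrative only: the real entry point is bertPrep.py (not shown in this
# hunk), and the save path below is a placeholder.
#
#     from Downloader import Downloader
#
#     save_path = '/data/tensorflow/bert/books_wiki_en_corpus/download'  # placeholder
#     Downloader('wikicorpus_en', save_path).download()   # or 'bookscorpus', 'google_pretrained_weights', 'all'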
+ +import hashlib +import os +import urllib.request +import zipfile + +class GooglePretrainedWeightDownloader: + def __init__(self, save_path): + self.save_path = save_path + '/google_pretrained_weights' + + if not os.path.exists(self.save_path): + os.makedirs(self.save_path) + + # Download urls + self.model_urls = { + 'bert_base_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip', 'uncased_L-12_H-768_A-12.zip'), + 'bert_large_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip', 'uncased_L-24_H-1024_A-16.zip'), + 'bert_base_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip', 'cased_L-12_H-768_A-12.zip'), + 'bert_large_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip', 'cased_L-24_H-1024_A-16.zip'), + 'bert_base_multilingual_cased': ('https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip', 'multi_cased_L-12_H-768_A-12.zip'), + 'bert_large_multilingual_uncased': ('https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', 'multilingual_L-12_H-768_A-12.zip'), + 'bert_base_chinese': ('https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip', 'chinese_L-12_H-768_A-12.zip') + } + + # SHA256sum verification for file download integrity (and checking for changes from the download source over time) + self.bert_base_uncased_sha = { + 'bert_config.json': '7b4e5f53efbd058c67cda0aacfafb340113ea1b5797d9ce6ee411704ba21fcbc', + 'bert_model.ckpt.data-00000-of-00001': '58580dc5e0bf0ae0d2efd51d0e8272b2f808857f0a43a88aaf7549da6d7a8a84', + 'bert_model.ckpt.index': '04c1323086e2f1c5b7c0759d8d3e484afbb0ab45f51793daab9f647113a0117b', + 'bert_model.ckpt.meta': 'dd5682170a10c3ea0280c2e9b9a45fee894eb62da649bbdea37b38b0ded5f60e', + 'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3', + } + + self.bert_large_uncased_sha = { + 'bert_config.json': 'bfa42236d269e2aeb3a6d30412a33d15dbe8ea597e2b01dc9518c63cc6efafcb', + 'bert_model.ckpt.data-00000-of-00001': 'bc6b3363e3be458c99ecf64b7f472d2b7c67534fd8f564c0556a678f90f4eea1', + 'bert_model.ckpt.index': '68b52f2205ffc64dc627d1120cf399c1ef1cbc35ea5021d1afc889ffe2ce2093', + 'bert_model.ckpt.meta': '6fcce8ff7628f229a885a593625e3d5ff9687542d5ef128d9beb1b0c05edc4a1', + 'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3', + } + + self.bert_base_cased_sha = { + 'bert_config.json': 'f11dfb757bea16339a33e1bf327b0aade6e57fd9c29dc6b84f7ddb20682f48bc', + 'bert_model.ckpt.data-00000-of-00001': '734d5a1b68bf98d4e9cb6b6692725d00842a1937af73902e51776905d8f760ea', + 'bert_model.ckpt.index': '517d6ef5c41fc2ca1f595276d6fccf5521810d57f5a74e32616151557790f7b1', + 'bert_model.ckpt.meta': '5f8a9771ff25dadd61582abb4e3a748215a10a6b55947cbb66d0f0ba1694be98', + 'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02', + } + + self.bert_large_cased_sha = { + 'bert_config.json': '7adb2125c8225da495656c982fd1c5f64ba8f20ad020838571a3f8a954c2df57', + 'bert_model.ckpt.data-00000-of-00001': '6ff33640f40d472f7a16af0c17b1179ca9dcc0373155fb05335b6a4dd1657ef0', + 'bert_model.ckpt.index': 'ef42a53f577fbe07381f4161b13c7cab4f4fc3b167cec6a9ae382c53d18049cf', + 'bert_model.ckpt.meta': 'd2ddff3ed33b80091eac95171e94149736ea74eb645e575d942ec4a5e01a40a1', + 'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02', + } + + self.bert_base_multilingual_cased_sha = { + 
'bert_config.json': 'e76c3964bc14a8bb37a5530cdc802699d2f4a6fddfab0611e153aa2528f234f0', + 'bert_model.ckpt.data-00000-of-00001': '55b8a2df41f69c60c5180e50a7c31b7cdf6238909390c4ddf05fbc0d37aa1ac5', + 'bert_model.ckpt.index': '7d8509c2a62b4e300feb55f8e5f1eef41638f4998dd4d887736f42d4f6a34b37', + 'bert_model.ckpt.meta': '95e5f1997e8831f1c31e5cf530f1a2e99f121e9cd20887f2dce6fe9e3343e3fa', + 'vocab.txt': 'fe0fda7c425b48c516fc8f160d594c8022a0808447475c1a7c6d6479763f310c', + } + + self.bert_large_multilingual_uncased_sha = { + 'bert_config.json': '49063bb061390211d2fdd108cada1ed86faa5f90b80c8f6fdddf406afa4c4624', + 'bert_model.ckpt.data-00000-of-00001': '3cd83912ebeb0efe2abf35c9f1d5a515d8e80295e61c49b75c8853f756658429', + 'bert_model.ckpt.index': '87c372c1a3b1dc7effaaa9103c80a81b3cbab04c7933ced224eec3b8ad2cc8e7', + 'bert_model.ckpt.meta': '27f504f34f02acaa6b0f60d65195ec3e3f9505ac14601c6a32b421d0c8413a29', + 'vocab.txt': '87b44292b452f6c05afa49b2e488e7eedf79ea4f4c39db6f2f4b37764228ef3f', + } + + self.bert_base_chinese_sha = { + 'bert_config.json': '7aaad0335058e2640bcb2c2e9a932b1cd9da200c46ea7b8957d54431f201c015', + 'bert_model.ckpt.data-00000-of-00001': '756699356b78ad0ef1ca9ba6528297bcb3dd1aef5feadd31f4775d7c7fc989ba', + 'bert_model.ckpt.index': '46315546e05ce62327b3e2cd1bed22836adcb2ff29735ec87721396edb21b82e', + 'bert_model.ckpt.meta': 'c0f8d51e1ab986604bc2b25d6ec0af7fd21ff94cf67081996ec3f3bf5d823047', + 'vocab.txt': '45bbac6b341c319adc98a532532882e91a9cefc0329aa57bac9ae761c27b291c', + } + + # Relate SHA to urls for loop below + self.model_sha = { + 'bert_base_uncased': self.bert_base_uncased_sha, + 'bert_large_uncased': self.bert_large_uncased_sha, + 'bert_base_cased': self.bert_base_cased_sha, + 'bert_large_cased': self.bert_large_cased_sha, + 'bert_base_multilingual_cased': self.bert_base_multilingual_cased_sha, + 'bert_large_multilingual_uncased': self.bert_large_multilingual_uncased_sha, + 'bert_base_chinese': self.bert_base_chinese_sha + } + + # Helper to get sha256sum of a file + def sha256sum(self, filename): + h = hashlib.sha256() + b = bytearray(128*1024) + mv = memoryview(b) + with open(filename, 'rb', buffering=0) as f: + for n in iter(lambda : f.readinto(mv), 0): + h.update(mv[:n]) + + return h.hexdigest() + + def download(self): + # Iterate over urls: download, unzip, verify sha256sum + found_mismatch_sha = False + for model in self.model_urls: + url = self.model_urls[model][0] + file = self.save_path + '/' + self.model_urls[model][1] + + print('Downloading', url) + response = urllib.request.urlopen(url) + with open(file, 'wb') as handle: + handle.write(response.read()) + + print('Unzipping', file) + zip = zipfile.ZipFile(file, 'r') + zip.extractall(self.save_path) + zip.close() + + sha_dict = self.model_sha[model] + for extracted_file in sha_dict: + sha = sha_dict[extracted_file] + if sha != self.sha256sum(file[:-4] + '/' + extracted_file): + found_mismatch_sha = True + print('SHA256sum does not match on file:', extracted_file, 'from download url:', url) + else: + print(file[:-4] + '/' + extracted_file, '\t', 'verified') + + if not found_mismatch_sha: + print("All downloads pass sha256sum verification.") + + def serialize(self): + pass + + def deserialize(self): + pass + + def listAvailableWeights(self): + print("Available Weight Datasets") + for item in self.model_urls: + print(item) + + def listLocallyStoredWeights(self): + pass + diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/TextSharding.py 
b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/TextSharding.py new file mode 100644 index 0000000000000000000000000000000000000000..85012a53c59b22487eda63fcdd9d1533ba78eb0d --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/TextSharding.py @@ -0,0 +1,331 @@ +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import defaultdict +from itertools import islice + +import multiprocessing +import os +import statistics + +class Sharding: + def __init__(self, input_files, output_name_prefix, n_training_shards, n_test_shards, fraction_test_set): + assert len(input_files) > 0, 'The input file list must contain at least one file.' + assert n_training_shards > 0, 'There must be at least one output shard.' + assert n_test_shards > 0, 'There must be at least one output shard.' + + self.n_training_shards = n_training_shards + self.n_test_shards = n_test_shards + self.fraction_test_set = fraction_test_set + + self.input_files = input_files + + self.output_name_prefix = output_name_prefix + self.output_training_identifier = '_training' + self.output_test_identifier = '_test' + self.output_file_extension = '.txt' + + self.articles = {} # key: integer identifier, value: list of articles + self.sentences = {} # key: integer identifier, value: list of sentences + self.output_training_files = {} # key: filename, value: list of articles to go into file + self.output_test_files = {} # key: filename, value: list of articles to go into file + + self.init_output_files() + + + # Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines) + def load_articles(self): + print('Start: Loading Articles') + + global_article_count = 0 + for input_file in self.input_files: + print('input file:', input_file) + with open(input_file, mode='r', newline='\n') as f: + for i, line in enumerate(f): + if line.strip(): + self.articles[global_article_count] = line.rstrip() + global_article_count += 1 + + print('End: Loading Articles: There are', len(self.articles), 'articles.') + + + def segment_articles_into_sentences(self, segmenter): + print('Start: Sentence Segmentation') + if len(self.articles) is 0: + self.load_articles() + + assert len(self.articles) is not 0, 'Please check that input files are present and contain data.' 
+ + # TODO: WIP: multiprocessing (create independent ranges and spawn processes) + use_multiprocessing = 'serial' + + def chunks(data, size=len(self.articles)): + it = iter(data) + for i in range(0, len(data), size): + yield {k: data[k] for k in islice(it, size)} + + if use_multiprocessing == 'manager': + manager = multiprocessing.Manager() + return_dict = manager.dict() + jobs = [] + n_processes = 7 # in addition to the main process, total = n_proc+1 + + def work(articles, return_dict): + sentences = {} + for i, article in enumerate(articles): + sentences[i] = segmenter.segment_string(articles[article]) + + if i % 5000 == 0: + print('Segmenting article', i) + + return_dict.update(sentences) + + for item in chunks(self.articles, len(self.articles)): + p = multiprocessing.Process(target=work, args=(item, return_dict)) + + # Busy wait + while len(jobs) >= n_processes: + pass + + jobs.append(p) + p.start() + + for proc in jobs: + proc.join() + + elif use_multiprocessing == 'queue': + work_queue = multiprocessing.Queue() + jobs = [] + + for item in chunks(self.articles, len(self.articles)): + pass + + else: # serial option + for i, article in enumerate(self.articles): + self.sentences[i] = segmenter.segment_string(self.articles[article]) + + if i % 5000 == 0: + print('Segmenting article', i) + + print('End: Sentence Segmentation') + + + def init_output_files(self): + print('Start: Init Output Files') + assert len(self.output_training_files) is 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.' + assert len(self.output_test_files) is 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.' + + for i in range(self.n_training_shards): + name = self.output_name_prefix + self.output_training_identifier + '_' + str(i) + self.output_file_extension + self.output_training_files[name] = [] + + for i in range(self.n_test_shards): + name = self.output_name_prefix + self.output_test_identifier + '_' + str(i) + self.output_file_extension + self.output_test_files[name] = [] + + print('End: Init Output Files') + + + def get_sentences_per_shard(self, shard): + result = 0 + for article_id in shard: + result += len(self.sentences[article_id]) + + return result + + + def distribute_articles_over_shards(self): + print('Start: Distribute Articles Over Shards') + assert len(self.articles) >= self.n_training_shards + self.n_test_shards, 'There are fewer articles than shards. Please add more data or reduce the number of shards requested.' 
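# A minimal end-to-end sketch of the Sharding pipeline above, assuming the
# one-article-per-line input produced by the *TextFormatting classes; the input
# file name, output prefix and shard counts are placeholders (bertPrep.py supplies
# the real values):
#
#     segmenter = NLTKSegmenter()
#     sharding = Sharding(['wikicorpus_en_one_article_per_line.txt'],
#                         'sharded/wikicorpus_en',
#                         n_training_shards=256, n_test_shards=1,
#                         fraction_test_set=0.1)
#     sharding.load_articles()
#     sharding.segment_articles_into_sentences(segmenter)
#     sharding.distribute_articles_over_shards()
#     sharding.write_shards_to_disk()   # expects sharded/training and sharded/test dirs to exist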
+ + # Create dictionary with - key: sentence count per article, value: article id number + sentence_counts = defaultdict(lambda: []) + + max_sentences = 0 + total_sentences = 0 + + for article_id in self.sentences: + current_length = len(self.sentences[article_id]) + sentence_counts[current_length].append(article_id) + max_sentences = max(max_sentences, current_length) + total_sentences += current_length + + n_sentences_assigned_to_training = int((1 - self.fraction_test_set) * total_sentences) + nominal_sentences_per_training_shard = n_sentences_assigned_to_training // self.n_training_shards + nominal_sentences_per_test_shard = (total_sentences - n_sentences_assigned_to_training) // self.n_test_shards + + consumed_article_set = set({}) + unused_article_set = set(self.articles.keys()) + + # Make first pass and add one article worth of lines per file + for file in self.output_training_files: + current_article_id = sentence_counts[max_sentences][-1] + sentence_counts[max_sentences].pop(-1) + self.output_training_files[file].append(current_article_id) + consumed_article_set.add(current_article_id) + unused_article_set.remove(current_article_id) + + # Maintain the max sentence count + while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: + max_sentences -= 1 + + if len(self.sentences[current_article_id]) > nominal_sentences_per_training_shard: + nominal_sentences_per_training_shard = len(self.sentences[current_article_id]) + print('Warning: A single article contains more than the nominal number of sentences per training shard.') + + for file in self.output_test_files: + current_article_id = sentence_counts[max_sentences][-1] + sentence_counts[max_sentences].pop(-1) + self.output_test_files[file].append(current_article_id) + consumed_article_set.add(current_article_id) + unused_article_set.remove(current_article_id) + + # Maintain the max sentence count + while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: + max_sentences -= 1 + + if len(self.sentences[current_article_id]) > nominal_sentences_per_test_shard: + nominal_sentences_per_test_shard = len(self.sentences[current_article_id]) + print('Warning: A single article contains more than the nominal number of sentences per test shard.') + + training_counts = [] + test_counts = [] + + for shard in self.output_training_files: + training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard])) + + for shard in self.output_test_files: + test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard])) + + training_median = statistics.median(training_counts) + test_median = statistics.median(test_counts) + + # Make subsequent passes over files to find articles to add without going over limit + history_remaining = [] + n_history_remaining = 4 + + while len(consumed_article_set) < len(self.articles): + for fidx, file in enumerate(self.output_training_files): + nominal_next_article_size = min(nominal_sentences_per_training_shard - training_counts[fidx], max_sentences) + + # Maintain the max sentence count + while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: + max_sentences -= 1 + + while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0: + nominal_next_article_size -= 1 + + if nominal_next_article_size not in sentence_counts or nominal_next_article_size is 0 or training_counts[fidx] > training_median: + continue # skip adding to this file, will come back later if no file can accept unused articles + + current_article_id = 
sentence_counts[nominal_next_article_size][-1] + sentence_counts[nominal_next_article_size].pop(-1) + + self.output_training_files[file].append(current_article_id) + consumed_article_set.add(current_article_id) + unused_article_set.remove(current_article_id) + + for fidx, file in enumerate(self.output_test_files): + nominal_next_article_size = min(nominal_sentences_per_test_shard - test_counts[fidx], max_sentences) + + # Maintain the max sentence count + while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: + max_sentences -= 1 + + while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0: + nominal_next_article_size -= 1 + + if nominal_next_article_size not in sentence_counts or nominal_next_article_size is 0 or test_counts[fidx] > test_median: + continue # skip adding to this file, will come back later if no file can accept unused articles + + current_article_id = sentence_counts[nominal_next_article_size][-1] + sentence_counts[nominal_next_article_size].pop(-1) + + self.output_test_files[file].append(current_article_id) + consumed_article_set.add(current_article_id) + unused_article_set.remove(current_article_id) + + # If unable to place articles a few times, bump up nominal sizes by fraction until articles get placed + if len(history_remaining) == n_history_remaining: + history_remaining.pop(0) + history_remaining.append(len(unused_article_set)) + + history_same = True + for i in range(1, len(history_remaining)): + history_same = history_same and (history_remaining[i-1] == history_remaining[i]) + + if history_same: + nominal_sentences_per_training_shard += 1 + # nominal_sentences_per_test_shard += 1 + + training_counts = [] + test_counts = [] + for shard in self.output_training_files: + training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard])) + + for shard in self.output_test_files: + test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard])) + + training_median = statistics.median(training_counts) + test_median = statistics.median(test_counts) + + print('Distributing data over shards:', len(unused_article_set), 'articles remaining.') + + + if len(unused_article_set) != 0: + print('Warning: Some articles did not make it into output files.') + + + for shard in self.output_training_files: + print('Training shard:', self.get_sentences_per_shard(self.output_training_files[shard])) + + for shard in self.output_test_files: + print('Test shard:', self.get_sentences_per_shard(self.output_test_files[shard])) + + print('End: Distribute Articles Over Shards') + + + def write_shards_to_disk(self): + print('Start: Write Shards to Disk') + for shard in self.output_training_files: + self.write_single_shard(shard, self.output_training_files[shard], 'training') + + for shard in self.output_test_files: + self.write_single_shard(shard, self.output_test_files[shard], 'test') + + print('End: Write Shards to Disk') + + + def write_single_shard(self, shard_name, shard, split): + shard_split = os.path.split(shard_name) + shard_name = shard_split[0] + '/' + split + '/' + shard_split[1] + + with open(shard_name, mode='w', newline='\n') as f: + for article_id in shard: + for line in self.sentences[article_id]: + f.write(line + '\n') + + f.write('\n') # Line break between articles + + +import nltk + +nltk.download('punkt') + +class NLTKSegmenter: + def __init(self): + pass + + def segment_string(self, article): + return nltk.tokenize.sent_tokenize(article) + diff --git 
a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/WikiDownloader.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/WikiDownloader.py new file mode 100644 index 0000000000000000000000000000000000000000..18146060ff6bbbab529b8625a839d1e29903e827 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/WikiDownloader.py @@ -0,0 +1,68 @@ +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company +# +# Changes: +# - Replaced bzip2 with lbzip2 that uses multiprocessing during decompression +# and provides linear speedup. +# - Added timing code to measure the decompression process duration. +# - Removed unused imports. +############################################################################### + +import os +import subprocess +import time + +class WikiDownloader: + def __init__(self, language, save_path): + self.save_path = save_path + '/wikicorpus_' + language + + if not os.path.exists(self.save_path): + os.makedirs(self.save_path) + + self.language = language + self.download_urls = { + 'en' : 'https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2', + 'zh' : 'https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2' + } + + self.output_files = { + 'en' : 'wikicorpus_en.xml.bz2', + 'zh' : 'wikicorpus_zh.xml.bz2' + } + + + def download(self): + if self.language in self.download_urls: + url = self.download_urls[self.language] + filename = self.output_files[self.language] + + print('Downloading:', url) + if os.path.isfile(self.save_path + '/' + filename): + print('** Download file already exists, skipping download') + else: + cmd = ['wget', url, '--output-document={}'.format(self.save_path + '/' + filename)] + print('Running:', cmd) + status = subprocess.run(cmd) + if status.returncode != 0: + raise RuntimeError('Wiki download not successful') + + # Always unzipping since this is relatively fast and will overwrite + start = time.time() + print('Unzipping:', self.output_files[self.language]) + subprocess.run('lbzip2 -dk ' + self.save_path + '/' + filename, shell=True, check=True) + print("Unzip time:", time.time() - start) + + else: + assert False, 'WikiDownloader not implemented for this language yet.' diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/WikicorpusTextFormatting.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/WikicorpusTextFormatting.py new file mode 100644 index 0000000000000000000000000000000000000000..9d356b1360d6e9b4b0037afac1e35d8bb9688af8 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/WikicorpusTextFormatting.py @@ -0,0 +1,46 @@ +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. 
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import glob
+import os
+
+class WikicorpusTextFormatting:
+    def __init__(self, wiki_path, output_filename, recursive = False):
+        self.wiki_path = wiki_path
+        self.recursive = recursive
+        self.output_filename = output_filename
+
+
+    # This puts one article per line
+    def merge(self):
+        with open(self.output_filename, mode='w', newline='\n') as ofile:
+            for dirname in glob.glob(self.wiki_path + '/*/', recursive=False):
+                for filename in glob.glob(dirname + 'wiki_*', recursive=self.recursive):
+                    print(filename)
+                    article_lines = []
+                    article_open = False
+
+                    with open(filename, mode='r', newline='\n') as file:
+                        for line in file:
+                            if '<doc id=' in line:
+                                article_open = True
+                            elif '</doc>' in line:
+                                article_open = False
+                                for oline in article_lines[1:]:
+                                    if oline != '\n':
+                                        ofile.write(oline.rstrip() + " ")
+                                ofile.write("\n\n")
+                                article_lines = []
+                            else:
+                                if article_open:
+                                    article_lines.append(line)
\ No newline at end of file
diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/create_datasets_from_start.sh b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/create_datasets_from_start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b563a6ce8b56bace309e5a659728514dc985faf0
--- /dev/null
+++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/create_datasets_from_start.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+###############################################################################
+# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
+#
+# Changes:
+# - Removed downloading and preprocessing datasets that are not related to BERT pretrain
+# - Modified file structures originally for NVidia container
+# - Added downloading WikiExtractor and bookcorpus repositories
+###############################################################################
+
+
+to_download=${1:-"wiki_only"} # By default, we don't download BooksCorpus dataset due to recent issues with the host server
+
+data_dir=$(pwd)
+BERT_PREP_WORKING_DIR=${2:-"/data/tensorflow/bert/books_wiki_en_corpus"}
+export BERT_PREP_WORKING_DIR="${BERT_PREP_WORKING_DIR}"
+
+echo "Checkout WikiExtractor repository"
+# checkout WikiExtractor scripts
+git clone https://github.com/attardi/wikiextractor.git && cd wikiextractor && git checkout 6408a430fc504a38b04d37ce5e7fc740191dee16 && cd ..
+ +# Download Wikipedia dataset and/or Bookscorpus dataset +echo "Download dataset ${to_download}" +if [ "$to_download" = "wiki_books" ] ; then + # checkout BookCorpus download scripts + git clone https://github.com/soskek/bookcorpus.git + $PYTHON ${data_dir}/bertPrep.py --action download --dataset bookscorpus +fi +$PYTHON ${data_dir}/bertPrep.py --action download --dataset wikicorpus_en + +echo "Download pretrained weights" +echo "${data_dir}" +$PYTHON ${data_dir}/bertPrep.py --action download --dataset google_pretrained_weights # Includes vocab + +DATASET="wikicorpus_en" + +# Properly format the text files +if [ "$to_download" = "wiki_books" ] ; then + $PYTHON ${data_dir}/bertPrep.py --action text_formatting --dataset bookscorpus + DATASET="books_wiki_en_corpus" +fi +$PYTHON ${data_dir}/bertPrep.py --action text_formatting --dataset wikicorpus_en + +# Shard the text files +$PYTHON ${data_dir}/bertPrep.py --action sharding --dataset ${DATASET} + +# Create TFRecord files Phase 1 +$PYTHON ${data_dir}/bertPrep.py --action create_tfrecord_files --dataset ${DATASET} --max_seq_length 128 \ + --max_predictions_per_seq 20 --vocab_file ${BERT_PREP_WORKING_DIR}/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt + + +# Create TFRecord files Phase 2 +$PYTHON ${data_dir}/bertPrep.py --action create_tfrecord_files --dataset ${DATASET} --max_seq_length 512 \ + --max_predictions_per_seq 80 --vocab_file ${BERT_PREP_WORKING_DIR}/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/create_pretraining_data.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/create_pretraining_data.py new file mode 100644 index 0000000000000000000000000000000000000000..e59c42c632aa12dbac3902576f5938a74675d645 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/create_pretraining_data.py @@ -0,0 +1,512 @@ +# coding=utf-8 +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Copyright 2018 The Google AI Language Team Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +############################################################################### +# Copyright (C) 2020-2021 Habana Labs, Ltd. 
an Intel Company +# +# Changes: +# - Fixed compatibility issues with TensorFlow 2 +############################################################################### + +"""Create masked LM/next sentence masked_lm TF examples for BERT.""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import argparse +import logging +import os +import random +from io import open +import h5py +import tensorflow as tf +import numpy as np +from tqdm import tqdm, trange + +from TensorFlow.nlp.bert.data_preprocessing.tokenization import BertTokenizer +import tokenization as tokenization + +import random +import collections + +class TrainingInstance(object): + """A single training instance (sentence pair).""" + + def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels, + is_random_next): + self.tokens = tokens + self.segment_ids = segment_ids + self.is_random_next = is_random_next + self.masked_lm_positions = masked_lm_positions + self.masked_lm_labels = masked_lm_labels + + def __str__(self): + s = "" + s += "tokens: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.tokens])) + s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids])) + s += "is_random_next: %s\n" % self.is_random_next + s += "masked_lm_positions: %s\n" % (" ".join( + [str(x) for x in self.masked_lm_positions])) + s += "masked_lm_labels: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.masked_lm_labels])) + s += "\n" + return s + + def __repr__(self): + return self.__str__() + + +def write_instance_to_example_files(instances, tokenizer, max_seq_length, + max_predictions_per_seq, output_files, output_formats="tfrecord"): + """Create TF example files from `TrainingInstance`s.""" + writers = [] + for output_file in output_files: + writers.append(tf.compat.v1.python_io.TFRecordWriter(output_file)) + + writer_index = 0 + + total_written = 0 + if 'hdf5' in output_formats: + features_hdf5 = collections.OrderedDict() + num_instances = len(instances) + features_hdf5["input_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32") + features_hdf5["input_mask"] = np.zeros([num_instances, max_seq_length], dtype="int32") + features_hdf5["segment_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32") + features_hdf5["masked_lm_positions"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32") + features_hdf5["masked_lm_ids"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32") + features_hdf5["next_sentence_labels"] = np.zeros(num_instances, dtype="int32") + + for (inst_index, instance) in enumerate(instances): + input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) + input_mask = [1] * len(input_ids) + segment_ids = list(instance.segment_ids) + assert len(input_ids) <= max_seq_length + + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + masked_lm_positions = list(instance.masked_lm_positions) + masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) + masked_lm_weights = [1.0] * len(masked_lm_ids) + + while len(masked_lm_positions) < max_predictions_per_seq: + masked_lm_positions.append(0) + masked_lm_ids.append(0) + masked_lm_weights.append(0.0) + + next_sentence_label = 1 if instance.is_random_next else 0 + + features = collections.OrderedDict() + features["input_ids"] = 
create_int_feature(input_ids) + features["input_mask"] = create_int_feature(input_mask) + features["segment_ids"] = create_int_feature(segment_ids) + features["masked_lm_positions"] = create_int_feature(masked_lm_positions) + features["masked_lm_ids"] = create_int_feature(masked_lm_ids) + features["masked_lm_weights"] = create_float_feature(masked_lm_weights) + features["next_sentence_labels"] = create_int_feature([next_sentence_label]) + + if 'tfrecord' in output_formats: + tf_example = tf.train.Example(features=tf.train.Features(feature=features)) + writers[writer_index].write(tf_example.SerializeToString()) + if 'hdf5' in output_formats: + features_hdf5["input_ids"][inst_index] = input_ids + features_hdf5["input_mask"][inst_index] = input_mask + features_hdf5["segment_ids"][inst_index] = segment_ids + features_hdf5["masked_lm_positions"][inst_index] = masked_lm_positions + features_hdf5["masked_lm_ids"][inst_index] = masked_lm_ids + features_hdf5["next_sentence_labels"][inst_index] = next_sentence_label + if 'tfrecord' not in output_formats and 'hdf5' not in output_formats: + assert False, 'Either empty output_formats list or unsupported type specified. Try: tfrecord or hdf5' + + writer_index = (writer_index + 1) % len(writers) + + total_written += 1 + + if inst_index < 20: + tf.compat.v1.logging.info("*** Example ***") + tf.compat.v1.logging.info("tokens: %s" % " ".join( + [tokenization.printable_text(x) for x in instance.tokens])) + + for feature_name in features.keys(): + feature = features[feature_name] + values = [] + if feature.int64_list.value: + values = feature.int64_list.value + elif feature.float_list.value: + values = feature.float_list.value + tf.compat.v1.logging.info( + "%s: %s" % (feature_name, " ".join([str(x) for x in values]))) + + for writer in writers: + writer.close() + + if 'hdf5' in output_formats: + f = h5py.File(output_file, 'w') + f.create_dataset("input_ids", data=features_hdf5["input_ids"], dtype='i4', compression='gzip') + f.create_dataset("input_mask", data=features_hdf5["input_mask"], dtype='i1', compression='gzip') + f.create_dataset("segment_ids", data=features_hdf5["segment_ids"], dtype='i1', compression='gzip') + f.create_dataset("masked_lm_positions", data=features_hdf5["masked_lm_positions"], dtype='i4', compression='gzip') + f.create_dataset("masked_lm_ids", data=features_hdf5["masked_lm_ids"], dtype='i4', compression='gzip') + f.create_dataset("next_sentence_labels", data=features_hdf5["next_sentence_labels"], dtype='i1', compression='gzip') + f.flush() + f.close() + + tf.compat.v1.logging.info("Wrote %d total instances", total_written) + + +def create_int_feature(values): + feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) + return feature + + +def create_float_feature(values): + feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) + return feature + + +def create_training_instances(input_files, tokenizer, max_seq_length, + dupe_factor, short_seq_prob, masked_lm_prob, + max_predictions_per_seq, rng): + """Create `TrainingInstance`s from raw text.""" + all_documents = [[]] + + # Input file format: + # (1) One sentence per line. These should ideally be actual sentences, not + # entire paragraphs or arbitrary spans of text. (Because we use the + # sentence boundaries for the "next sentence prediction" task). + # (2) Blank lines between documents. Document boundaries are needed so + # that the "next sentence prediction" task doesn't span between documents. 
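+  # An illustrative (hypothetical) example of the expected input:
+  #   The quick brown fox jumps over the lazy dog.
+  #   It was not amused.
+  #                                  <- blank line marks the end of a document
+  #   A second document starts here.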
+ for input_file in input_files: + print("creating instance from {}".format(input_file)) + with open(input_file, "r") as reader: + while True: + line = tokenization.convert_to_unicode(reader.readline()) + if not line: + break + line = line.strip() + + # Empty lines are used as document delimiters + if not line: + all_documents.append([]) + tokens = tokenizer.tokenize(line) + if tokens: + all_documents[-1].append(tokens) + + # Remove empty documents + all_documents = [x for x in all_documents if x] + rng.shuffle(all_documents) + + vocab_words = list(tokenizer.vocab.keys()) + instances = [] + for _ in range(dupe_factor): + for document_index in range(len(all_documents)): + instances.extend( + create_instances_from_document( + all_documents, document_index, max_seq_length, short_seq_prob, + masked_lm_prob, max_predictions_per_seq, vocab_words, rng)) + + rng.shuffle(instances) + return instances + + +def create_instances_from_document( + all_documents, document_index, max_seq_length, short_seq_prob, + masked_lm_prob, max_predictions_per_seq, vocab_words, rng): + """Creates `TrainingInstance`s for a single document.""" + document = all_documents[document_index] + + # Account for [CLS], [SEP], [SEP] + max_num_tokens = max_seq_length - 3 + + # We *usually* want to fill up the entire sequence since we are padding + # to `max_seq_length` anyways, so short sequences are generally wasted + # computation. However, we *sometimes* + # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter + # sequences to minimize the mismatch between pre-training and fine-tuning. + # The `target_seq_length` is just a rough target however, whereas + # `max_seq_length` is a hard limit. + target_seq_length = max_num_tokens + if rng.random() < short_seq_prob: + target_seq_length = rng.randint(2, max_num_tokens) + + # We DON'T just concatenate all of the tokens from a document into a long + # sequence and choose an arbitrary split point because this would make the + # next sentence prediction task too easy. Instead, we split the input into + # segments "A" and "B" based on the actual "sentences" provided by the user + # input. + instances = [] + current_chunk = [] + current_length = 0 + i = 0 + while i < len(document): + segment = document[i] + current_chunk.append(segment) + current_length += len(segment) + if i == len(document) - 1 or current_length >= target_seq_length: + if current_chunk: + # `a_end` is how many segments from `current_chunk` go into the `A` + # (first) sentence. + a_end = 1 + if len(current_chunk) >= 2: + a_end = rng.randint(1, len(current_chunk) - 1) + + tokens_a = [] + for j in range(a_end): + tokens_a.extend(current_chunk[j]) + + tokens_b = [] + # Random next + is_random_next = False + if len(current_chunk) == 1 or rng.random() < 0.5: + is_random_next = True + target_b_length = target_seq_length - len(tokens_a) + + # This should rarely go for more than one iteration for large + # corpora. However, just to be careful, we try to make sure that + # the random document is not the same as the document + # we're processing. 
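+          # Sample a candidate index at most 10 times; if every draw happens to
+          # return the current document (only likely for very small corpora),
+          # the fallback below treats segment B as an actual next sentence.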
+ for _ in range(10): + random_document_index = rng.randint(0, len(all_documents) - 1) + if random_document_index != document_index: + break + + #If picked random document is the same as the current document + if random_document_index == document_index: + is_random_next = False + + random_document = all_documents[random_document_index] + random_start = rng.randint(0, len(random_document) - 1) + for j in range(random_start, len(random_document)): + tokens_b.extend(random_document[j]) + if len(tokens_b) >= target_b_length: + break + # We didn't actually use these segments so we "put them back" so + # they don't go to waste. + num_unused_segments = len(current_chunk) - a_end + i -= num_unused_segments + # Actual next + else: + is_random_next = False + for j in range(a_end, len(current_chunk)): + tokens_b.extend(current_chunk[j]) + truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) + + assert len(tokens_a) >= 1 + assert len(tokens_b) >= 1 + + tokens = [] + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in tokens_a: + tokens.append(token) + segment_ids.append(0) + + tokens.append("[SEP]") + segment_ids.append(0) + + for token in tokens_b: + tokens.append(token) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + (tokens, masked_lm_positions, + masked_lm_labels) = create_masked_lm_predictions( + tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) + instance = TrainingInstance( + tokens=tokens, + segment_ids=segment_ids, + is_random_next=is_random_next, + masked_lm_positions=masked_lm_positions, + masked_lm_labels=masked_lm_labels) + instances.append(instance) + current_chunk = [] + current_length = 0 + i += 1 + + return instances + + +MaskedLmInstance = collections.namedtuple("MaskedLmInstance", + ["index", "label"]) + + +def create_masked_lm_predictions(tokens, masked_lm_prob, + max_predictions_per_seq, vocab_words, rng): + """Creates the predictions for the masked LM objective.""" + + cand_indexes = [] + for (i, token) in enumerate(tokens): + if token == "[CLS]" or token == "[SEP]": + continue + cand_indexes.append(i) + + rng.shuffle(cand_indexes) + + output_tokens = list(tokens) + + num_to_predict = min(max_predictions_per_seq, + max(1, int(round(len(tokens) * masked_lm_prob)))) + + masked_lms = [] + covered_indexes = set() + for index in cand_indexes: + if len(masked_lms) >= num_to_predict: + break + if index in covered_indexes: + continue + covered_indexes.add(index) + + masked_token = None + # 80% of the time, replace with [MASK] + if rng.random() < 0.8: + masked_token = "[MASK]" + else: + # 10% of the time, keep original + if rng.random() < 0.5: + masked_token = tokens[index] + # 10% of the time, replace with random word + else: + masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)] + + output_tokens[index] = masked_token + + masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) + + masked_lms = sorted(masked_lms, key=lambda x: x.index) + + masked_lm_positions = [] + masked_lm_labels = [] + for p in masked_lms: + masked_lm_positions.append(p.index) + masked_lm_labels.append(p.label) + + return (output_tokens, masked_lm_positions, masked_lm_labels) + + +def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): + """Truncates a pair of sequences to a maximum sequence length.""" + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_num_tokens: + break + + trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b + assert 
len(trunc_tokens) >= 1 + + # We want to sometimes truncate from the front and sometimes from the + # back to add more randomness and avoid biases. + if rng.random() < 0.5: + del trunc_tokens[0] + else: + trunc_tokens.pop() + + +def main(): + parser = argparse.ArgumentParser() + ## Required parameters + parser.add_argument("--vocab_file", + default=None, + type=str, + required=True, + help="The vocabulary the BERT model will train on.") + parser.add_argument("--input_file", + default=None, + type=str, + required=True, + help="The input train corpus. can be directory with .txt files or a path to a single file") + parser.add_argument("--output_file", + default=None, + type=str, + required=True, + help="The output file where the model checkpoints will be written.") + + ## Other parameters + # int + parser.add_argument("--max_seq_length", + default=128, + type=int, + help="The maximum total input sequence length after WordPiece tokenization. \n" + "Sequences longer than this will be truncated, and sequences shorter \n" + "than this will be padded.") + parser.add_argument("--dupe_factor", + default=10, + type=int, + help="Number of times to duplicate the input data (with different masks).") + parser.add_argument("--max_predictions_per_seq", + default=20, + type=int, + help="Maximum sequence length.") + + # floats + + parser.add_argument("--masked_lm_prob", + default=0.15, + type=float, + help="Masked LM probability.") + + parser.add_argument("--short_seq_prob", + default=0.1, + type=float, + help="Probability to create a sequence shorter than maximum sequence length") + + parser.add_argument("--do_lower_case", + action='store_true', + default=True, + help="Whether to lower case the input text. True for uncased models, False for cased models.") + parser.add_argument('--random_seed', + type=int, + default=12345, + help="random seed for initialization") + + args = parser.parse_args() + + tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case) + + input_files = [] + if os.path.isfile(args.input_file): + input_files.append(args.input_file) + elif os.path.isdir(args.input_file): + input_files = [os.path.join(args.input_file, f) for f in os.listdir(args.input_file) if + (os.path.isfile(os.path.join(args.input_file, f)) and f.endswith('.txt'))] + else: + raise ValueError("{} is not a valid path".format(args.input_file)) + + rng = random.Random(args.random_seed) + instances = create_training_instances( + input_files, tokenizer, args.max_seq_length, args.dupe_factor, + args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq, + rng) + + output_files = args.output_file.split(",") + print("*** Writing to output files ***") + for output_file in output_files: + print(output_file) + + + write_instance_to_example_files(instances, tokenizer, args.max_seq_length, + args.max_predictions_per_seq, output_files) + + +if __name__ == "__main__": + main() diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/pack_pretraining_data_tfrec.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/pack_pretraining_data_tfrec.py new file mode 100644 index 0000000000000000000000000000000000000000..a34d3582c481d9c8e835dfde57c5702c432f62bb --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/pack_pretraining_data_tfrec.py @@ -0,0 +1,531 @@ +# Copyright (c) 2020 Graphcore Ltd. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2022 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - Added functionality for saving parameters of packing algorithm to metadata file. +# - Added checks for output_dir parameter. It will be created automatically if passed location does not exist. + + +import argparse +import gc +import json +import os +import random +import time +from collections import OrderedDict, defaultdict, deque +from concurrent.futures import ProcessPoolExecutor +from functools import lru_cache +from itertools import chain, repeat +from sys import getsizeof, stderr + +import numpy as np +import tensorflow as tf +from scipy import optimize + +@lru_cache(maxsize=None) +def packing_strategies(start, previous, target, depth): + gap = target - start + + # The collection of possible strategies given the + # starting sum, the target sum, and the available depth + # strategy search is limited to increments greater or equal to previous + strategies = [] + # Complete the packing with exactly 1 number + if depth == 1: + if gap >= previous: + strategies.append([gap]) + + # Complete the sample in "depth" steps, recursively + else: + for new in range(previous, gap + 1): + + new_gap = target - start - new + if new_gap == 0: + strategies.append([new]) + else: + options = packing_strategies(start + new, new, target, depth - 1) + + for option in options: + if len(option) > 0: + strategies.append([new] + option) + return strategies + + +def get_metadata_file_path(output_dir): + """Returns path for metadata file one direcotry above output_dir. 
+ File will be called the same way as directory with training dataset + with appended metadata.json as below: + ├── training + └── training_metadata.json""" + norm_path = os.path.normpath(output_dir) + base_path, metadata_file_name = os.path.split(norm_path) + metadata_file_name = metadata_file_name + '_metadata.json' + return os.path.join(base_path, metadata_file_name) + +def get_packing_recipe(output_dir, sequence_lengths, max_sequence_length, max_sequences_per_pack=3): + # Histogram of sequence lengths + histogram, bins = np.histogram(sequence_lengths, bins=np.arange(1, max_sequence_length + 2)) + print("Begin packing pass".center(80, "_")) + print(f"Unpacked mean sequence length: {sequence_lengths.mean():3.2f}") + + # Make sure all strategies are recipes to pack to the correct sequence length + strategy_set = packing_strategies(0, 1, max_sequence_length, max_sequences_per_pack) + for strategy in strategy_set: + assert(sum(strategy) == max_sequence_length) + num_strategies = len(strategy_set) + print(f"Found {num_strategies} unique packing strategies.") + + # Solve the packing equation A@mixture = histogram + A = np.zeros((max_sequence_length, num_strategies), dtype=np.int32) + for i in range(num_strategies): + strategy = strategy_set[i] + for seq_len in strategy: + A[seq_len - 1, i] += 1 + + # short sequences are inexpensive to add, so should have low residual weights + # to exactly minimize padding use w0 = np.arange(1, max_sequence_length + 1) + # in practice the difference is negligible, but this converges faster + padding_cutoff = 8 + w0 = np.ones([max_sequence_length]) + # w0 = np.linspace(1, max_sequence_length+1, max_sequence_length)/max_sequence_length # padding minimization weight + w0[:padding_cutoff] = padding_cutoff / (2 * max_sequence_length) + w0 = np.sqrt(w0) + + # Starting values for the padding and the mixture + padding = np.zeros([max_sequence_length], dtype=np.int32) + mixture = np.zeros([num_strategies], dtype=np.int32) + b = histogram + padding + + # Pack sequences as best as possible, then increase padding accordingly and repeat + for i in range(0, 20): + print(f"\nIteration: {i}: sequences still to pack: ", b.sum()) + start = time.time() + partial_mixture, rnorm = optimize.nnls(np.expand_dims(w0, -1) * A, w0 * b) + print(f"Solving nnls took {time.time() - start:3.2f} seconds.") + print(f"Residual norm: {rnorm:3.5e}") + + # Update mixture (round the floating point solution to integers) + partial_mixture = np.where(partial_mixture < 2, np.rint(partial_mixture), np.floor(partial_mixture)) + + # If partial mixture is empty (due to rounding) we follow the gradient + # this usually happens when the number of examples is small i.e. 
~100 + if partial_mixture.max() == 0: + grad = A.T @ (b * np.arange(1, max_sequence_length + 1)) + k = int(b.sum() // 2) + 1 + topk = np.argsort(-grad)[:k] + partial_mixture[topk] += 1 + + # Update mixture + mixture = mixture + partial_mixture + + # Compute the residuals + residual = b - A @ partial_mixture + print(f"Max residual: {abs(residual).max()}") + print(f"Residual on first 8 categories: {np.around(residual[:8], 4)}") + print(f"Residual on last 8 categories: {np.around(residual[-8:], 4)}") + + # Add padding based on deficit (negative residual) + partial_padding = np.where(residual < 0, -residual, 0) + print(f"Added {(partial_padding*np.arange(1,max_sequence_length+1)).sum():3.2e} tokens of padding.") + padding = padding + partial_padding + + # Update the rhs vector (remaining surplus sequences) + b = histogram + padding - A @ mixture + assert np.all(b >= 0), b + + # Done iterating + if b.sum() < 100: + break + + # Make sure there is no remainder + unpacked_seqlen = np.arange(1, args.max_sequence_length + 1)[b > 0] + # Update the mixture to also covered the unpacked sequences + for l in unpacked_seqlen: + # Get the depth 1 strategy + strategy = sorted([l, args.max_sequence_length - l]) + strategy_index = strategy_set.index(strategy) + mixture[strategy_index] += b[l-1] + b = histogram - A @ mixture + padding = np.where(b < 0, -b, 0) + b = histogram + padding - A @ mixture + assert b.sum() == 0 + + # Analyze result + print("Done solving for packing order".center(80, "_")) + num_padding_tokens = (np.arange(1, max_sequence_length + 1) * padding).sum() + num_padding_tokens_original = (max_sequence_length - sequence_lengths).sum() + number_of_sequences_dropped = b.sum() + print(f"Number of sequences dropped: {number_of_sequences_dropped}") + number_of_strategies_utilized = np.count_nonzero(mixture) + print(f"Number of strategies utilized: {number_of_strategies_utilized}") + new_number_of_samples = int(mixture.sum()) + original_number_of_samples = len(sequence_lengths) + compression = 1 - new_number_of_samples / original_number_of_samples + print(f"New number of samples: {new_number_of_samples:3.2f}, original {original_number_of_samples}. A compression ratio of {compression:3.3f}") + expected_speedup_from_packing = 1 / (1 - compression) + print(f"The expected speed-up from packing: {expected_speedup_from_packing}") + upper_bound = 1.0 / (1 - ((1 - sequence_lengths / max_sequence_length).mean())) + print(f"Theoretical upper bound on speed-up: {upper_bound:3.3f}") + avg_sequences_per_sample = ((A.sum(0) * mixture).sum() - padding.sum()) / new_number_of_samples + print(f"Average sequences/sample {avg_sequences_per_sample:3.5f}") + print(f"Added {num_padding_tokens:3.2e} padding tokens. 
Original dataset used {num_padding_tokens_original:3.2e} padding tokens") + efficiency = (new_number_of_samples*max_sequence_length - num_padding_tokens)/(new_number_of_samples*max_sequence_length) + print(f"Packing efficiency (fraction of real tokens): {efficiency:3.4f}") + + print(f"Top 8 strategies") + topK = np.argsort(-mixture)[:8] + for i in topK: + print(f"Strategy {strategy_set[i]} which is used {int(mixture[i])} times") + print("".center(80, "_")) + + # Figure out the slicing that each strategy should use + slicing = np.zeros_like(A) + slicing[:, 1:] = np.cumsum(A * mixture, axis=1)[:, :-1] + slicing = slicing.T + + mixture = mixture.astype(np.int64) + + # Save packing parameters to metadata file + metadata_file_path = get_metadata_file_path(output_dir) + print(f"Saving metadata to file: {metadata_file_path}") + + packing_metadata = { + "sequences_dropped": int(number_of_sequences_dropped), + "num_strategies_utilized": number_of_strategies_utilized, + "new_number_of_samples": new_number_of_samples, + "original_number_of_samples": original_number_of_samples, + "compression_ratio": compression, + "expected_speedup": expected_speedup_from_packing, + "theoretical_speedup": float(upper_bound), + "avg_seq_per_sample": float(avg_sequences_per_sample), + "padding_tokens_original_dataset": int(num_padding_tokens_original), + "padding_tokens_packed_dataset": float(num_padding_tokens), + "packing_efficiency": float(efficiency), + "top_8_strategies": topK.tolist() + } + with open(metadata_file_path, mode='w') as json_file: + json_file.write(json.dumps(packing_metadata, sort_keys=True, indent=2)) + return strategy_set, mixture, padding, slicing + + +def slice_examples(examples_by_length, slicing, strategy_set, repeat_counts): + # Divide the work, firstly between the strategies and then into chunks of 50k + slices = [] + strategies = [] + part_idx = [] + for strategy, slice_offsets, repeat_count in zip(strategy_set, slicing, repeat_counts): + if repeat_count == 0: + continue + # Slice out the sequences allocated to this strategy in increments of 50k + num_sample_per_slice=4480 + num_parts = repeat_count // num_sample_per_slice + num_parts = num_parts + int(repeat_count != num_parts * num_sample_per_slice) + subcounts = (min(num_sample_per_slice, repeat_count - num_sample_per_slice * (i - 1)) for i in range(1, num_parts + 1)) + for part_id, part_count in enumerate(subcounts): + examples = [] + for k, seq_len in enumerate(strategy): + slice_start = int(slice_offsets[seq_len - 1]) + slice_end = slice_start + int(part_count) + slice_offsets[seq_len - 1] = slice_end + examples.append(examples_by_length[seq_len][slice_start:slice_end]) + slices.append(examples) + strategies.append(strategy) + part_idx.append(part_id) + examples_by_length = None + return slices, strategies, part_idx + + +def parallel_pack_according_to_strategy(args, part_idx, strategy, examples): + # Pack the sequences according to the strategy and write them to disk + try: + base_filename = os.path.join(args.output_dir, "strategy_" + "_".join(map(str, strategy))) + filename = base_filename + f"_part_{part_idx}" + print(filename) + writer = tf.compat.v1.python_io.TFRecordWriter(filename) + for i, multi_sequence in enumerate(zip(*examples)): + features = create_multi_sequence_example(multi_sequence, args.max_predictions_per_sequence, + args.max_sequence_length, args.max_sequences_per_pack) + # Write to file + tf_example = tf.train.Example(features=tf.train.Features(feature=features)) + writer.write(tf_example.SerializeToString()) + 
writer.close() + except: + print('failed to write: ',strategy,part_idx) + base_filename = os.path.join(args.output_dir, "FAIL_strategy_" + "_".join(map(str, strategy))) + filename = base_filename + f"_part_{part_idx}" + print('saved failed examples to: ','FAIL_'+filename) + + + +def create_multi_sequence_example(multi_sequence, max_predictions_per_sequence, max_sequence_length, max_sequences_per_pack): + # SEQ + packed_input_ids = np.zeros(max_sequence_length, dtype=np.int32) + packed_input_mask = np.zeros(max_sequence_length, dtype=np.int32) + packed_segment_ids = np.zeros(max_sequence_length, dtype=np.int32) + packed_positions = np.zeros(max_sequence_length, dtype=np.int32) + + # MLM + # we are packing up to max_sequences_per_pack, each with a certain percentage of masked tokens + # in case that percentege is rounded up for all sequences in the pack, need to add an extra token for + # each sequence in the pack + packed_masked_lm_positions = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32) + packed_masked_lm_ids = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32) + packed_masked_lm_weights = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32) + + # NSP + packed_next_sentence_positions = np.zeros(max_sequences_per_pack, dtype=np.int32) + packed_next_sentence_labels = np.zeros(max_sequences_per_pack, dtype=np.int32) + packed_next_sentence_weights = np.zeros(max_sequences_per_pack, dtype=np.int32) + + offset = 0 + mlm_offset = 0 + sequence_index = 1 # used in the input mask + for sequence in multi_sequence: + # Padding sequences are donoted with None + if sequence is not None: + example = tf.train.Example() + example.ParseFromString(sequence.numpy()) + + input_ids = np.array(example.features.feature['input_ids'].int64_list.value) + input_mask = np.array(example.features.feature['input_mask'].int64_list.value) + segment_ids = np.array(example.features.feature['segment_ids'].int64_list.value) + masked_lm_positions = np.array(example.features.feature['masked_lm_positions'].int64_list.value) + masked_lm_ids = np.array(example.features.feature['masked_lm_ids'].int64_list.value) + masked_lm_weights = np.array(example.features.feature['masked_lm_weights'].float_list.value) + next_sentence_labels = np.array(example.features.feature['next_sentence_labels'].int64_list.value) + seq_len = input_mask.sum() + + del example + + # SEQ + packed_input_ids[offset:offset + seq_len] = input_ids[:seq_len] + packed_input_mask[offset:offset + seq_len] = sequence_index + packed_segment_ids[offset:offset + seq_len] = segment_ids[:seq_len] + packed_positions[offset:offset + seq_len] = np.arange(0, seq_len) + + # MLM + mlm_len = int(masked_lm_weights.sum()) + assert mlm_offset + mlm_len < max_predictions_per_sequence + max_sequences_per_pack, "Too many LM predictions per sequences" + max_mlm = mlm_offset + mlm_len + packed_masked_lm_positions[mlm_offset:max_mlm] = offset + masked_lm_positions[:mlm_len] + packed_masked_lm_ids[mlm_offset:max_mlm] = masked_lm_ids[:mlm_len] + packed_masked_lm_weights[mlm_offset:max_mlm] = sequence_index + # NSP + packed_next_sentence_positions[sequence_index - 1] = offset + packed_next_sentence_labels[sequence_index - 1] = next_sentence_labels + packed_next_sentence_weights[sequence_index - 1] = 1 + + # Update offsets + sequence_index += 1 + offset += seq_len + mlm_offset = max_mlm + input_ids = None; input_mask = None; segment_ids = None; masked_lm_positions = None; + masked_lm_ids = None; 
masked_lm_weights = None; next_sentence_labels = None; seq_len = None; + # Pack into binary format and write it + + features = OrderedDict() + + features["input_ids"] = create_int_feature(packed_input_ids) + features["input_mask"] = create_int_feature(packed_input_mask) + features["segment_ids"] = create_int_feature(packed_segment_ids) + features["positions"] = create_int_feature(packed_positions) + features["masked_lm_positions"] = create_int_feature(packed_masked_lm_positions) + features["masked_lm_ids"] = create_int_feature(packed_masked_lm_ids) + features["masked_lm_weights"] = create_float_feature(packed_masked_lm_weights) + features["next_sentence_positions"] = create_int_feature(packed_next_sentence_positions) + features["next_sentence_labels"] = create_int_feature(packed_next_sentence_labels) + features["next_sentence_weights"] = create_float_feature(packed_next_sentence_weights) + del packed_input_ids; del packed_input_mask; del packed_segment_ids; del packed_positions; del packed_masked_lm_positions + del packed_masked_lm_weights; del packed_next_sentence_positions; del packed_next_sentence_labels; del packed_next_sentence_weights + + return features + +def create_bytes_feature(value): + """Returns a bytes_list from a string / byte.""" + if isinstance(value, type(tf.constant(0))): + value = value.numpy() # BytesList won't unpack a string from an EagerTensor. + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + +def create_int_feature(values): + feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) + return feature + +def create_float_feature(values): + feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) + return feature + +def total_size(o, handlers={}, verbose=False): + """ Returns the approximate memory footprint an object and all of its contents. + + Automatically finds the contents of the following builtin containers and + their subclasses: tuple, list, deque, dict, set and frozenset. 
+ To search other containers, add handlers to iterate over their contents: + + handlers = {SomeContainerClass: iter, + OtherContainerClass: OtherContainerClass.get_elements} + + """ + dict_handler = lambda d: chain.from_iterable(d.items()) + all_handlers = {tuple: iter, + list: iter, + deque: iter, + dict: dict_handler, + set: iter, + frozenset: iter, + } + all_handlers.update(handlers) # user handlers take precedence + seen = set() # track which object id's have already been seen + default_size = getsizeof(0) # estimate sizeof object without __sizeof__ + + def sizeof(o): + if id(o) in seen: # do not double count the same object + return 0 + seen.add(id(o)) + s = getsizeof(o, default_size) + + if verbose: + print(s, type(o), repr(o), file=stderr) + + for typ, handler in all_handlers.items(): + if isinstance(o, typ): + s += sum(map(sizeof, handler(o))) + break + return s + + return sizeof(o) +def compress_zeros(input): + return input[0:np.where(input)[0][-1]+1] + +def decompress_zeros(input,list_size): + output = np.zeros(list_size) + output[0:len(input)]=input + return output + +def compress_seg_ids(segment_ids): + tmp=np.where(segment_ids)[0] + return np.array([tmp[0],tmp[-1]-tmp[0]]) + +def decompress_seg_ids(segment_ids): + output = np.zeros(512) + output[segment_ids[0],segment_ids[0]+segment_ids[1]]=1 + return output + +def getCurrentMemoryUsage(): + # Getting all memory using os.popen() + total_memory, used_memory, free_memory = map( + int, os.popen('free -t -m').readlines()[-1].split()[1:]) + + # Memory usage + print("RAM memory % used:", round((used_memory/total_memory) * 100, 2)) + return used_memory/total_memory + +def parallel_record_loader(record): + example = tf.train.Example() + example.ParseFromString(record.numpy()) + im_length = sum(example.features.feature['input_mask'].int64_list.value) + return record, im_length + + +def parallel_data_loader(path,filename): + sequence_lengths_part = [] + examples_by_length_part = defaultdict(list) + for record in tf.data.TFRecordDataset(path+filename): + example = tf.train.Example() + example.ParseFromString(record.numpy()) + im_length = sum(example.features.feature['input_mask'].int64_list.value) + examples_by_length_part[im_length].append(record) + sequence_lengths_part.append(im_length) + del example + return sequence_lengths_part,examples_by_length_part + +if __name__ == "__main__": + tf.compat.v1.enable_eager_execution() + parser = argparse.ArgumentParser() + parser.add_argument("--input-glob", help="A glob expression for the input files to read in and pack", required=True, type=str) + parser.add_argument("--output-dir", help="The destination folder for the output files", required=True) + parser.add_argument("--max-files", help="At most how many files to process (limited by RAM)", default=100,type=int) + parser.add_argument("--duplication-factor", help="Same as the one passed to create input data", default=1, type=int) + parser.add_argument("--max-sequence-length", help="The maximum number of tokens in an example", default=512, type=int) + parser.add_argument("--max-predictions-per-sequence", help="The maximum number of masked tokens in an un-packed example", default=76, type=int) + parser.add_argument("--max-sequences-per-pack", help="The maximum number of sequences per packed example.", choices=[2, 3], default=3, type=int) + args = parser.parse_args() + + logger = tf.get_logger() + logger.propagate = False + + if not os.path.exists(args.output_dir): + logger.warning( + f"Output directory: {args.output_dir} does not exists, 
creating..." + ) + try: + os.makedirs(args.output_dir, exist_ok=True) + except IOError as error: + logger.error(error) + raise + + # Input files + print("Looping through dataset to collect sequence length information...") + input_files = np.random.choice(os.listdir(args.input_glob), size=args.max_files, replace=False) + sequence_lengths = [] + examples_by_length = defaultdict(list) + + with ProcessPoolExecutor(25) as executor: + work = repeat(args.input_glob), input_files.tolist() + for sequence_lengths_part,examples_by_length_part in executor.map(parallel_data_loader, *work): + pass + sequence_lengths += sequence_lengths_part + examples_by_length = { key:examples_by_length.get(key,[])+examples_by_length_part.get(key,[]) for key in set(list(examples_by_length.keys())+list(examples_by_length_part.keys())) } + del examples_by_length_part + sequence_lengths_part=None; examples_by_length_part=None + sequence_lengths = np.array(sequence_lengths) + print('Done extracting sequance length !!!') + del executor + gc.collect() + # Pass the array of sequence lengths to the packing algorithm + strategy_set, mixture, padding, slicing = get_packing_recipe(args.output_dir, sequence_lengths, args.max_sequence_length, args.max_sequences_per_pack) + print('Done get_packing_recipe !!!') + # Add the calculated padding + for i in range(1, args.max_sequence_length + 1): + if i not in examples_by_length.keys(): + examples_by_length[i]=[] + examples_by_length[i].extend([None] * int(padding[i - 1])) + + # Shuffle the data + for key in examples_by_length: + random.shuffle(examples_by_length[key]) + + # Pack and store the data + print(f"\nPacking and writing packed dataset to {args.output_dir}.") + + # Slice the data into chunks of max 50k packed examples + example_slices, strategies, part_idx = slice_examples(examples_by_length, slicing, strategy_set, mixture) + gc.collect() + print('Done slice_examples !!!') + del examples_by_length; del slicing; del strategy_set; del mixture + gc.collect() + start = time.time() + print(f"Splitting work into {len(part_idx)} parts.") + for rr in range(1+len(strategies)//500): + str_idx,stp_idx=rr*500,min((rr+1)*500,len(strategies)) + part_idx_prt, strategies_prt, example_slices_prt = part_idx[str_idx:stp_idx], strategies[str_idx:stp_idx], example_slices[str_idx:stp_idx] + with ProcessPoolExecutor(25) as executor: + work = repeat(args), part_idx_prt, strategies_prt, example_slices_prt + for partial_result in executor.map(parallel_pack_according_to_strategy, *work): + pass + del work + print(f"\nDone. Took: {time.time() - start:3.2f} seconds to pack and write dataset.") + print('-------------',str_idx,stp_idx) + print('Done Cleaning') diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_dataset.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..62690c54738e116216aa1b36b241f475435fd891 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_dataset.py @@ -0,0 +1,31 @@ +############################################################################### +# Copyright (C) 2020-2021 Habana Labs, Ltd. 
an Intel Company +############################################################################### + +import os +from pathlib import Path +import sys +import socket +import subprocess + +def download_dataset_r(dataset_path): + host_name = socket.gethostname() + try: + if not os.path.isdir(dataset_path): + print(f"{host_name}: *** Downloading dataset...\n\n") + os.makedirs(dataset_path, exist_ok=True) + download_script = Path(__file__).parent.joinpath("download_glue_data.py") + sys.stdout.flush() + sys.stderr.flush() + with subprocess.Popen(f"{sys.executable} {str(download_script)} --data_dir {dataset_path} --tasks MRPC", shell=True, executable='/bin/bash') as proc: + proc.wait() + except Exception as exc: + raise Exception(f"{host_name}: Error in {__file__} download_dataset_r({dataset_path})") from exc + +if __name__ == "__main__": + host_name = socket.gethostname() + print(f"{host_name}: In {sys.argv[0]}") + print(f"{host_name}: called with arguments: \"{sys.argv[1]}\"") + dataset_path = sys.argv[1] + print(f"{host_name}: MULTI_HLS_IPS = {os.environ.get('MULTI_HLS_IPS')}") + download_dataset_r(dataset_path) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_glue_data.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_glue_data.py new file mode 100644 index 0000000000000000000000000000000000000000..3027c14cbd918c1c48cc10e3014f78d3e2f185f9 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_glue_data.py @@ -0,0 +1,141 @@ +''' Script for downloading all GLUE data. + +Note: for legal reasons, we are unable to host MRPC. +You can either use the version hosted by the SentEval team, which is already tokenized, +or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually. +For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example). +You should then rename and place specific files in a folder (see below for an example). + +mkdir MRPC +cabextract MSRParaphraseCorpus.msi -d MRPC +cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt +cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt +rm MRPC/_* +rm MSRParaphraseCorpus.msi + +1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now. +2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray! 
+''' + +import os +import sys +import shutil +import argparse +import tempfile +import urllib.request +import zipfile + +TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"] +TASK2PATH = {"CoLA":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4', + "SST":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8', + "MRPC":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc', + "QQP":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5', + "STS":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5', + "MNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce', + "SNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df', + "QNLI": 'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601', + "RTE":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb', + "WNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf', + "diagnostic":'https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D'} + +MRPC_TRAIN = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt' +MRPC_TEST = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt' + +def download_and_extract(task, data_dir): + print("Downloading and extracting %s..." 
% task) + data_file = os.path.join(tempfile.gettempdir(), "%s.zip" % task) + urllib.request.urlretrieve(TASK2PATH[task], data_file) + with zipfile.ZipFile(data_file) as zip_ref: + zip_ref.extractall(data_dir) + os.remove(data_file) + print("\tCompleted!") + +def format_mrpc(data_dir, path_to_data): + print("Processing MRPC...") + mrpc_dir = os.path.join(data_dir, "MRPC") + if not os.path.isdir(mrpc_dir): + os.mkdir(mrpc_dir) + if path_to_data: + mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt") + mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt") + else: + print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN) + mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt") + mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt") + urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file) + urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file) + assert os.path.isfile(mrpc_train_file), "Train data not found at %s" % mrpc_train_file + assert os.path.isfile(mrpc_test_file), "Test data not found at %s" % mrpc_test_file + urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv")) + + dev_ids = [] + with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh: + for row in ids_fh: + dev_ids.append(row.strip().split('\t')) + + with open(mrpc_train_file, encoding="utf8") as data_fh, \ + open(os.path.join(mrpc_dir, "train.tsv"), 'w', encoding="utf8") as train_fh, \ + open(os.path.join(mrpc_dir, "dev.tsv"), 'w', encoding="utf8") as dev_fh: + header = data_fh.readline() + train_fh.write(header) + dev_fh.write(header) + for row in data_fh: + label, id1, id2, s1, s2 = row.strip().split('\t') + if [id1, id2] in dev_ids: + dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) + else: + train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) + + with open(mrpc_test_file, encoding="utf8") as data_fh, \ + open(os.path.join(mrpc_dir, "test.tsv"), 'w', encoding="utf8") as test_fh: + header = data_fh.readline() + test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n") + for idx, row in enumerate(data_fh): + label, id1, id2, s1, s2 = row.strip().split('\t') + test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2)) + print("\tCompleted!") + +def download_diagnostic(data_dir): + print("Downloading and extracting diagnostic...") + if not os.path.isdir(os.path.join(data_dir, "diagnostic")): + os.mkdir(os.path.join(data_dir, "diagnostic")) + data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv") + urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file) + print("\tCompleted!") + return + +def get_tasks(task_names): + task_names = task_names.split(',') + if "all" in task_names: + tasks = TASKS + else: + tasks = [] + for task_name in task_names: + assert task_name in TASKS, "Task %s not found!" 
% task_name + tasks.append(task_name) + return tasks + +def main(arguments): + parser = argparse.ArgumentParser() + parser.add_argument('--data_dir', help='directory to save data to', type=str, default='glue_data') + parser.add_argument('--tasks', help='tasks to download data for as a comma separated string', + type=str, default='all') + parser.add_argument('--path_to_mrpc', help='path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_text.txt', + type=str, default='') + args = parser.parse_args(arguments) + + if not os.path.isdir(args.data_dir): + os.mkdir(args.data_dir) + tasks = get_tasks(args.tasks) + + for task in tasks: + if task == 'MRPC': + format_mrpc(args.data_dir, args.path_to_mrpc) + elif task == 'diagnostic': + download_diagnostic(args.data_dir) + else: + download_and_extract(task, args.data_dir) + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) \ No newline at end of file diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_pretrained_model.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_pretrained_model.py new file mode 100644 index 0000000000000000000000000000000000000000..f0b8b7cddb9046d59d373eaf16e5133b73a7e878 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_pretrained_model.py @@ -0,0 +1,75 @@ +############################################################################### +# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company +############################################################################### + +import os +from pathlib import Path +import sys +import socket +import urllib.request +import zipfile +import subprocess + + +def run_cmd_as_subprocess(cmd=str): + print(cmd) + sys.stdout.flush() + sys.stderr.flush() + with subprocess.Popen(cmd, shell=True, executable='/bin/bash') as proc: + proc.wait() + + +def download_pretrained_model_r(pretrained_url, pretrained_model, flatten_archive=False): + host_name = socket.gethostname() + this_dir = os.getcwd() + try: + os.chdir(Path(__file__).parent.parent) + if not os.path.isdir(pretrained_model): + _wget = False + if os.path.exists(pretrained_model + ".zip") == False: + _wget = True + else: + if os.path.getsize(pretrained_model + ".zip") == 0: + print(f"{host_name}: *** Broken file, needs download ...\n\n") + _wget = True + if _wget == True: + print(f"{host_name}: *** Downloading pre-trained model...\n\n") + inf = urllib.request.urlopen(pretrained_url + pretrained_model + ".zip") + with open(pretrained_model + ".zip", "wb") as outf: + outf.write(inf.read()) + + print(f"{host_name}: *** Extracting pre-trained model...\n\n") + with zipfile.ZipFile(pretrained_model + ".zip", 'r') as zip_ref: + if flatten_archive: + # large model is zipped with subdirectory, flatten archive tree structure + for member in zip_ref.infolist(): + # skip directories + if member.is_dir(): + continue + zip_ref.extract(member) + else: + zip_ref.extractall(pretrained_model) + + if _wget == True: + cmd = f"rm -f {pretrained_model}.zip" + run_cmd_as_subprocess(cmd) + else: + print(f"{host_name}: Reusing existing pre-trained model directory \'{pretrained_model}\'") + os.chdir(this_dir) + except Exception as exc: + os.chdir(this_dir) + raise Exception(f"{host_name}: Error in {__file__} download_pretrained_model()") from exc + +if __name__ == "__main__": + host_name = socket.gethostname() + print(f"{host_name}: In {sys.argv[0]}") + print(f"{host_name}: called with arguments: \"{sys.argv[1]} 
{sys.argv[2]} {sys.argv[3]}\"") + pretrained_url = str(sys.argv[1]) + pretrained_model = str(sys.argv[2]) + flatten_archive_str = str(sys.argv[3]) + if flatten_archive_str == "True": + flatten_archive = True + else: + flatten_archive = False + print(f"{host_name}: MULTI_HLS_IPS = {os.environ.get('MULTI_HLS_IPS')}") + download_pretrained_model_r(pretrained_url, pretrained_model, flatten_archive) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/optimization.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..7a6e6ca0b09fb77412dd066d2709b9f86079253e --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/optimization.py @@ -0,0 +1,458 @@ +# coding=utf-8 +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Copyright 2018 The Google AI Language Team Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Functions and classes related to optimization (weight updates).""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import re +import tensorflow as tf +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import math_ops + +try: + import horovod.tensorflow as hvd +except ImportError: + hvd = None + +def horovod_enabled(): + return hvd is not None and hvd.is_initialized() + +def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, manual_fp16=False, use_fp16=False, num_accumulation_steps=1, + optimizer_type="adam", allreduce_post_accumulation=False, init_loss_scale=2**32, use_tpu=False): + """Creates an optimizer training op.""" + global_step = tf.compat.v1.train.get_or_create_global_step() + + # avoid step change in learning rate at end of warmup phase + if optimizer_type == "adam": + power = 1.0 + decayed_learning_rate_at_crossover_point = init_lr * ( + (1.0 - float(num_warmup_steps) / float(num_train_steps)) ** power) + else: + power = 0.5 + decayed_learning_rate_at_crossover_point = init_lr + + adjusted_init_lr = init_lr * (init_lr / decayed_learning_rate_at_crossover_point) + print('decayed_learning_rate_at_crossover_point = %e, adjusted_init_lr = %e' % + (decayed_learning_rate_at_crossover_point, adjusted_init_lr)) + + learning_rate = tf.constant(value=adjusted_init_lr, shape=[], dtype=tf.float32) + + # Implements linear decay of the learning rate. + learning_rate = tf.compat.v1.train.polynomial_decay( + learning_rate, + global_step, + num_train_steps, + end_learning_rate=0.0, + power=power, + cycle=False) + + # Implements linear warmup. I.e., if global_step < num_warmup_steps, the + # learning rate will be `global_step/num_warmup_steps * init_lr`. 
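+  # Illustrative example: with init_lr=1e-4 and num_warmup_steps=1000, the
+  # warmup learning rate at global_step=100 is 1e-4 * 100/1000 = 1e-5.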
+ if num_warmup_steps: + global_steps_int = tf.cast(global_step, tf.int32) + warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32) + + global_steps_float = tf.cast(global_steps_int, tf.float32) + warmup_steps_float = tf.cast(warmup_steps_int, tf.float32) + + warmup_percent_done = global_steps_float / warmup_steps_float + warmup_learning_rate = init_lr * warmup_percent_done + + is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32) + learning_rate = ( + (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate) + + if optimizer_type == "lamb": + print("Initializing LAMB Optimizer") + optimizer = LAMBOptimizer( + learning_rate=learning_rate, + weight_decay_rate=0.01, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-6, + exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]) + else: + print("Initializing ADAM Weight Decay Optimizer") + # It is recommended that you use this optimizer for fine tuning, since this + # is how the model was trained (note that the Adam m/v variables are NOT + # loaded from init_checkpoint.) + optimizer = AdamWeightDecayOptimizer( + learning_rate=learning_rate, + weight_decay_rate=0.01, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-6, + exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]) + + if horovod_enabled() and (num_accumulation_steps == 1 or (not allreduce_post_accumulation)): + optimizer = hvd.DistributedOptimizer(optimizer, sparse_as_dense=True) + if use_fp16: + loss_scaler = tf.train.experimental.DynamicLossScale( + initial_loss_scale=init_loss_scale, increment_period=1000, multiplier=2.0) + optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer, loss_scaler) + loss_scale_value = tf.identity(loss_scaler(), name="loss_scale") + if manual_fp16: + assert False, "No support for ExponentialUpdateLossScaleManager and LossScaleOptimizer in TF2.0" + loss_scale_manager = tf.contrib.mixed_precision.ExponentialUpdateLossScaleManager(init_loss_scale=init_loss_scale, + incr_every_n_steps=1000, + decr_every_n_nan_or_inf=2, + decr_ratio=0.5) + optimizer = tf.contrib.mixed_precision.LossScaleOptimizer(optimizer, loss_scale_manager) + if use_tpu: + optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer) + tvars = tf.compat.v1.trainable_variables() + + if num_accumulation_steps > 1: + grads_and_vars = optimizer.compute_gradients(loss * 1.0 / num_accumulation_steps, tvars) + local_step = tf.compat.v1.get_variable(name="local_step", shape=[], dtype=tf.int32, trainable=False, + initializer=tf.compat.v1.zeros_initializer) + batch_finite = tf.compat.v1.get_variable(name="batch_finite", shape=[], dtype=tf.bool, trainable=False, + initializer=tf.compat.v1.ones_initializer) + accum_vars = [tf.compat.v1.get_variable( + name=tvar.name.split(":")[0] + "/accum", + shape=tvar.shape.as_list(), + dtype=tf.float32, + trainable=False, + initializer=tf.compat.v1.zeros_initializer()) for tvar in tf.compat.v1.trainable_variables()] + + reset_step = tf.cast(tf.math.equal(local_step % num_accumulation_steps, 0), dtype=tf.bool) + local_step = tf.cond(pred=reset_step, true_fn=lambda: local_step.assign( + tf.ones_like(local_step)), false_fn=lambda: local_step.assign_add(1)) + + grads_and_vars_and_accums = [(gv[0], gv[1], accum_vars[i]) + for i, gv in enumerate(grads_and_vars) if gv[0] is not None] + grads, tvars, accum_vars = list(zip(*grads_and_vars_and_accums)) + + all_are_finite = tf.reduce_all(input_tensor=[tf.reduce_all(input_tensor=tf.math.is_finite( + g)) for g in grads]) if manual_fp16 or use_fp16 else 
tf.constant(True, dtype=tf.bool) + batch_finite = tf.cond(pred=reset_step, + true_fn=lambda: batch_finite.assign(tf.math.logical_and( + tf.constant(True, dtype=tf.bool), all_are_finite)), + false_fn=lambda: batch_finite.assign(tf.math.logical_and(batch_finite, all_are_finite))) + + # This is how the model was pre-trained. + # ensure global norm is a finite number + # to prevent clip_by_global_norm from having a hizzy fit. + (clipped_grads, _) = tf.clip_by_global_norm( + grads, clip_norm=1.0, + use_norm=tf.cond( + pred=all_are_finite, + true_fn=lambda: tf.linalg.global_norm(grads), + false_fn=lambda: tf.constant(1.0))) + + accum_vars = tf.cond(pred=reset_step, + true_fn=lambda: [accum_vars[i].assign(grad) for i, grad in enumerate(clipped_grads)], + false_fn=lambda: [accum_vars[i].assign_add(grad) for i, grad in enumerate(clipped_grads)]) + + update_step = tf.identity(tf.cast(tf.math.equal(local_step % num_accumulation_steps, 0), + dtype=tf.bool), name="update_step") + + def allreduce_of_batch_finite_required(): + # In case of bf16 and fp32 batch finite is tf.constant(True, dtype=tf.bool) + return horovod_enabled() and manual_fp16 and use_fp16 + + # TODO: in future if we want to enable infinite batch iter skiping we will need to change this allreduce. + new_global_step = tf.cond(pred=tf.math.logical_and(update_step, + tf.cast(hvd.allreduce(tf.cast(batch_finite, tf.int32)), tf.bool) if allreduce_of_batch_finite_required() else batch_finite), + true_fn=lambda: global_step + 1, + false_fn=lambda: global_step) + new_global_step = tf.identity(new_global_step, name='step_update') + + def update(accum_vars): + with tf.control_dependencies([global_step.assign(new_global_step)]): + if allreduce_post_accumulation and horovod_enabled(): + accum_vars = [hvd.allreduce(tf.convert_to_tensor(value=accum_var)) if isinstance(accum_var, tf.IndexedSlices) + else hvd.allreduce(accum_var) for accum_var in accum_vars] + + return optimizer.apply_gradients(list(zip(accum_vars, tvars)), global_step=global_step) + + train_op = tf.cond(pred=update_step, + true_fn=lambda: update(accum_vars), false_fn=lambda: tf.no_op()) + else: + grads_and_vars = optimizer.compute_gradients(loss, tvars) + if horovod_enabled(): + grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None] + grads, tvars = list(zip(*grads_and_vars)) + else: + grads = tf.gradients(ys=loss, xs=tvars) + all_are_finite = tf.reduce_all( + input_tensor=[tf.reduce_all(input_tensor=tf.math.is_finite(g)) for g in grads]) if use_fp16 or manual_fp16 else tf.constant(True, dtype=tf.bool) + + # This is how the model was pre-trained. + # ensure global norm is a finite number + # to prevent clip_by_global_norm from having a hizzy fit. 
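+    # When any gradient is non-finite, a constant use_norm of 1.0 is supplied so
+    # the norm computation does not propagate NaN/Inf; the global step below is
+    # only advanced when all gradients are finite.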
+ (clipped_grads, _) = tf.clip_by_global_norm( + grads, clip_norm=1.0, + use_norm=tf.cond( + pred=all_are_finite, + true_fn=lambda: tf.linalg.global_norm(grads), + false_fn=lambda: tf.constant(1.0))) + + new_global_step = tf.cond(pred=all_are_finite, true_fn=lambda: global_step + 1, false_fn=lambda: global_step) + new_global_step = tf.identity(new_global_step, name='step_update') + + with tf.control_dependencies([global_step.assign(new_global_step)]): + train_op = optimizer.apply_gradients( + list(zip(clipped_grads, tvars)), global_step=global_step) + return train_op + + +class AdamWeightDecayOptimizer(tf.compat.v1.train.Optimizer): + """A basic Adam optimizer that includes "correct" L2 weight decay.""" + + def __init__(self, + learning_rate, + weight_decay_rate=0.0, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-6, + exclude_from_weight_decay=None, + name="AdamWeightDecayOptimizer"): + """Constructs a AdamWeightDecayOptimizer.""" + super(AdamWeightDecayOptimizer, self).__init__(False, name) + + self.learning_rate = tf.identity(learning_rate, name='learning_rate') + self.weight_decay_rate = weight_decay_rate + self.beta_1 = beta_1 + self.beta_2 = beta_2 + self.epsilon = epsilon + self.exclude_from_weight_decay = exclude_from_weight_decay + + def apply_gradients(self, grads_and_vars, global_step=None, name=None, + manual_fp16=False): + """See base class.""" + assignments = [] + for (grad, param) in grads_and_vars: + if grad is None or param is None: + continue + + param_name = self._get_variable_name(param.name) + has_shadow = manual_fp16 and param.dtype.base_dtype != tf.float32 + if has_shadow: + # create shadow fp32 weights for fp16 variable + param_fp32 = tf.compat.v1.get_variable( + name=param_name + "/shadow", + dtype=tf.float32, + trainable=False, + initializer=tf.cast(param.initialized_value(), tf.float32)) + else: + param_fp32 = param + + m = tf.compat.v1.get_variable( + name=param_name + "/adam_m", + shape=param.shape.as_list(), + dtype=tf.float32, + trainable=False, + initializer=tf.compat.v1.zeros_initializer()) + v = tf.compat.v1.get_variable( + name=param_name + "/adam_v", + shape=param.shape.as_list(), + dtype=tf.float32, + trainable=False, + initializer=tf.compat.v1.zeros_initializer()) + + # Standard Adam update. + next_m = ( + tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)) + next_v = ( + tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, + tf.square(grad))) + + update = next_m * tf.math.rsqrt(next_v + self.epsilon * self.epsilon) + + # Just adding the square of the weights to the loss function is *not* + # the correct way of using L2 regularization/weight decay with Adam, + # since that will interact with the m and v parameters in strange ways. + # + # Instead we want to decay the weights in a manner that doesn't interact + # with the m/v parameters. This is equivalent to adding the square + # of the weights to the loss with plain (non-momentum) SGD. 
+ if self._do_use_weight_decay(param_name): + update += self.weight_decay_rate * param_fp32 + + update_with_lr = self.learning_rate * update + + next_param = param_fp32 - update_with_lr + + if has_shadow: + # cast shadow fp32 weights to fp16 and assign to trainable variable + param.assign(tf.cast(next_param, param.dtype.base_dtype)) + assignments.extend( + [param_fp32.assign(next_param), + m.assign(next_m), + v.assign(next_v)]) + return tf.group(*assignments, name=name) + + def _do_use_weight_decay(self, param_name): + """Whether to use L2 weight decay for `param_name`.""" + if not self.weight_decay_rate: + return False + if self.exclude_from_weight_decay: + for r in self.exclude_from_weight_decay: + if re.search(r, param_name) is not None: + return False + return True + + def _get_variable_name(self, param_name): + """Get the variable name from the tensor name.""" + m = re.match("^(.*):\\d+$", param_name) + if m is not None: + param_name = m.group(1) + return param_name + + # This code originally was a WA for this issue: + # See: https://jira.habana-labs.com/browse/SW-19371 + # However, the root issue has been fixed and is no longer required. + # + # It turned out that this function needs to be uncommented to speed up the BERT finetuning training. + # See: https://jira.habana-labs.com/browse/SW-19126 + # + # At this moment, enabling SAO leads to an immediate crash: + # See: https://jira.habana-labs.com/browse/SW-19688 + # + def compute_gradients(self, loss, var_list=None, + gate_gradients=tf.compat.v1.train.Optimizer.GATE_OP, + aggregation_method=None, + colocate_gradients_with_ops=False, + grad_loss=None): + assert gate_gradients == tf.compat.v1.train.Optimizer.GATE_OP + assert aggregation_method is None + assert not colocate_gradients_with_ops + assert grad_loss is None + + grads = tf.gradients(ys=loss, xs=var_list) + grads_and_vars = list(zip(grads, var_list)) + return grads_and_vars + + +class LAMBOptimizer(tf.compat.v1.train.Optimizer): + """A LAMB optimizer that includes "correct" L2 weight decay.""" + + def __init__(self, + learning_rate, + weight_decay_rate=0.0, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-6, + exclude_from_weight_decay=None, + name="LAMBOptimizer"): + """Constructs a LAMBOptimizer.""" + super(LAMBOptimizer, self).__init__(False, name) + + self.learning_rate = tf.identity(learning_rate, name='learning_rate') + self.weight_decay_rate = weight_decay_rate + self.beta_1 = beta_1 + self.beta_2 = beta_2 + self.epsilon = epsilon + self.exclude_from_weight_decay = exclude_from_weight_decay + + def apply_gradients(self, grads_and_vars, global_step, name=None, + manual_fp16=False): + """See base class.""" + assignments = [] + steps = tf.cast(global_step, tf.float32) + for (grad, param) in grads_and_vars: + if grad is None or param is None: + continue + + param_name = self._get_variable_name(param.name) + has_shadow = manual_fp16 and param.dtype.base_dtype != tf.float32 + if has_shadow: + # create shadow fp32 weights for fp16 variable + param_fp32 = tf.compat.v1.get_variable( + name=param_name + "/shadow", + dtype=tf.float32, + trainable=False, + initializer=tf.cast(param.initialized_value(), tf.float32)) + else: + param_fp32 = param + + m = tf.compat.v1.get_variable( + name=param_name + "/adam_m", + shape=param.shape.as_list(), + dtype=tf.float32, + trainable=False, + initializer=tf.compat.v1.zeros_initializer()) + v = tf.compat.v1.get_variable( + name=param_name + "/adam_v", + shape=param.shape.as_list(), + dtype=tf.float32, + trainable=False, + 
initializer=tf.compat.v1.zeros_initializer()) + + # LAMB update + next_m = ( + tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)) + next_v = ( + tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, + tf.square(grad))) + + beta1_correction = (1 - self.beta_1 ** steps) + beta2_correction = (1 - self.beta_2 ** steps) + + next_m_unbiased = next_m / beta1_correction + next_v_unbiased = next_v / beta2_correction + + update = next_m_unbiased / (tf.sqrt(next_v_unbiased) + self.epsilon) + + # Just adding the square of the weights to the loss function is *not* + # the correct way of using L2 regularization/weight decay with Adam, + # since that will interact with the m and v parameters in strange ways. + # + # Instead we want to decay the weights in a manner that doesn't interact + # with the m/v parameters. This is equivalent to adding the square + # of the weights to the loss with plain (non-momentum) SGD. + if self._do_use_weight_decay(param_name): + update += self.weight_decay_rate * param_fp32 + + w_norm = linalg_ops.norm(param, ord=2) + g_norm = linalg_ops.norm(update, ord=2) + ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where( + math_ops.greater(g_norm, 0), (w_norm / g_norm), 1.0), 1.0) + + update_with_lr = ratio * self.learning_rate * update + + next_param = param_fp32 - update_with_lr + + if has_shadow: + # cast shadow fp32 weights to fp16 and assign to trainable variable + param.assign(tf.cast(next_param, param.dtype.base_dtype)) + assignments.extend( + [param_fp32.assign(next_param), + m.assign(next_m), + v.assign(next_v)]) + return tf.group(*assignments, name=name) + + def _do_use_weight_decay(self, param_name): + """Whether to use L2 weight decay for `param_name`.""" + if not self.weight_decay_rate: + return False + if self.exclude_from_weight_decay: + for r in self.exclude_from_weight_decay: + if re.search(r, param_name) is not None: + return False + return True + + def _get_variable_name(self, param_name): + """Get the variable name from the tensor name.""" + m = re.match("^(.*):\\d+$", param_name) + if m is not None: + param_name = m.group(1) + return param_name diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/__init__.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/fused_layer_norm.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/fused_layer_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..b66b8c3513452390ca6b9792db373330b8fda656 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/fused_layer_norm.py @@ -0,0 +1,141 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import collections +import copy +import json +import math +import re +import six +import tensorflow as tf + +from tensorflow.python.framework import ops +from tensorflow.contrib.layers.python.layers import utils +from tensorflow.contrib.framework.python.ops import variables +from tensorflow.python.ops import init_ops +import numpy +from tensorflow.python.ops import array_ops +from tensorflow.python.framework import dtypes +from tensorflow.python.ops import nn + +def fused_layer_norm(inputs, + center=True, + scale=True, + activation_fn=None, + reuse=None, + variables_collections=None, + outputs_collections=None, + trainable=True, + begin_norm_axis=1, + begin_params_axis=-1, + scope=None, + use_fused_batch_norm=False): + with tf.compat.v1.variable_scope( + scope, 'LayerNorm', [inputs], reuse=reuse) as sc: + inputs = ops.convert_to_tensor(inputs) + inputs_shape = inputs.shape + inputs_rank = inputs_shape.ndims + if inputs_rank is None: + raise ValueError('Inputs %s has undefined rank.' % inputs.name) + dtype = inputs.dtype.base_dtype + if begin_norm_axis < 0: + begin_norm_axis = inputs_rank + begin_norm_axis + if begin_params_axis >= inputs_rank or begin_norm_axis >= inputs_rank: + raise ValueError('begin_params_axis (%d) and begin_norm_axis (%d) ' + 'must be < rank(inputs) (%d)' % + (begin_params_axis, begin_norm_axis, inputs_rank)) + params_shape = inputs_shape[begin_params_axis:] + if not params_shape.is_fully_defined(): + raise ValueError( + 'Inputs %s: shape(inputs)[%s:] is not fully defined: %s' % + (inputs.name, begin_params_axis, inputs_shape)) + # Allocate parameters for the beta and gamma of the normalization. + beta, gamma = None, None + if center: + beta_collections = utils.get_variable_collections(variables_collections, + 'beta') + beta = variables.model_variable( + 'beta', + shape=params_shape, + dtype=dtype, + initializer=init_ops.zeros_initializer(), + collections=beta_collections, + trainable=trainable) + if scale: + gamma_collections = utils.get_variable_collections( + variables_collections, 'gamma') + gamma = variables.model_variable( + 'gamma', + shape=params_shape, + dtype=dtype, + initializer=init_ops.ones_initializer(), + collections=gamma_collections, + trainable=trainable) + if use_fused_batch_norm: + # get static TensorShape if fully defined, + # otherwise retrieve shape tensor + norm_shape = inputs.shape[begin_norm_axis:] + if norm_shape.is_fully_defined(): + bn_shape = [1, -1, 1, numpy.prod(norm_shape.as_list())] + else: + norm_shape = tf.shape(input=inputs)[begin_norm_axis:] + bn_shape = [1, -1, 1, tf.reduce_prod(input_tensor=norm_shape)] + if inputs.get_shape().is_fully_defined(): + outputs_shape = inputs.get_shape() + else: + outputs_shape = tf.shape(input=inputs) + inputs = array_ops.reshape(inputs, bn_shape) + if inputs.get_shape().is_fully_defined(): + # static inputs TensorShape fully defined after reshape. + ones = array_ops.ones(inputs.get_shape()[1], dtype=dtypes.float32) + zeros = array_ops.zeros(inputs.get_shape()[1], dtype=dtypes.float32) + else: + # static inputs TensorShape NOT fully defined after reshape. + # must use dynamic shape, which means these input tensors + # have to be created at runtime, which causes a slowdown. 
+ scale_shape = tf.shape(input=inputs)[1] + ones = array_ops.ones(scale_shape, dtype=dtypes.float32) + zeros = array_ops.zeros(scale_shape, dtype=dtypes.float32) + outputs, mean, variance = nn.fused_batch_norm( + inputs, + ones, zeros, + epsilon=1e-4, + data_format="NCHW") + outputs = array_ops.reshape(outputs, outputs_shape) + if center and scale: + outputs = outputs * gamma + beta + elif center: + outputs = outputs + beta + elif scale: + outputs = outputs * gamma + else: + # Calculate the moments on the last axis (layer activations). + norm_axes = list(range(begin_norm_axis, inputs_rank)) + mean, variance = nn.moments(inputs, norm_axes, keep_dims=True) + # Compute layer normalization using the batch_normalization function. + variance_epsilon = 1e-4 + outputs = nn.batch_normalization( + inputs, + mean, + variance, + offset=beta, + scale=gamma, + variance_epsilon=variance_epsilon) + outputs.set_shape(inputs_shape) + if activation_fn is not None: + outputs = activation_fn(outputs) + return utils.collect_named_outputs(outputs_collections, sc.name, outputs) + diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/gpu_environment.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/gpu_environment.py new file mode 100644 index 0000000000000000000000000000000000000000..948c3fa44836b11c94b24554407b46f0e6a0ff5f --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/gpu_environment.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import tensorflow as tf +import numpy as np + +def float32_variable_storage_getter(getter, name, shape=None, dtype=None, + initializer=None, regularizer=None, + trainable=True, + *args, **kwargs): + """Custom variable getter that forces trainable variables to be stored in + float32 precision and then casts them to the training precision. + """ + storage_dtype = tf.float32 if trainable else dtype + variable = getter(name, shape, dtype=storage_dtype, + initializer=initializer, regularizer=regularizer, + trainable=trainable, + *args, **kwargs) + if trainable and dtype != tf.float32: + variable = tf.cast(variable, dtype) + return variable + +def get_custom_getter(compute_type): + return float32_variable_storage_getter if compute_type == tf.float16 else None diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/utils.py b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ae7b1bd0dd90708c10ed138d46bbad6e79fedc5a --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/utils.py @@ -0,0 +1,64 @@ +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import tensorflow as tf
+import time
+
+# report latency and throughput during eval
+class LogEvalRunHook(tf.estimator.SessionRunHook):
+  def __init__(self, global_batch_size, hvd_rank=-1):
+    self.global_batch_size = global_batch_size
+    self.hvd_rank = hvd_rank
+    self.count = 0
+    self.time_list = []
+
+  def before_run(self, run_context):
+    self.t0 = time.time()
+
+  def after_run(self, run_context, run_values):
+    elapsed_secs = time.time() - self.t0
+    self.count += 1
+    self.time_list.append(elapsed_secs)
+
+# report throughput during training
+class LogTrainRunHook(tf.estimator.SessionRunHook):
+  def __init__(self, global_batch_size, hvd_rank=-1, save_checkpoints_steps=1000, num_steps_ignore_xla=100):
+    self.global_batch_size = global_batch_size
+    self.hvd_rank = hvd_rank
+    self.save_checkpoints_steps = save_checkpoints_steps
+
+    self.total_time = 0.0
+    self.count = 0 # Holds number of iterations, including skipped iterations for fp16 loss scaling
+    self.skipped = 0
+    self.num_steps_ignore_xla = num_steps_ignore_xla
+    # The initial steps, while XLA is still compiling, need to be excluded from the throughput computation.
+
+  def after_create_session(self, session, coord):
+    self.init_global_step = session.run(tf.compat.v1.train.get_global_step())
+
+  def before_run(self, run_context):
+    self.t0 = time.time()
+    return tf.estimator.SessionRunArgs(
+        fetches=['step_update:0'])
+
+  def after_run(self, run_context, run_values):
+    elapsed_secs = time.time() - self.t0
+    self.global_step = run_values.results[0]
+    self.count += 1
+
+    # Exclude the first 100 steps and the first five steps after every checkpoint save
+    if (self.global_step - self.init_global_step) <= self.num_steps_ignore_xla or (self.global_step - self.init_global_step) % self.save_checkpoints_steps < 5:
+      print("Skipping time record for ", self.global_step, " due to checkpoint-saving/warmup overhead")
+      self.skipped += 1
+    else:
+      self.total_time += elapsed_secs
diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/LICENSE b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..ddc3fe64e16b34f1e393b47cafae378e5fa11ef8
--- /dev/null
+++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/LICENSE
@@ -0,0 +1,203 @@
+Copyright (c) 2021 Habana Labs, Ltd. an Intel Company
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/NOTICE b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/NOTICE
new file mode 100644
index 0000000000000000000000000000000000000000..e59096a3a810ff6ef809f30e95e5e02a2ab7b1d9
--- /dev/null
+++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/NOTICE
@@ -0,0 +1,5 @@
+Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
+
+This repository includes software from:
+* Tensor2Tensor, (https://github.com/tensorflow/tensor2tensor) licensed
+  under the Apache License, Version 2.0
diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/README.md b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c2db186b97d90559fd4348b59f79243f796d7ee2
--- /dev/null
+++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/README.md
@@ -0,0 +1,338 @@
+# Transformer for TensorFlow
+
+This repository provides a script and recipe to train the Transformer model for TensorFlow on the Intel® Gaudi® AI Accelerator. For further information on performance, refer to the [Habana Model Performance Data page](https://developer.habana.ai/resources/habana-training-models/#performance).
+
+For more information on training deep learning models using Gaudi, refer to [developer.habana.ai](https://developer.habana.ai/resources/).
+
+## Table of Contents
+
+* [Model-References](../../../README.md)
+* [Model Overview](#model-overview)
+* [Setup](#setup)
+* [Training and Examples](#training-and-examples)
+* [Evaluating BLEU Score](#evaluating-bleu-score)
+* [Profile](#profile)
+* [Supported Configuration](#supported-configuration)
+* [Changelog](#changelog)
+* [Known Issues](#known-issues)
+
+## Model Overview
+The Transformer is a Neural Machine Translation (NMT) model that uses an attention mechanism to boost training speed and overall accuracy.
+The model was initially introduced in [Attention Is All You Need](https://arxiv.org/abs/1706.03762).
+This implementation is based on the [Tensor2Tensor](https://github.com/tensorflow/tensor2tensor) implementation (authors: Google Inc., Artit Wangperawong).
+
+There are three model variants available: tiny, base and big.
+
+### Model Architecture
+The Transformer uses the standard NMT encoder-decoder architecture. Unlike other NMT models, it does not use recurrent connections and operates on a fixed-size context window.
+The encoder stack is made up of N identical layers. Each layer is composed of the following sub-layers:
+- Self-attention layer
+- Feedforward network (which is 2 fully-connected layers)
+
+The decoder stack is also made up of N identical layers. Each layer is composed of the following sub-layers:
+- Self-attention layer
+- Multi-headed attention layer combining encoder outputs with results from the previous self-attention layer.
+- Feedforward network (2 fully-connected layers)
+
+The encoder uses self-attention to compute a representation of the input sequence. The decoder generates the output sequence one token at a time, taking the encoder output and the previously generated tokens as inputs.
+The model also applies embeddings to the input and output tokens, and adds a constant positional encoding. The positional encoding adds information about the position of each token, as illustrated by the sketch below.
+
+The complete description of the Transformer architecture can be found in the [Attention Is All You Need](https://arxiv.org/abs/1706.03762) paper.
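+
+The sinusoidal positional encoding mentioned above can be summarized with a short, self-contained sketch. This is only an illustration of the scheme from the paper, not code from this repository; the function name `positional_encoding` and the use of NumPy are ours:
+
+```python
+import numpy as np
+
+def positional_encoding(max_len, d_model):
+  # Illustrative only; the actual implementation comes from Tensor2Tensor.
+  positions = np.arange(max_len)[:, np.newaxis]      # (max_len, 1) token positions
+  dims = np.arange(d_model)[np.newaxis, :]           # (1, d_model) embedding channels
+  # Geometrically spaced frequencies, as in "Attention Is All You Need".
+  angle_rates = 1.0 / np.power(10000.0, (2 * (dims // 2)) / np.float32(d_model))
+  angles = positions * angle_rates                   # (max_len, d_model)
+  encoding = np.zeros((max_len, d_model), dtype=np.float32)
+  encoding[:, 0::2] = np.sin(angles[:, 0::2])        # even channels: sine
+  encoding[:, 1::2] = np.cos(angles[:, 1::2])        # odd channels: cosine
+  return encoding                                    # added to the token embeddings
+```
+
+Because the encoding is deterministic, it can be computed once and added to the embeddings of any sequence up to `max_len` tokens.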
+ +## Setup +Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/GAUDI_Installation_Guide.html) to set up the environment including the `$PYTHON` environment variable. To achieve the best performance, please follow the methods outlined in the [Optimizing Training Platform guide](https://docs.habana.ai/en/latest/TensorFlow/Model_Optimization_TensorFlow/Optimization_Training_Platform.html). +The guides will walk you through the process of setting up your system to run the model on Gaudi. + +### Clone Habana Model-References +In the docker container, clone this repository and switch to the branch that matches your SynapseAI version. You can run the [`hl-smi`](https://docs.habana.ai/en/latest/Management_and_Monitoring/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options) utility to determine the SynapseAI version. +```bash +git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References /root/Model-References +``` + +**Note:** If Model-References repository path is not in the PYTHONPATH, make sure you update it: +```bash +export PYTHONPATH=$PYTHONPATH:/root/Model-References +``` +### Download and Generate the Dataset + +Go to the Transformer directory and generate the dataset. The following script will save the dataset to `/data/tensorflow/wmt32k_packed/train`: +```bash +cd Model-References/TensorFlow/nlp/transformer/ +$PYTHON datagen.py \ + --data_dir=/data/tensorflow/wmt32k_packed/train \ + --tmp_dir=/tmp/transformer_datagen \ + --problem=translate_ende_wmt32k_packed \ + --random_seed=429459 +``` + +### Install Model Requirements + +1. In the docker container, go to the Transformer directory: +```bash +cd /root/Model-References/TensorFlow/nlp/transformer +``` + +2. Install the required packages using pip: +```bash +$PYTHON -m pip install -r requirements.txt +``` + +## Training and Examples + +### Single card and Multi-Card Training Examples + +**NOTE:** All training examples for 1 HPU and 8 HPUs are valid both for first-gen Gaudi and Gaudi2. 
+ +**Run training on 1 HPU:** + +```bash +$PYTHON trainer.py \ + --data_dir=/train \ + --problem=translate_ende_wmt32k_packed \ + --model=transformer \ + --hparams_set=transformer_ \ + --hparams=batch_size= \ + --output_dir= \ + --local_eval_frequency= \ + --train_steps= \ + --schedule=train \ + --use_hpu=True \ + --use_bf16= +``` + +Run training on 1 HPU, batch size 4096, bfloat16, transformer_big, 300k steps with a checkpoint saved every 10k steps, last 10 checkpoints kept: + +```bash +$PYTHON trainer.py \ + --data_dir=/data/tensorflow/wmt32k_packed/train/ \ + --problem=translate_ende_wmt32k_packed \ + --model=transformer \ + --hparams_set=transformer_big \ + --hparams=batch_size=4096 \ + --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs4096 \ + --local_eval_frequency=10000 \ + --keep_checkpoint_max=10 \ + --train_steps=300000 \ + --schedule=train \ + --use_hpu=True \ + --use_bf16=True +``` + +For Gaudi2, training batch size can be increased for better performance: +```bash +$PYTHON trainer.py \ + --data_dir=/data/tensorflow/wmt32k_packed/train/ \ + --problem=translate_ende_wmt32k_packed \ + --model=transformer \ + --hparams_set=transformer_big \ + --hparams=batch_size=16384,learning_rate_constant=5.0,learning_rate_warmup_steps=5000 \ + --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs16384 \ + --local_eval_frequency=2500 \ + --keep_checkpoint_max=10 \ + --train_steps=75000 \ + --schedule=train \ + --use_hpu=True \ + --use_bf16=True +``` + +**Run training on 8 HPUs:** + +**NOTE:** mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/TensorFlow/Tensorflow_Scaling_Guide/Horovod_Scaling/index.html#mpirun-configuration). + +Run training on 8 HPUs, global batch size 8 * 4096, bfloat16, transformer_big, 300k steps with a checkpoint saved every 10k steps, last 10 checkpoints kept, learning rate constant 2.5: + +```bash +mpirun \ + --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 \ + --tag-output --merge-stderr-to-stdout \ + $PYTHON trainer.py \ + --data_dir=/data/tensorflow/wmt32k_packed/train/ \ + --problem=translate_ende_wmt32k_packed \ + --model=transformer \ + --hparams_set=transformer_big \ + --hparams=batch_size=4096,learning_rate_constant=2.5 \ + --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs4096 \ + --local_eval_frequency=10000 \ + --keep_checkpoint_max=10 \ + --train_steps=300000 \ + --schedule=train \ + --use_horovod=True \ + --use_hpu=True \ + --use_bf16=True +``` + +For Gaudi2, training batch size can be increased for better performance: + +```bash +mpirun \ + --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 \ + --tag-output --merge-stderr-to-stdout \ + $PYTHON trainer.py \ + --data_dir=/data/tensorflow/wmt32k_packed/train/ \ + --problem=translate_ende_wmt32k_packed \ + --model=transformer \ + --hparams_set=transformer_big \ + --hparams=batch_size=16384,learning_rate_constant=5.0,learning_rate_warmup_steps=5000 \ + --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs16384 \ + --local_eval_frequency=2500 \ + --keep_checkpoint_max=10 \ + --train_steps=75000 \ + --schedule=train \ + --use_horovod=True \ + --use_hpu=True \ + --use_bf16=True +``` + +### Multi-Server Training and Examples +To run training on multiple servers, make sure to set the `MULTI_HLS_IPS` environment +variable with the IPs of the used servers. 
+ +**NOTE:** Multi-server training is supported only on first-gen Gaudi. + +**Run training on 16 HPUs:** +```bash +export MULTI_HLS_IPS=192.10.100.174,10.10.100.101 +mpirun \ + --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 \ + --tag-output --merge-stderr-to-stdout \ + $PYTHON trainer.py \ + --data_dir=/data/tensorflow/wmt32k_packed/train/ \ + --problem=translate_ende_wmt32k_packed \ + --model=transformer \ + --hparams_set=transformer_big \ + --hparams=batch_size=4096,learning_rate_constant=3.0 \ + --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs4096 \ + --local_eval_frequency=50000 \ + --train_steps=150000 \ + --schedule=train \ + --use_horovod=True \ + --use_hpu=True \ + --use_bf16=True +``` + +**Run training on 32 HPUs:** + + **NOTE:** It is recommended to use `learning_rate_constant` 3.5 and `train_steps` 75000. + +```bash +export MULTI_HLS_IPS=192.10.100.174,10.10.100.101,10.10.100.102,10.10.100.103 +mpirun \ + --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 \ + --tag-output --merge-stderr-to-stdout \ + $PYTHON trainer.py \ + --data_dir=/data/tensorflow/wmt32k_packed/train/ \ + --problem=translate_ende_wmt32k_packed \ + --model=transformer \ + --hparams_set=transformer_big \ + --hparams=batch_size=4096,learning_rate_constant=3.5 \ + --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs4096 \ + --local_eval_frequency=50000 \ + --train_steps=75000 \ + --schedule=train \ + --use_horovod=True \ + --use_hpu=True \ + --use_bf16=True +``` + +## Evaluating BLEU Score +After training the model, you can evaluate the achieved BLEU score: +1. Download and tokenize the validation file: +```bash +sacrebleu -t wmt14 -l en-de --echo src > wmt14.src +cat wmt14.src | sacremoses tokenize -l en > wmt14.src.tok +``` + +2. Compute BLEU score of a single checkpoint: +```bash +$PYTHON decoder.py \ + --problem=translate_ende_wmt32k_packed \ + --model=transformer \ + --hparams_set=transformer_big \ + --data_dir=/train \ + --output_dir= \ + --checkpoint_path= \ + --use_hpu=True \ + --decode_from_file=./wmt14.src.tok \ + --decode_to_file=./wmt14.tgt.tok \ + --decode_hparams=log_results=False +cat wmt14.tgt.tok | sacremoses detokenize -l de | sacrebleu -t wmt14 -l en-de +``` + +3. Optional: To split BLEU calculation to multiple cards, run `decoder.py` through `mpirun`. For example: +```bash +mpirun \ + --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 \ + --tag-output --merge-stderr-to-stdout \ + $PYTHON decoder.py \ + --problem=translate_ende_wmt32k_packed \ + --model=transformer \ + --hparams_set=transformer_big \ + --data_dir=/train \ + --output_dir= \ + --checkpoint_path= \ + --decode_from_file=./wmt14.src.tok \ + --decode_to_file=./wmt14.tgt.tok \ + --use_hpu=True \ + --use_horovod=True \ + --decode_hparams=log_results=False +cat wmt14.tgt.tok | sacremoses detokenize -l de | sacrebleu -t wmt14 -l en-de +``` +**NOTE:** mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/TensorFlow/Tensorflow_Scaling_Guide/Horovod_Scaling/index.html#mpirun-configuration). + +## Profile +To run with profiling enabled, pass `--profile_steps` flag. It should be a comma separated pair of numbers - on which step to start and end profiling. + +Profiler steps are counted individually for each run. 
Thus, if you run training for 100 steps, with `--profile_steps 99,100`, profiling will be always enabled for the last two steps, no matter the `global_step_count`. + +**Run training on 1 HPU with profiler:** + +```bash + $PYTHON trainer.py \ + --data_dir=/data/tensorflow/wmt32k_packed/train/ \ + --problem=translate_ende_wmt32k_packed \ + --model=transformer \ + --hparams_set=transformer_big \ + --hparams=batch_size=4096 \ + --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs4096 \ + --local_eval_frequency=10000 \ + --train_steps=100 \ + --schedule=train \ + --use_hpu=True \ +--profile_steps 50,53 +``` +The above example will produce profile trace for 4 steps (50,51,52,53). + +## Supported Configuration +| Validated on | SynapseAI Version | TensorFlow Version(s) | Mode | +|:------:|:-----------------:|:-----:|:----------:| +| Gaudi | 1.14.0 | 2.15.0 | Training | +| Gaudi2 | 1.14.0 | 2.15.0 | Training | + +## Changelog +### 1.6.0 +* Model enabled on Gaudi2, with the same config as first-gen Gaudi. +* Added profiling support. +* Enabled experimental variable clustering to improve performance. +* Removed advanced parameters section from README. + +### 1.4.0 +* Replaced references to custom demo script by community entry points in README. +* Added support to import horovod-fork package directly instead of using Model-References' TensorFlow.common.horovod_helpers; wrapped horovod import with a try-catch block so that the user is not required to install this library when the model is being run on a single card. +* Updated requirements.txt. +* Changed the default value of the log_step_count_steps flag. + +### 1.3.0 +* Enabled multi-HPU BLEU calculation. +* Updated requirements.txt. + +### 1.2.0 +* Added support for recipe cache, see `TF_RECIPE_CACHE_PATH` in HabanaAI documentation for details. +* Enabled multi-server training. + +### Training Script Modifications +* Support for other models than Transformer was removed. +* Added support for Horovod together with some adjustments in the topology script to allow simplifying the computational graph. + +## Known Issues + +Only FP32 precision is supported when calculating BLEU on HPU. 
diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/bf16_config/transformer.json b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/bf16_config/transformer.json new file mode 100644 index 0000000000000000000000000000000000000000..4efe336513d47756a42895380015c343b216d200 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/bf16_config/transformer.json @@ -0,0 +1,153 @@ +{ + "allowlist": [ + "_ScopedAllocatorSplit", + "_ScopedAllocatorConcat", + "_ScopedAllocator", + "BatchMatMul", + "BatchMatMulV2", + "BiasAdd", + "CollectiveReduceV2", + "CollectiveReduceV3", + "Conv2D", + "Conv2DBackpropFilter", + "Conv2DBackpropInput", + "Cumprod", + "Cumsum", + "EuclideanNorm", + "Exp", + "FloorDiv", + "FusedBatchNormV2", + "FusedBatchNormV3", + "FusedBatchNormGradV2", + "FusedBatchNormGradV3", + "GatherNd", + "GatherV2", + "Greater", + "GreaterEqual", + "HabanaConv2DWithPadding", + "HabanaConv2DWithPaddingBackpropFilter", + "HabanaConv2DWithPaddingBackpropInput", + "HabanaDropout", + "HabanaDropoutGrad", + "HabanaDropoutStateful", + "HabanaFusedBatchNormV3", + "HabanaGelu", + "HabanaGeluGrad", + "HabanaLayerNorm", + "HabanaLayerNormGrad", + "HabanaSoftmaxGrad", + "HabanaLogSoftmaxGrad", + "HorovodAllgather", + "HorovodAllreduce", + "HpuCollectiveReduce", + "Less", + "LessEqual", + "Log", + "Log1p", + "LogSoftmax", + "MatMul", + "MaxPool", + "MaxPoolV2", + "MaxPoolGrad", + "MaxPoolGradV2", + "Mul", + "PyramidRoiAlign", + "PyramidRoiAlignGradImages", + "Relu", + "Relu6", + "ReluGrad", + "Relu6Grad", + "Round", + "Rsqrt", + "RsqrtGrad", + "Sigmoid", + "SigmoidGrad", + "Softmax", + "SparseSoftmaxCrossEntropyWithLogits", + "Square", + "SquaredDifference", + "Sqrt", + "Tanh", + "TanhGrad", + "TensorScatterUpdate" + ], + "conditional_list": [ + "Abs", + "Add", + "AddN", + "AddV2", + "ArgMax", + "ArgMin", + "BiasAddGrad", + "CollectiveReduceV2", + "CollectiveReduceV3", + "DynamicStitch", + "Equal", + "ExpandDims", + "Fill", + "HabanaClampFwd", + "HabanaClampBwd", + "HabanaMaxGrad", + "HabanaMinGrad", + "HabanaSparseSegmentSum", + "HabanaRandomUniformWithMaxval", + "HabanaRandomUniformWithScale", + "HabanaSize", + "HorovodAllgather", + "HorovodAllreduce", + "HpuCollectiveReduce", + "HpuCollectiveGather", + "HpuCollectiveGatherV2", + "Identity", + "IsFinite", + "MatrixBandPart", + "Neg", + "NotEqual", + "Pack", + "Pad", + "PadV2", + "RandomStandardNormal", + "RandomUniform", + "Rank", + "Reshape", + "ResizeNearestNeighbor", + "ResizeNearestNeighborGrad", + "Select", + "SelectV2", + "Shape", + "ShapeN", + "Sign", + "Size", + "Slice", + "SparseSegmentSumWithNumSegments", + "SplitV", + "Split", + "Snapshot", + "Squeeze", + "StridedSlice", + "StridedSliceGrad", + "Sub", + "Tile", + "Transpose", + "Unpack", + "ZerosLike" + ], + "strict_conditional_list": [ + "Add", + "AddN", + "BiasAddGrad", + "Sub" + ], + "non_convertible_exceptions": [ + [ + ".*KEEP_FP32_PRECISION.*", + "" + ] + ], + "convertible_exceptions": [ + [ + ".*FORCE_BF16_PRECISION.*", + "" + ] + ] +} \ No newline at end of file diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/build_vocab.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/build_vocab.py new file mode 100644 index 0000000000000000000000000000000000000000..bae1b89b4db54f7c1d7ee832b370d9526dc04400 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/build_vocab.py @@ -0,0 +1,77 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - updated imports + +r"""Build vocab for a subclass of Text2TextProblem. + +build_vocab \ + --problem=program_search_algolisp \ + --data_dir=~/t2t_data \ + --tmp_dir=~/t2t_data/tmp +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from TensorFlow.nlp.transformer.utils import problems as problems_lib # pylint: disable=unused-import +from TensorFlow.nlp.transformer.data_generators import text_problems +from TensorFlow.nlp.transformer.utils import registry +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("data_dir", "/tmp/t2t/data_dir", + "Directory to place the generated vocabulary file in.") + +flags.DEFINE_string("tmp_dir", "/tmp/t2t/tmp_dir", + "Temporary storage directory.") + +flags.DEFINE_string("problem", None, + "Problem to generate the vocabulary file for.") + +flags.mark_flag_as_required("problem") + + +def main(_): + problem = registry.problem(FLAGS.problem) + + # We make the assumption that the problem is a subclass of Text2TextProblem. + assert isinstance(problem, text_problems.Text2TextProblem) + + data_dir = os.path.expanduser(FLAGS.data_dir) + tmp_dir = os.path.expanduser(FLAGS.tmp_dir) + + tf.gfile.MakeDirs(data_dir) + tf.gfile.MakeDirs(tmp_dir) + + tf.logging.info("Saving vocabulary to data_dir: %s" % data_dir) + + problem.get_or_create_vocab(data_dir, tmp_dir) + + tf.logging.info("Saved vocabulary file: " + + os.path.join(data_dir, problem.vocab_filename)) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/compute_bleu.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/compute_bleu.py new file mode 100644 index 0000000000000000000000000000000000000000..5dd56536b0be131ee4bc2c68d3fd937ce2a6ba11 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/compute_bleu.py @@ -0,0 +1,42 @@ +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. 
an Intel Company +############################################################################### +import subprocess +from argparse import ArgumentParser +from TensorFlow.common.tb_utils import TBSummary + + +parser = ArgumentParser() +parser.add_argument('--decoded_file', '-df', type=str, default='wmt14.tgt.tok', + help='Decoded file produced by t2t-decode command.') +parser.add_argument('--log_dir', '-ld', type=str, default=None, + help='Where to store TensorBoard summary file, ' + 'if None summary will not be saved.') +args = parser.parse_args() + +def get_sacremoses_version(): + ver_line = subprocess.run(['sacremoses', '--version'], stdout=subprocess.PIPE).stdout.decode() + return tuple(map(int, ver_line.split()[-1].split('.'))) + +def get_sacremoses_cmd(version): + if version >= (0, 0, 42): + return ['sacremoses', '-l', 'de', 'detokenize'] + else: + return ['sacremoses', 'detokenize', '-l', 'de'] + +def main(): + detok = subprocess.run(get_sacremoses_cmd(get_sacremoses_version()), + stdin=open(args.decoded_file, 'r'), + stdout=subprocess.PIPE) + bleu = subprocess.run(['sacrebleu', '-t', 'wmt14', '-l', 'en-de', '-b'], + input=detok.stdout, stdout=subprocess.PIPE) + score = bleu.stdout.decode() + print('BLEU:', score) + + if args.log_dir is not None: + with TBSummary(args.log_dir) as tb: + tb.add_scalar('accuracy', float(score), 0) + + +if __name__ == '__main__': + main() diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/__init__.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b0a2368a3e35eb4db097267915c30a0dc569953 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/all_problems.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/all_problems.py new file mode 100644 index 0000000000000000000000000000000000000000..7f3bb278042f40dcbf40cbc51680992ebe492897 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/all_problems.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Imports for problem modules.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import importlib +import six +from six.moves import range # pylint: disable=redefined-builtin + +MODULES = [ + "TensorFlow.nlp.transformer.data_generators.translate_encs_cubbitt", + "TensorFlow.nlp.transformer.data_generators.translate_encs", + "TensorFlow.nlp.transformer.data_generators.translate_ende", + "TensorFlow.nlp.transformer.data_generators.translate_enes", + "TensorFlow.nlp.transformer.data_generators.translate_enet", + "TensorFlow.nlp.transformer.data_generators.translate_enfr", + "TensorFlow.nlp.transformer.data_generators.translate_enid", + "TensorFlow.nlp.transformer.data_generators.translate_enmk", + "TensorFlow.nlp.transformer.data_generators.translate_envi", + "TensorFlow.nlp.transformer.data_generators.translate_enzh", +] +ALL_MODULES = list(MODULES) + + + +def _is_import_err_msg(err_str, module): + parts = module.split(".") + suffixes = [".".join(parts[i:]) for i in range(len(parts))] + prefixes = [".".join(parts[:i]) for i in range(len(parts))] + return err_str in (["No module named %s" % suffix for suffix in suffixes] + + ["No module named '%s'" % suffix for suffix in suffixes] + + ["No module named %s" % prefix for prefix in prefixes] + + ["No module named '%s'" % prefix for prefix in prefixes]) + + +def _handle_errors(errors): + """Log out and possibly reraise errors during import.""" + if not errors: + return + log_all = True # pylint: disable=unused-variable + err_msg = "T2T: skipped importing {num_missing} data_generators modules." + print(err_msg.format(num_missing=len(errors))) + for module, err in errors: + err_str = str(err) + if log_all: + print("Did not import module: %s; Cause: %s" % (module, err_str)) + if not _is_import_err_msg(err_str, module): + print("From module %s" % module) + raise err + + +def import_modules(modules): + errors = [] + for module in modules: + try: + importlib.import_module(module) + except ImportError as error: + errors.append((module, error)) + _handle_errors(errors) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/cleaner_en_xx.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/cleaner_en_xx.py new file mode 100644 index 0000000000000000000000000000000000000000..5baa7bb6020c6781ab99dac0ad13ef1e357c1ade --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/cleaner_en_xx.py @@ -0,0 +1,176 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# encoding=UTF-8 +"""An unsophisticated data cleaner for en-.. sentence translation pairs. + +This pattern-based English-... cleaner aims fairly aggressively for clean +sentence-like pairs. It discards pairs if the English member has signs of +non-sentence noise or origin, e.g., lacks expected punctuation or has suspicious +character sequences. 
It also simplistically detects and corrects some missing +sentence breaks. It makes minimal assumptions about the other language, mainly +that its sentences can end in one of '.!?' and that its sentences can start +with an ASCII capital letter. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + + +import itertools +import re + +from TensorFlow.nlp.transformer.data_generators import text_encoder + +import tensorflow.compat.v1 as tf + + +_RE_GOOD_S_START = re.compile(r'^["“”]?[A-Z]') +_RE_GOOD_S_END = re.compile(r'\w[.?!]["”]?$', re.UNICODE) + +_RE_LABEL_COLON = re.compile(r'^\w+\.?( \w+)?: ', re.UNICODE) +_RE_DIGIT_SPACE_DIGIT = re.compile(r'\d +\d', re.UNICODE) +_RE_ALL_CAP_WORDS = re.compile(r'^[A-Z]\S*(\s+[A-Z]\S+)+\s*$') + +_RE_DQ_ONE = re.compile(r'^[^"“”]*["“”][^"“”]*$') +_RE_DQ_INITIAL = re.compile(r'^["“”]([^"“”]+)$') +_RE_DQ_FINAL = re.compile(r'^[^"“”]+["“”]$') +_RE_DQ_LINE = re.compile(r'^["“”].*["“”]$') + +_RE_DQ_MANY = re.compile(r'(["“”].*){3,}') +_RE_SQ_MANY = re.compile(r'''(['‘’][^st].*){3,}''') +_RE_CHARS_QQ = re.compile(r'''["“”'‘’]\s*["“”'‘’]''') +_RE_SPACE_PUNCT_SPACE = re.compile(r'''\s["“”'‘’,:;]\s''') + +_RE_COPYRIGHT = re.compile(r'©|^Copyright|^\(C\)') +_RE_UNMATCHED_PAREN_LEFT = re.compile(r'[(][^)]*$') +_RE_UNMATCHED_PAREN_RIGHT = re.compile(r'^[^(]*[)]') +_RE_TAGLINE_CITY = re.compile(r'^[A-Z]{2,}(\s+[A-Z]+)*\s+-') +_RE_CHARS_UPPER_UNDERSCORE = re.compile(r'^[A-Z]+[a-z]*_') + + +def paracrawl_v3_pairs(paracrawl_file): + """Generates raw (English, other) pairs from a ParaCrawl V3.0 data file. + + Args: + paracrawl_file: A ParaCrawl V3.0 en-.. data file. + Yields: + Pairs of (sentence_en, sentence_xx), as Unicode strings. + Raises: + StopIteration: If the file ends while this method is in the middle of + creating a translation pair. + """ + raw_sentences = _raw_sentences(paracrawl_file) + for s_en in raw_sentences: + try: + s_xx = next(raw_sentences) + if s_en and s_xx: # Prevent empty string examples. + yield s_en, s_xx + except StopIteration: + tf.logging.error( + 'Unmatched final sentence while reading in sentence pairs: [%s]', + s_en) + + +def _raw_sentences(paracrawl_file): + """Generates Unicode strings, one for each in a ParaCrawl data file. + + Also decodes some of the most common HTML entities found in ParaCrawl data. + + Args: + paracrawl_file: A ParaCrawl V3.0 en-.. data file. + Yields: + One Unicode string for each element in the ParaCrawl data file. + """ + for line_utf8 in paracrawl_file: + line_uni = line_utf8.decode('UTF-8') + text_match = re.match(r' +(.*)$', line_uni) + if text_match: + txt = text_match.group(1) + txt = re.sub(r'&', r'&', txt) + txt = re.sub(r'& ?amp;', r'&', txt) + txt = re.sub(r'& ?apos;', r"'", txt) + txt = re.sub(r'& ?quot;', r'"', txt) + txt = re.sub(r'& ?lt;', r'<', txt) + txt = re.sub(r'& ?gt;', r'>', txt) + yield txt + + +def clean_en_xx_pairs(en_xx_pairs): + """Generates a cleaned-up stream of (English, other) translation pairs. + + Cleaning includes both filtering and simplistic sentence splitting, with + minimal assumptions on the non-English pair member: (1) All filtering is + done based on the English member of the pair, and (2) sentence splitting + assumes only that sentences can end with one of '.!?' and begin with an + ASCII uppercase letter. Input pairs that would get split into different + numbers of sentences (e.g., three English sentences vs. two German ones) are + discarded. 
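+
+  Illustrative example (made-up sentences; whether a given pair survives
+  depends on the regex filters defined above):
+    the input pair ('He came. She left.', 'Er kam. Sie ging.') would be split
+    into aligned sub-sentences, yielding ('He came.', 'Er kam.') followed by
+    ('She left.', 'Sie ging.').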
+ + Args: + en_xx_pairs: A stream (iterable) of Unicode string pairs. Each item in the + stream should be a (sentence_en, sentence_xx) pair. + Yields: + Cleaned-up (sentence_en, sentence_xx) pairs. + """ + for s1, s2 in en_xx_pairs: + if _regex_filter(s1): + continue + s1_list, s2_list = _split_sentences(s1, s2) + if len(s1_list) != len(s2_list): + continue # discard this pair + elif len(s1_list) == 1: + yield s1, s2 + else: + for s1_subsentence, s2_subsentence in itertools.izip(s1_list, s2_list): + if _regex_filter(s1_subsentence): + continue + yield s1_subsentence, s2_subsentence + + +def _regex_filter(sentence): + return (not _is_match(sentence, _RE_GOOD_S_START) + or not _is_match(sentence, _RE_GOOD_S_END) + or _is_match(sentence, _RE_LABEL_COLON) + or _is_match(sentence, _RE_DIGIT_SPACE_DIGIT) + or _is_match(sentence, _RE_DQ_ONE) + or _is_match(sentence, _RE_DQ_INITIAL) + or _is_match(sentence, _RE_DQ_FINAL) + or _is_match(sentence, _RE_DQ_LINE) + or _is_match(sentence, _RE_DQ_MANY) + or _is_match(sentence, _RE_SQ_MANY) + or _is_match(sentence, _RE_CHARS_QQ) + or _is_match(sentence, _RE_SPACE_PUNCT_SPACE) + or _is_match(sentence, _RE_COPYRIGHT) + or _is_match(sentence, _RE_UNMATCHED_PAREN_LEFT) + or _is_match(sentence, _RE_UNMATCHED_PAREN_RIGHT) + or _is_match(sentence, _RE_TAGLINE_CITY) + or _is_match(sentence, _RE_CHARS_UPPER_UNDERSCORE)) + + +def _is_match(sentence, regex): + return regex.search(sentence) + + +def _split_sentences(s1, s2): + s1 = text_encoder.native_to_unicode(s1) + s2 = text_encoder.native_to_unicode(s2) + s1 = re.sub(r'(\w[A-Z]|[0-9a-z])([.!?]) ([A-Z])', r'\1\2__|__\3', s1) + s2 = re.sub(r'([^0-9][.!?]) ([A-Z])', r'\1__|__\2', s2) + s1_subsentences = s1.split('__|__') + s2_subsentences = s2.split('__|__') + return s1_subsentences, s2_subsentences diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/generator_utils.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/generator_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..14e0c851281c472cdaa1dae6cd5b5514c9530595 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/generator_utils.py @@ -0,0 +1,1259 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
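+
+# Illustrative sketch of the central conversion helper defined below. Only
+# to_example() and generate_files() are taken from this file; the literal
+# feature values are made up, so the sketch stays in comment form:
+#
+#   example = to_example({"inputs": [1, 2, 3], "targets": [4, 5, 1]})
+#   serialized = example.SerializeToString()
+#
+# generate_files() performs exactly this conversion for every case produced
+# by a generator before writing it to a TFRecord shard.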
+ +"""Utilities for data generators.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import gzip +import math +import multiprocessing +import os +import random +import stat +import tarfile +import tempfile +import numpy as np +import requests +import six +from six.moves import range # pylint: disable=redefined-builtin +# Imports urllib on Python2, urllib.request on Python3 +import six.moves.urllib_request as urllib + +from TensorFlow.nlp.transformer.data_generators import text_encoder + +import tensorflow.compat.v1 as tf + +UNSHUFFLED_SUFFIX = "-unshuffled" + +flags = tf.flags +FLAGS = flags.FLAGS + +def to_example(dictionary): + """Helper: build tf.Example from (string -> int/float/str list) dictionary.""" + features = {} + for (k, v) in six.iteritems(dictionary): + if not v: + raise ValueError("Empty generated field: %s" % str((k, v))) + # Subtly in PY2 vs PY3, map is not scriptable in py3. As a result, + # map objects will fail with TypeError, unless converted to a list. + if six.PY3 and isinstance(v, map): + v = list(v) + if (isinstance(v[0], six.integer_types) or + np.issubdtype(type(v[0]), np.integer)): + features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v)) + elif isinstance(v[0], float): + features[k] = tf.train.Feature(float_list=tf.train.FloatList(value=v)) + elif isinstance(v[0], six.string_types): + if not six.PY2: # Convert in python 3. + v = [bytes(x, "utf-8") for x in v] + features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v)) + elif isinstance(v[0], bytes): + features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v)) + else: + raise ValueError("Value for %s is not a recognized type; v: %s type: %s" % + (k, str(v[0]), str(type(v[0])))) + return tf.train.Example(features=tf.train.Features(feature=features)) + + +def generate_files_distributed(generator, + output_name, + output_dir, + num_shards=1, + max_cases=None, + task_id=0): + """generate_files but with a single writer writing to shard task_id.""" + assert task_id < num_shards + output_filename = sharded_name(output_name, task_id, num_shards) + output_file = os.path.join(output_dir, output_filename) + tf.logging.info("Writing to file %s", output_file) + writer = tf.python_io.TFRecordWriter(output_file) + + counter = 0 + for case in generator: + if counter % 100000 == 0: + tf.logging.info("Generating case %d for %s." 
% (counter, output_name)) + counter += 1 + if max_cases and counter > max_cases: + break + example = to_example(case) + writer.write(example.SerializeToString()) + + writer.close() + return output_file + + +def _data_filenames(output_name, output_dir, num_shards): + return [ + os.path.join(output_dir, fname) + for fname in shard_filepath(output_name, num_shards) + ] + + +def train_data_filenames(problem, output_dir, num_shards): + return _data_filenames(problem + "-train", output_dir, num_shards) + + +def dev_data_filenames(problem, output_dir, num_shards): + return _data_filenames(problem + "-dev", output_dir, num_shards) + + +def test_data_filenames(problem, output_dir, num_shards): + return _data_filenames(problem + "-test", output_dir, num_shards) + + +def combined_data_filenames(problem, output_dir, num_training_shards): + return (train_data_filenames(problem, output_dir, num_training_shards) + + dev_data_filenames(problem, output_dir, 1) + test_data_filenames( + problem, output_dir, 1)) + + +def sharded_name(base_name, shard, total_shards): + return "%s-%.5d-of-%.5d" % (base_name, shard, total_shards) + + +def shard_filepath(fname, num_shards): + return [ + sharded_name(fname, shard, num_shards) for shard in range(num_shards) + ] + + +def outputs_exist(filenames): + for out_fname in filenames: + out_fname = out_fname.replace(UNSHUFFLED_SUFFIX, "") + if tf.gfile.Exists(out_fname): + return out_fname + + +def generate_files(generator, output_filenames, + max_cases=None, cycle_every_n=1): + """Generate cases from a generator and save as TFRecord files. + + Generated cases are transformed to tf.Example protos and saved as TFRecords + in sharded files named output_dir/output_name-00..N-of-00..M=num_shards. + + Args: + generator: a generator yielding (string -> int/float/str list) dictionaries. + output_filenames: List of output file paths. + max_cases: maximum number of cases to get from the generator; + if None (default), we use the generator until StopIteration is raised. + cycle_every_n: how many cases from the generator to take before + switching to the next shard; by default set to 1, switch every case. + """ + if outputs_exist(output_filenames): + tf.logging.info("Skipping generator because outputs files exists at {}" + .format(output_filenames)) + return + tmp_filenames = [fname + ".incomplete" for fname in output_filenames] + num_shards = len(output_filenames) + # Check if is training or eval, ref: train_data_filenames(). + if num_shards > 0: + if "-train" in output_filenames[0]: + tag = "train" + elif "-dev" in output_filenames[0]: + tag = "eval" + else: + tag = "other" + + writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames] + counter, shard = 0, 0 + for case in generator: + if case is None: + continue + if counter % 100000 == 0: + tf.logging.info("Generating case %d." % counter) + counter += 1 + if max_cases and counter > max_cases: + break + example = to_example(case) + writers[shard].write(example.SerializeToString()) + if counter % cycle_every_n == 0: + shard = (shard + 1) % num_shards + + for writer in writers: + writer.close() + + for tmp_name, final_name in zip(tmp_filenames, output_filenames): + tf.gfile.Rename(tmp_name, final_name) + + tf.logging.info("Generated %s Examples", counter) + + +def download_report_hook(count, block_size, total_size): + """Report hook for download progress. 
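+
+  Intended to be passed as the reporthook argument of urllib.urlretrieve,
+  which is how maybe_download() below uses it.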
+ + Args: + count: current block number + block_size: block size + total_size: total size + """ + percent = int(count * block_size * 100 / total_size) + print("\r%d%%" % percent + " completed", end="\r") + + +def maybe_download(directory, filename, uri): + """Download filename from uri unless it's already in directory. + + Copies a remote file to local if that local file does not already exist. If + the local file pre-exists this function call, it does not check that the local + file is a copy of the remote. + + Remote filenames can be filepaths, any URI readable by tensorflow.gfile, or a + URL. + + Args: + directory: path to the directory that will be used. + filename: name of the file to download to (do nothing if it already exists). + uri: URI to copy (or download) from. + + Returns: + The path to the downloaded file. + """ + tf.gfile.MakeDirs(directory) + filepath = os.path.join(directory, filename) + if tf.gfile.Exists(filepath): + tf.logging.info("Not downloading, file already found: %s" % filepath) + return filepath + + tf.logging.info("Downloading %s to %s" % (uri, filepath)) + try: + tf.gfile.Copy(uri, filepath) + except tf.errors.UnimplementedError: + if uri.startswith("http"): + inprogress_filepath = filepath + ".incomplete" + inprogress_filepath, _ = urllib.urlretrieve( + uri, inprogress_filepath, reporthook=download_report_hook) + # Print newline to clear the carriage return from the download progress + print() + tf.gfile.Rename(inprogress_filepath, filepath) + else: + raise ValueError("Unrecognized URI: " + filepath) + statinfo = os.stat(filepath) + tf.logging.info("Successfully downloaded %s, %s bytes." % + (filename, statinfo.st_size)) + return filepath + + +def maybe_download_from_drive(directory, filename, url): + """Download filename from Google drive unless it's already in directory. + + Args: + directory: path to the directory that will be used. + filename: name of the file to download to (do nothing if it already exists). + url: URL to download from. + + Returns: + The path to the downloaded file. + """ + if not tf.gfile.Exists(directory): + tf.logging.info("Creating directory %s" % directory) + tf.gfile.MakeDirs(directory) + filepath = os.path.join(directory, filename) + confirm_token = None + if tf.gfile.Exists(filepath): + tf.logging.info("Not downloading, file already found: %s" % filepath) + return filepath + + # Since the file is big, drive will scan it for virus and take it to a + # warning page. We find the confirm token on this page and append it to the + # URL to start the download process. + confirm_token = None + session = requests.Session() + response = session.get(url, stream=True) + for k, v in response.cookies.items(): + if k.startswith("download_warning"): + confirm_token = v + + if confirm_token: + url = url + "&confirm=" + confirm_token + tf.logging.info("Downloading %s to %s" % (url, filepath)) + + response = session.get(url, stream=True) + # Now begin the download. + chunk_size = 16 * 1024 + with open(filepath, "wb") as f: + for chunk in response.iter_content(chunk_size): + if chunk: + f.write(chunk) + + # Print newline to clear the carriage return from the download progress + print() + statinfo = os.stat(filepath) + tf.logging.info("Successfully downloaded %s, %s bytes." % (filename, + statinfo.st_size)) + return filepath + + +def gunzip_file(gz_path, new_path): + """Unzips from gz_path into new_path. + + Args: + gz_path: path to the zipped file. + new_path: path to where the file will be unzipped. 
+ """ + if tf.gfile.Exists(new_path): + tf.logging.info("File %s already exists, skipping unpacking" % new_path) + return + tf.logging.info("Unpacking %s to %s" % (gz_path, new_path)) + # We may be unpacking into a newly created directory, add write mode. + mode = stat.S_IRWXU or stat.S_IXGRP or stat.S_IRGRP or stat.S_IROTH + os.chmod(os.path.dirname(new_path), mode) + with gzip.open(gz_path, "rb") as gz_file: + with tf.gfile.GFile(new_path, mode="wb") as new_file: + for line in gz_file: + new_file.write(line) + + +def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, + generator, max_subtoken_length=None, + reserved_tokens=None): + """Inner implementation for vocab generators. + + Args: + data_dir: The base directory where data and vocab files are stored. If None, + then do not save the vocab even if it doesn't exist. + vocab_filename: relative filename where vocab file is stored + vocab_size: target size of the vocabulary constructed by SubwordTextEncoder + generator: a generator that produces tokens from the vocabulary + max_subtoken_length: an optional integer. Set this to a finite value to + avoid quadratic costs during vocab building. + reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS` + should be a prefix of `reserved_tokens`. If `None`, defaults to + `RESERVED_TOKENS`. + + Returns: + A SubwordTextEncoder vocabulary object. + """ + if data_dir and vocab_filename: + vocab_filepath = os.path.join(data_dir, vocab_filename) + if tf.gfile.Exists(vocab_filepath): + tf.logging.info("Found vocab file: %s", vocab_filepath) + return text_encoder.SubwordTextEncoder(vocab_filepath) + else: + vocab_filepath = None + + tf.logging.info("Generating vocab file: %s", vocab_filepath) + vocab = text_encoder.SubwordTextEncoder.build_from_generator( + generator, vocab_size, max_subtoken_length=max_subtoken_length, + reserved_tokens=reserved_tokens) + + if vocab_filepath: + tf.gfile.MakeDirs(data_dir) + vocab.store_to_file(vocab_filepath) + + return vocab + + +def get_or_generate_vocab(data_dir, tmp_dir, vocab_filename, vocab_size, + sources, file_byte_budget=1e6, + max_subtoken_length=None): + """Generate a vocabulary from the datasets in sources.""" + + vocab_generator = generate_lines_for_vocab(tmp_dir, sources, file_byte_budget) + return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, + vocab_generator, max_subtoken_length) + + +def generate_lines_for_vocab(tmp_dir, sources, file_byte_budget=1e6): + """Generate lines for vocabulary generation.""" + tf.logging.info("Generating vocab from: %s", str(sources)) + for source in sources: + url = source[0] + filename = os.path.basename(url) + compressed_file = maybe_download(tmp_dir, filename, url) + + for lang_file in source[1]: + tf.logging.info("Reading file: %s" % lang_file) + filepath = os.path.join(tmp_dir, lang_file) + + # Extract from tar if needed. + if not tf.gfile.Exists(filepath): + read_type = "r:gz" if filename.endswith("tgz") else "r" + with tarfile.open(compressed_file, read_type) as corpus_tar: + corpus_tar.extractall(tmp_dir) + + # For some datasets a second extraction is necessary. 
+ if lang_file.endswith(".gz"): + new_filepath = os.path.join(tmp_dir, lang_file[:-3]) + if tf.gfile.Exists(new_filepath): + tf.logging.info( + "Subdirectory %s already exists, skipping unpacking" % filepath) + else: + tf.logging.info("Unpacking subdirectory %s" % filepath) + gunzip_file(filepath, new_filepath) + filepath = new_filepath + + with tf.gfile.GFile(filepath, mode="r") as source_file: + file_byte_budget_ = file_byte_budget + counter = 0 + countermax = int(source_file.size() / file_byte_budget_ / 2) + for line in source_file: + if counter < countermax: + counter += 1 + else: + if file_byte_budget_ <= 0: + break + line = line.strip() + file_byte_budget_ -= len(line) + counter = 0 + yield line + + +def get_or_generate_tabbed_vocab(data_dir, tmp_dir, source_filename, + index, vocab_filename, vocab_size): + r"""Generate a vocabulary from a tabbed source file. + + The source is a file of source, target pairs, where each line contains + a source string and a target string, separated by a tab ('\t') character. + The index parameter specifies 0 for the source or 1 for the target. + + Args: + data_dir: path to the data directory. + tmp_dir: path to the temporary directory. + source_filename: the name of the tab-separated source file. + index: index. + vocab_filename: the name of the vocabulary file. + vocab_size: vocabulary size. + + Returns: + The vocabulary. + """ + def generate(): + filepath = os.path.join(tmp_dir, source_filename) + tf.logging.info("Generating vocab from %s", filepath) + with tf.gfile.GFile(filepath, mode="r") as source_file: + for line in source_file: + line = line.strip() + if line and "\t" in line: + parts = line.split("\t", 1) + part = parts[index].strip() + yield part + + return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, + generate()) + + +def get_or_generate_txt_vocab(data_dir, vocab_filename, vocab_size, + filepatterns): + """Generate a vocabulary from txt files with example-per-line.""" + if isinstance(filepatterns, str): + filepatterns = [filepatterns] + + def generate(): + tf.logging.info("Generating vocab from %s", filepatterns) + for filepattern in filepatterns: + for filename in tf.gfile.Glob(filepattern): + with tf.gfile.GFile(filename, mode="r") as source_file: + for line in source_file: + yield line.strip() + + return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, + generate()) + + +def read_records(filename): + reader = tf.python_io.tf_record_iterator(filename) + records = [] + for record in reader: + records.append(record) + if len(records) % 100000 == 0: + tf.logging.info("read: %d", len(records)) + return records + + +def write_records(records, out_filename): + writer = tf.python_io.TFRecordWriter(out_filename) + for count, record in enumerate(records): + writer.write(record) + if count > 0 and count % 100000 == 0: + tf.logging.info("write: %d", count) + writer.close() + + +def generate_dataset_and_shuffle(train_gen, + train_paths, + dev_gen, + dev_paths, + shuffle=True): + generate_files(train_gen, train_paths) + generate_files(dev_gen, dev_paths) + if shuffle: + shuffle_dataset(train_paths + dev_paths) + + +def _shuffle_single(fname, extra_fn=None): + """Shuffle a single file of records. + + Args: + fname: a string + extra_fn: an optional function from list of TFRecords to list of TFRecords + to be called after shuffling. 
+ """ + records = read_records(fname) + random.shuffle(records) + if extra_fn is not None: + records = extra_fn(records) + out_fname = fname.replace(UNSHUFFLED_SUFFIX, "") + write_records(records, out_fname) + tf.gfile.Remove(fname) + + +def shuffle_dataset(filenames, extra_fn=None): + """Shuffles the dataset. + + Args: + filenames: a list of strings + extra_fn: an optional function from list of records to list of records + to be called after shuffling a file. + """ + if outputs_exist(filenames): + tf.logging.info("Skipping shuffle because output files exist") + return + tf.logging.info("Shuffling data...") + for filename in filenames: + _shuffle_single(filename, extra_fn=extra_fn) + tf.logging.info("Data shuffled.") + + +class SequencePacker(object): + """Helper for constructing a packed example of sequence examples. + + See comments to pack_examples() + """ + + def __init__(self, first_sequence, spacing=2): + self._spacing = spacing + self._ids = first_sequence[:] + self._segmentation = [1] * len(first_sequence) + self._position = list(range(len(first_sequence))) + + def add(self, ids): + padding = [0] * self._spacing + self._ids.extend(padding + ids) + next_segment_num = self._segmentation[-1] + 1 if self._segmentation else 1 + self._segmentation.extend(padding + [next_segment_num] * len(ids)) + self._position.extend(padding + list(range(len(ids)))) + + def can_fit(self, ids, packed_length): + return len(self._ids) + self._spacing + len(ids) <= packed_length + + def pad(self, packed_length): + padding = [0] * (packed_length - len(self._ids)) + self._ids.extend(padding) + self._segmentation.extend(padding) + self._position.extend(padding) + + def to_dict(self): + return {"inputs": [0], + "targets": self._ids, + "targets_segmentation": self._segmentation, + "targets_position": self._position} + + +class SequencePairPacker(object): + """Helper for packing sequence-to-sequence examples into bigger examples. + + See comments to pack_examples() + """ + + def __init__(self, first_sequence_pair, spacing=2): + self._inputs = SequencePacker(first_sequence_pair[0], spacing) + self._targets = SequencePacker(first_sequence_pair[1], spacing) + + def add(self, pair): + self._inputs.add(pair[0]) + self._targets.add(pair[1]) + + def can_fit(self, pair, packed_length): + return (self._inputs.can_fit(pair[0], packed_length) and + self._targets.can_fit(pair[1], packed_length)) + + def pad(self, packed_length): + self._inputs.pad(packed_length) + self._targets.pad(packed_length) + + def to_dict(self): + ret = self._targets.to_dict() + inputs_dict = self._inputs.to_dict() + ret["inputs"] = inputs_dict["targets"] + ret["inputs_segmentation"] = inputs_dict["targets_segmentation"] + ret["inputs_position"] = inputs_dict["targets_position"] + return ret + + +def pack_examples(examples, + has_inputs, + packed_length=256, + spacing=2, + queue_size=10, + chop_long_sequences=False): + """Pack examples into longer examples. + + If has_inputs=False, we are packing single-sequence examples with + targets only and no inputs. + + In this case, we concatenate the targets from several examples to form + each new example. We insert a number of zeros for spacing between the + original sequences. This is to help the sequences stay separate + under convolutions. If chop_long_sequences is set, then any input sequence + longer than packed_length gets chopped up into multiple examples. Otherwise, + long sequences are emitted as singletons. + + If has_inputs=True, then we are packing sequence-to-sequence + examples. 
We combine several examples by concatenating the inputs + (as above) and concatenating the targets (as above). Chopping of + long sequences is not supported. + + The packed examples are represented as dictionaries containing: + "inputs", "targets": the packed sequences described above + "inputs_segmentation", "targets_segmentation": + Sequences aligned with "inputs", "targets" specifying to which original + sequence each position belongs. Numbering starts from 1, and 0 is used + for spacing. This information is useful for preventing attention across + segments. + e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4] + "inputs_position", "targets_position": + Sequences aligned with "inputs", "targets" specifying position within + the original sequence. This is useful for positional encodings. + e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2] + + Args: + examples: a generator returning feature dictionaries. + has_inputs: a boolean + packed_length: an integer + spacing: an integer + queue_size: an integer + chop_long_sequences: a boolean + + Yields: + feature dictionaries. + """ + packer = SequencePairPacker if has_inputs else SequencePacker + combined = [] + for example in examples: + x = ((example["inputs"], example["targets"]) + if has_inputs else example["targets"]) + if chop_long_sequences and len(x) > packed_length: + assert not has_inputs + num_fragments = len(x) // packed_length + for i in range(num_fragments): + yield packer( + x[packed_length * i:packed_length * (i + 1)], spacing).to_dict() + x = x[packed_length * num_fragments:] + added = False + for c in combined: + if c.can_fit(x, packed_length): + c.add(x) + added = True + break + if not added: + if len(combined) == queue_size: + if FLAGS.with_padding: + combined[0].pad(packed_length) + yield combined[0].to_dict() + combined = combined[1:] + combined.append(packer(x, spacing)) + for c in combined: + if FLAGS.with_padding: + c.pad(packed_length) + yield c.to_dict() + + +def pack_dataset(dataset, length, keys=None, use_custom_ops=False): + """Creates a 'packed' version of a dataset on-the-fly. + + This is meant to replace the irritation of having to create a separate + "packed" version of a dataset to train efficiently on TPU. + + Each example in the output dataset represents several examples in the + input dataset. + + For each key in the input dataset, two additional keys are created: + _segmentation: an int32 tensor identifying the parts + representing the original example. + _position: an int32 tensor identifying the position within the original + example. + + Example: + Two input examples get combined to form an output example. + The input examples are: + {"inputs": [8, 7, 1, 0], "targets":[4, 1, 0]} + {"inputs": [2, 3, 4, 1], "targets":[5, 6, 1]} + The output example is: + { + "inputs": [8, 7, 1, 2, 3, 4, 1, 0, 0, 0] + "inputs_segmentation": [1, 1, 1, 2, 2, 2, 2, 0, 0, 0] + "inputs_position": [0, 1, 2, 0, 1, 2, 3, 0, 0, 0] + "targets": [4, 1, 5, 6, 1, 0, 0, 0, 0, 0] + "targets_segmentation": [1, 1, 2, 2, 2, 0, 0, 0, 0, 0] + "targets_position": [0, 1, 0, 1, 2, 0, 0, 0, 0, 0] + } + + 0 represents padding in both the inputs and the outputs. + + Sequences in the incoming examples are truncated to length "length", and the + sequences in the output examples all have fixed (padded) length "length". + + Args: + dataset: a tf.data.Dataset + length: an integer + keys: a list of strings (e.g. 
["inputs", "targets"]) + use_custom_ops: use a custom c++ op not included in standard tf (faster) + + Returns: + a tf.data.Dataset + """ + shapes = dataset.output_shapes + if keys is None: + keys = shapes.keys() + + for k in keys: + if k not in shapes: + raise ValueError("Key %s not found in dataset. Available keys are %s" + % (k, shapes.keys())) + if not shapes[k].is_compatible_with(tf.TensorShape([None])): + raise ValueError("Tensors to be packed must be one-dimensional.") + + if use_custom_ops: + return _pack_with_custom_ops(dataset, keys, length) + else: + packer = SequenceDatasetPacker(length, spacing=0, queue_size=10) + return packer(dataset, cycle_length=10, keys=keys) + + +def _pack_with_custom_ops(dataset, keys, length): + """Helper-function for packing a dataset which has already been batched. + + See pack_dataset() + + Relies on custom ops which require a custom compiled binary. + Faster than _pack_with_tf_ops(), and denser packing. + + Args: + dataset: a dataset containing padded batches of examples. + keys: a list of strings (must have length 2) + length: an integer + + Returns: + a dataset. + """ + from TensorFlow.nlp.transformer.data_generators.ops import pack_sequences_ops # pylint: disable=g-import-not-at-top + + # trim to length + dataset = dataset.map(lambda x: {k: x[k][:length] for k in keys}) + # Setting batch_size=length ensures that the concatenated sequences (if they + # have length >=1) are sufficient to fill at least one packed example. + batch_size = length + dataset = dataset.padded_batch( + batch_size, padded_shapes={k: [-1] for k in keys}) + + # better packing (may be faster) but requires custom-built binary. + k1, k2 = keys + def map_fn_custom(x): + """Map-function.""" + (k1_packed, k1_segmengation, k1_position, + k2_packed, k2_segmentation, k2_position) = ( + pack_sequences_ops.pack_sequences2(x[k1], x[k2], length, length)) + packed = { + k1: k1_packed, + k1 + "_segmentation": k1_segmengation, + k1 + "_position": k1_position, + k2: k2_packed, + k2 + "_segmentation": k2_segmentation, + k2 + "_position": k2_position, + } + return tf.data.Dataset.from_tensor_slices(packed) + dataset = dataset.flat_map(map_fn_custom) + return dataset + + +INDEX_DTYPE = tf.int32 + + +class SequenceDatasetPacker(object): + """Helper class for packing a dataset of sequences in an online fashon. + + The input sequence is expected to be a tuple of 1D Tensors which will be + converted to a dataset which produces a dict of packed examples, example + positions, and segment ids. + + If `window_size` or `cycle_length` is specified multiple packing operations + will be performed in parallel to increase throughput. A value of None will + select default parallelism parameters. If this dataset will be run on a TPU, + specifying a cycle_length > 10 is recommended. 
+ """ + + def __init__(self, packed_length=256, spacing=0, queue_size=10, + chop_long_sequences=False): + self._packed_length = packed_length + self._spacing = spacing + self._queue_size = queue_size + self._chop_long_sequences = chop_long_sequences + self._num_sequences = None + self._token_dtype = None + + def __call__(self, dataset, **kwargs): + if {"window_size", "cycle_length"}.intersection(kwargs): + return self._concurrent_pack(dataset, **kwargs) + return self._pack(dataset, **kwargs) + + def _concurrent_pack(self, dataset, window_size=None, cycle_length=None, + keys=None): + """Selects sensible default parallelism parameters based for a task.""" + + if window_size is None: + # This is a heuristic to fill all of the queues 10 times, and should do a + # reasonable job balancing parallelism (which benefits from lower window + # size) with packing efficiency (which suffers from edge effects when the + # window size is too low.) + window_size = int(self._packed_length / 8 * self._queue_size * 10) + + if cycle_length is None: + # Typically binning one stream will saturate about 3 cores. + + # Note on TPUs: + # cycle_length should still be explicitly set when training on TPUs, + # since the cpu count will be the local CPU count (which could be quite + # small), wereas the transforms will actually run on the TPU host + # controller which has a very robust CPU. + cycle_length = max([int(multiprocessing.cpu_count() / 3), 1]) + return self._pack(dataset, window_size=window_size, + cycle_length=cycle_length, keys=keys) + + def _pack(self, dataset, window_size=None, cycle_length=None, + deterministic=False, keys=None): + """Main method for chaining together packing transformation steps.""" + (dataset, self._num_sequences, self._token_dtype, keys + ) = self._standardize(dataset, keys) + if window_size is None: + dataset = self._scanning_pack(dataset) + else: + # Dataset.window splits nested Tensors. + re_zip = lambda *x: tf.data.Dataset.zip(x) + dataset = dataset.window(window_size).map(re_zip).interleave( + self._scanning_pack, cycle_length=cycle_length, + block_length=window_size, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + if not deterministic: + # Sloppy interleave offers a marginal performance improvement. + options = tf.data.Options() + options.experimental_deterministic = False + dataset = dataset.with_options(options) + + dataset = dataset.map( + self._finalize, num_parallel_calls=tf.data.experimental.AUTOTUNE) + self._num_sequences, self._token_dtype = None, None + + if keys: + def dict_pack(example): + output = {} + for i, key in enumerate(keys): + output[key] = example["contents"][:, i] + output[key + "_segmentation"] = example["segment"][:, i] + output[key + "_position"] = example["position"][:, i] + return output + dataset = dataset.map(dict_pack) + return dataset + + def _standardize(self, dataset, keys): + """Force dataset structure into a tuple of Tensors.""" + shapes = tf.data.get_output_shapes(dataset) + + if isinstance(shapes, dict): + keys = keys or tuple(shapes.keys()) + dataset = dataset.map(lambda x: tuple(x[k] for k in keys)) + shapes = tf.data.get_output_shapes(dataset) + + if not all(isinstance(i, tf.TensorShape) for i in shapes): + # Internally this class expects tuples of Tensors, even for the degenerate + # case of a single sequence. 
+ dataset = dataset.map(lambda x: (x,)) + shapes = tf.data.get_output_shapes(dataset) + + for s in shapes: + if not s.is_compatible_with(tf.TensorShape([None])): + raise ValueError("Tensors to be packed must be one-dimensional.") + + if not shapes: + raise ValueError("Expected sequence dataset.") + + if self._chop_long_sequences and len(shapes) != 1: + raise ValueError("chop_long_sequences expects a single sequence dataset.") + + token_types = tf.data.get_output_types(dataset) + if len(set(token_types)) > 1: + raise ValueError("Inconsistent dtypes: {}".format(token_types)) + + return dataset, len(shapes), token_types[0], keys + + def _eviction_fn(self, _): + return tuple(-tf.ones((self._packed_length,), dtype=self._token_dtype) + for _ in range(self._num_sequences)) + + def _scan_initial_state(self): + """Create TensorArrays and indices to track bin assignment. + + availability: TensorArray[queue_size, num_sequences] + This represents the number of tokens available in the ith bin. + See implementation note below. + + contents: TensorArray[queue_size, num_sequences * 2] + This holds the actual contents of the packed strings as well as a bit + mask indicating where sequences begin. It is stored in a flat vector and + is accessed in offsets of packed_length. + + top_index: scalar [0, queue_size) + Integer tensor indicating which index is the "top" bin. See implementation + note below. + + IMPLEMENTATION_NOTE: + The FFD algorithm periodically pops the topmost queue and pushes a new + one to replace it. In order to replicate those semantics with a fixed size + TensorArray, indexing operations are shifted by top_index. For example, + instead of: + `queue_available.read(i)` + + a read is instead performed as: + `queue_available.read((i - top_index) % queue_size)` + + to account for the fact that the "ith" logical FFD queue is stored at + position j. This means that the pop / push update can be performed by + simply incrementing top_index. (And zeroing the old top_index position.) + + Returns: + The state for the binning scan. + """ + + all_available = tf.ones((self._queue_size, self._num_sequences), + dtype=INDEX_DTYPE) * self._packed_length + total_size = self._packed_length * self._queue_size + total_size_range = tf.range(total_size, dtype=INDEX_DTYPE) + empty = tf.zeros((total_size, self._num_sequences * 2), + dtype=self._token_dtype) + + availability = tf.TensorArray( + dtype=INDEX_DTYPE, size=self._queue_size, dynamic_size=False, + clear_after_read=False, element_shape=(self._num_sequences,) + ).scatter(tf.range(self._queue_size, dtype=INDEX_DTYPE), all_available) + + contents = tf.TensorArray( + dtype=self._token_dtype, size=total_size, dynamic_size=False, + clear_after_read=False, element_shape=(self._num_sequences * 2,) + ).scatter(total_size_range, empty) + + # Which index should be considered the "top" bucket for the purpose of + # the first-fit descending algorithm. + top_index = tf.zeros((), dtype=INDEX_DTYPE) + + return availability, contents, top_index + + def _scanning_pack(self, dataset): + """Apply scan based pack to a dataset.""" + if self._chop_long_sequences: + dataset = dataset.map(lambda x: (x[:self._packed_length],)) + else: + dataset = dataset.filter(lambda *x: tf.reduce_max( # pylint: disable=g-long-lambda + tf.stack([tf.shape(i)[0] for i in x]), axis=0) <= self._packed_length) + + # In order to retrieve the sequences which are still in the queue when the + # dataset is exhausted, we feed dummy sequences which are guaranteed to + # displace the remaining elements. 
+ dataset = dataset.concatenate( + tf.data.Dataset.range(self._queue_size).map(self._eviction_fn)) + + initial_state = self._scan_initial_state() + step_fn = functools.partial( + tf.autograph.to_graph(_scan_step_fn), packed_length=self._packed_length, + queue_size=self._queue_size, spacing=self._spacing, + num_sequences=self._num_sequences, token_dtype=self._token_dtype) + + dataset = dataset.apply(tf.data.experimental.scan(initial_state, step_fn)) + + is_real_sample = lambda valid_sample, _: valid_sample + return dataset.filter(is_real_sample) + + def _compute_auxiliary_structure(self, contents_and_mask): + """Compute segment and position metadata.""" + contents = contents_and_mask[:, :self._num_sequences] + start_mask = tf.cast(contents_and_mask[:, self._num_sequences:], + dtype=INDEX_DTYPE) + + segment = tf.cumsum(start_mask, axis=0) + uniform_count = tf.ones_like(segment[:, 0]) + position = [] + for i in range(self._num_sequences): + segment_slice = segment[:, i] + counts = tf.math.segment_sum(uniform_count, segment[:, i]) + position.append(tf.range(self._packed_length) - tf.cumsum( + tf.gather(counts, segment_slice - 1) * start_mask[:, i])) + position = tf.concat([i[:, tf.newaxis] for i in position], axis=1) + + # Correct for padding tokens. + pad_mask = tf.cast(tf.not_equal(contents, 0), dtype=INDEX_DTYPE) + segment *= pad_mask + position *= pad_mask + + return segment, position + + def _finalize(self, _, contents): + """Structure output and compute segment and position metadata.""" + + # The output shape information is lost during the filter; however we can + # guarantee the shape. (That's the point of this exercise, after all!) + contents.set_shape((self._packed_length, self._num_sequences * 2)) + + # Both the dummy branch of the scan step function and the eviction dataset + # use vectors of minus one. The cost of this check is negligible and the + # leakage of such dummy sequences would be difficult to debug downstream. + check_leaks = tf.assert_none_equal(contents, -tf.ones_like(contents)) + with tf.control_dependencies([check_leaks]): + contents = tf.identity(contents) + + segment, position = self._compute_auxiliary_structure(contents) + return {"contents": contents[:, :self._num_sequences], + "segment": segment, "position": position} + + +def _scan_step_fn(state, example, packed_length, queue_size, spacing, + num_sequences, token_dtype): # pylint: disable=g-doc-args + """Transform function used by tf.data.experimental.scan to process an example. + + This is written as a stateless function rather than a class method because we + trace it with AutoGraph (in order to simplify the conditional), and this way + we don't have to worry about handling re-tracing semantics. + + Args: + See the SequenceDatasetPacker class. + + Returns: + The updated queue state, and either a packed example or a dummy sequence + which will be filtered out downstream. + """ + + # Convert TensorArray tuples to lists since we'll need to replace them. 
+ availability, contents, top_index = state + + lengths = tf.concat([tf.shape(i) for i in example], axis=0) + start_availability = availability.stack() + can_fit = tf.reduce_all(tf.greater_equal(start_availability, lengths), axis=1) + any_can_fit = tf.reduce_any(can_fit, axis=0) + + # AutoGraph will convert this block to a tf.cond + if any_can_fit: + # This indicates where in the FFD queue rotation a given index sits + shifted_range = ( + tf.range(queue_size, dtype=INDEX_DTYPE) - top_index) % queue_size + + # Mark any indices which cannot accommodate the current example. + exclusion_mask = tf.cast(tf.logical_not(can_fit), INDEX_DTYPE) * queue_size + + # Index in [0, queue_size) in which to place the sample. Note, this index + # is the position in the actual TensorArray, not the index of the FFD queue. + queue_index = (tf.reduce_min(shifted_range + exclusion_mask) + + top_index) % queue_size + + # NOTE(taylorrobie): We emit a non-empty Tensor for downstream checks. + output_contents = -tf.ones((1, num_sequences), dtype=token_dtype) + + else: + index_range = top_index * packed_length + tf.range(packed_length) + output_contents = contents.gather(index_range) + + # Reset the queue state. + availability = availability.write( + top_index, packed_length * tf.ones((num_sequences,), dtype=INDEX_DTYPE)) + empty_contents = tf.zeros((packed_length, num_sequences * 2), + dtype=token_dtype) + contents = contents.scatter(index_range, empty_contents) + + queue_index = top_index + top_index = (top_index + 1) % queue_size + + pre_assign_availability = availability.read(queue_index) + space_left = pre_assign_availability - lengths - spacing + availability = availability.write(queue_index, space_left) + + # ============================================================================ + # == Update contents ========================================================= + # ============================================================================ + # Consider the following case for a seq-to-seq packing: + # (padding is represented as underscores) + # + # Queue starting state: + # [1, 3, 2, 4, 6, 1, _, _, _, _, _, ...] + # [5, 9, _, _, _, _, _, _, _, _, _, ...] + # + # Examples: + # [4, 2, 4], [3] + # + # Desired new queue state: + # [1, 3, 2, 4, 6, 1, _, _, 4, 2, 4, _, _, ...] + # [5, 9, _, _, 3, _, _, _, _, _, _, _, _, ...] + # + # This could be acomplished by creating a TensorArray for each of the two + # sequences, and scattering into the respective arrays. However TensorArray + # writes are extremely expensive relative to other operations. So instead we + # store the contents in a single TensorArray of shape (packed_length, 2), and + # we pad and concatenate the examples such that they can be added in a single + # assign: + # + # [_, _, _, _, 4, 2, 4] + # [3, _, _, _, _, _, _] + # + + # [1, 3, 2, 4, 6, 1, _, _, _, _, _, ...] + # [5, 9, _, _, _, _, _, _, _, _, _, ...] + # + # And in practice, the extra work of padding is neglidgable compared to + # the gain from vectorizing the TensorArray assign. We also store a bit mask + # denoting where sequences start which is used to compute segment and + # position metadata: + # + # [_, _, _, _, 1, _, _] + # [1, _, _, _, _, _, _] + # + + # [1, _, _, _, _, _, _, _, _, _, _, ...] + # [1, _, _, _, _, _, _, _, _, _, _, ...] + # + # Both the contents and the mask are concatenated in the same TensorArray + # for performance. 
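+
+  # The block below implements that single vectorized update: it computes the
+  # start/end offset of each new sequence inside the packed example, pads the
+  # sequences out to their combined span, builds the start-bit mask with
+  # tf.one_hot, and applies the result with one gather/scatter on `contents`.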
+ + start_index = packed_length - pre_assign_availability + end_index = start_index + lengths + leftmost = tf.reduce_min(start_index, axis=0) + rightmost = tf.reduce_max(end_index, axis=0) + delta = rightmost - leftmost + pad_indices = [tf.stack((start_index[i] - leftmost, rightmost - end_index[i])) + for i in range(num_sequences)] + + padded_examples = [tf.pad(ex, padding[tf.newaxis, :]) + for ex, padding in zip(example, pad_indices)] + padded_examples = tf.transpose(tf.stack(padded_examples)) + mask_update = tf.one_hot(start_index - leftmost, delta, + dtype=contents.dtype, axis=0) + + content_update = tf.concat([padded_examples, mask_update], axis=1) + + index_range = (queue_index * packed_length + # Offset into the right section. + tf.range(delta, dtype=INDEX_DTYPE) + leftmost) + contents = contents.scatter(index_range, contents.gather(index_range) + + content_update) + + state = (availability, contents, top_index) + return state, (tf.logical_not(any_can_fit), output_contents) + + +def make_tmp_dir(suffix="", prefix="tmp", dir=None): # pylint: disable=redefined-builtin + """Make a temporary directory.""" + if dir is None: + return tempfile.mkdtemp(suffix, prefix, dir) + else: + while True: + rand_term = random.randint(1, 9999) + tmp_dir = os.path.join(dir, "%s%d%s" % (prefix, rand_term, suffix)) + if tf.gfile.Exists(tmp_dir): + continue + tf.gfile.MakeDirs(tmp_dir) + break + return tmp_dir + + +def tfrecord_iterator_for_problem(problem, data_dir, + dataset_split=tf.estimator.ModeKeys.TRAIN): + """Iterate over the records on disk for the Problem.""" + filenames = tf.gfile.Glob(problem.filepattern(data_dir, mode=dataset_split)) + example_spec = problem.example_reading_spec()[0] + return tfrecord_iterator(filenames, example_spec=example_spec) + + +def tfrecord_iterator(filenames, gzipped=False, example_spec=None): + """Yields records from TFRecord files. + + Args: + filenames: list, list of TFRecord filenames to read from. + gzipped: bool, whether the TFRecord files are gzip-encoded. + example_spec: dict, + if provided, will parse each record as a tensorflow.Example proto. + + Yields: + Records (or parsed Examples, if example_spec is provided) from files. + """ + with tf.Graph().as_default(): + dataset = tf.data.Dataset.from_tensor_slices(filenames) + + def _load_records(filename): + return tf.data.TFRecordDataset( + filename, + compression_type=tf.constant("GZIP") if gzipped else None, + buffer_size=16 * 1000 * 1000) + + dataset = dataset.flat_map(_load_records) + + def _parse_example(ex_ser): + return tf.parse_single_example(ex_ser, example_spec) + + if example_spec: + dataset = dataset.map(_parse_example, num_parallel_calls=32) + dataset = dataset.prefetch(100) + record_it = dataset.make_one_shot_iterator().get_next() + + with tf.Session() as sess: + while True: + try: + ex = sess.run(record_it) + yield ex + except tf.errors.OutOfRangeError: + break + + +def random_deinterleave(text, separator_symbol="X"): + """Create a fill-in-the-blanks training example from text. + + Split on spaces, then cut into segments at random points. Alternate segments + are assigned to the two output strings. separator_symbol separates segments + within each of the outputs. + + example: + text="The quick brown fox jumps over the lazy dog." + returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.") + + The two outputs can also be reversed to yield an instance of the same problem. 
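+
+  The number of cut points is drawn roughly log-uniformly between 1 and
+  len(words) (see num_cuts in the body below), so both short and long
+  segments are produced.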
+ + Args: + text: a string + separator_symbol: a string + Returns: + a pair of strings + """ + words = text.strip().split(" ") + n = len(words) + if n <= 1: + return text, "" + cut = [False] * n + cut[0] = True + num_cuts = int(math.exp(random.uniform(0, math.log(n)))) + for _ in range(num_cuts): + cut[random.randint(1, n -1)] = True + out = [[], []] + part = random.randint(0, 1) + for i in range(n): + if cut[i]: + out[part].append(separator_symbol) + part = 1 - part + out[part].append(words[i]) + return " ".join(out[0]), " ".join(out[1]) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/text_encoder.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/text_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..5b59fdfd786b43f595de988a9cede76945a6bafd --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/text_encoder.py @@ -0,0 +1,1064 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encoders for text data. + +* TextEncoder: base class +* ByteTextEncoder: for ascii text +* TokenTextEncoder: with user-supplied vocabulary file +* SubwordTextEncoder: invertible +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +from itertools import chain +import math +import re +import tempfile +import time +import numpy as np +import six +from six.moves import range # pylint: disable=redefined-builtin +from TensorFlow.nlp.transformer.data_generators import tokenizer + +import tensorflow.compat.v1 as tf + +# Reserved tokens for things like padding and EOS symbols. +PAD = "" +EOS = "" +RESERVED_TOKENS = [PAD, EOS] +NUM_RESERVED_TOKENS = len(RESERVED_TOKENS) +PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0 +EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1 + +if six.PY2: + RESERVED_TOKENS_BYTES = RESERVED_TOKENS +else: + RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")] + +# Regular expression for unescaping token strings. 
+# '\u' is converted to '_' +# '\\' is converted to '\' +# '\213;' is converted to unichr(213) +_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);") +_ESCAPE_CHARS = set(u"\\_u;0123456789") + + +# Unicode utility functions that work with Python 2 and 3 +def native_to_unicode(s): + if is_unicode(s): + return s + try: + return to_unicode(s) + except UnicodeDecodeError: + res = to_unicode(s, ignore_errors=True) + tf.logging.info("Ignoring Unicode error, outputting: %s" % res) + return res + + +def unicode_to_native(s): + if six.PY2: + return s.encode("utf-8") if is_unicode(s) else s + else: + return s + + +def is_unicode(s): + return isinstance(s, six.text_type) + + +def to_unicode(s, ignore_errors=False): + if is_unicode(s): + return s + error_mode = "ignore" if ignore_errors else "strict" + return s.decode("utf-8", errors=error_mode) + + +def to_unicode_ignore_errors(s): + return to_unicode(s, ignore_errors=True) + + +def to_unicode_utf8(s): + return unicode(s, "utf-8") if six.PY2 else s.decode("utf-8") + + +def strip_ids(ids, ids_to_strip): + """Strip ids_to_strip from the end ids.""" + ids = list(ids) + while ids and ids[-1] in ids_to_strip: + ids.pop() + return ids + + +class TextEncoder(object): + """Base class for converting from ints to/from human readable strings.""" + + def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS): + self._num_reserved_ids = num_reserved_ids + + @property + def num_reserved_ids(self): + return self._num_reserved_ids + + def encode(self, s): + """Transform a human-readable string into a sequence of int ids. + + The ids should be in the range [num_reserved_ids, vocab_size). Ids [0, + num_reserved_ids) are reserved. + + EOS is not appended. + + Args: + s: human-readable string to be converted. + + Returns: + ids: list of integers + """ + return [int(w) + self._num_reserved_ids for w in s.split()] + + def decode(self, ids, strip_extraneous=False): + """Transform a sequence of int ids into a human-readable string. + + EOS is not expected in ids. + + Args: + ids: list of integers to be converted. + strip_extraneous: bool, whether to strip off extraneous tokens + (EOS and PAD). + + Returns: + s: human-readable string. + """ + if strip_extraneous: + ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) + return " ".join(self.decode_list(ids)) + + def decode_list(self, ids): + """Transform a sequence of int ids into a their string versions. + + This method supports transforming individual input/output ids to their + string versions so that sequence to/from text conversions can be visualized + in a human readable format. + + Args: + ids: list of integers to be converted. + + Returns: + strs: list of human-readable string. + """ + decoded_ids = [] + for id_ in ids: + if 0 <= id_ < self._num_reserved_ids: + decoded_ids.append(RESERVED_TOKENS[int(id_)]) + else: + decoded_ids.append(id_ - self._num_reserved_ids) + return [str(d) for d in decoded_ids] + + @property + def vocab_size(self): + raise NotImplementedError() + + +class ByteTextEncoder(TextEncoder): + """Encodes each byte to an id. 
For 8-bit strings only.""" + + def encode(self, s): + numres = self._num_reserved_ids + if six.PY2: + if isinstance(s, unicode): + s = s.encode("utf-8") + return [ord(c) + numres for c in s] + # Python3: explicitly convert to UTF-8 + return [c + numres for c in s.encode("utf-8")] + + def decode(self, ids, strip_extraneous=False): + if strip_extraneous: + ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) + numres = self._num_reserved_ids + decoded_ids = [] + int2byte = six.int2byte + for id_ in ids: + if 0 <= id_ < numres: + decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)]) + else: + decoded_ids.append(int2byte(id_ - numres)) + if six.PY2: + return "".join(decoded_ids) + # Python3: join byte arrays and then decode string + return b"".join(decoded_ids).decode("utf-8", "replace") + + def decode_list(self, ids): + numres = self._num_reserved_ids + decoded_ids = [] + int2byte = six.int2byte + for id_ in ids: + if 0 <= id_ < numres: + decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)]) + else: + decoded_ids.append(int2byte(id_ - numres)) + # Python3: join byte arrays and then decode string + return decoded_ids + + @property + def vocab_size(self): + return 2**8 + self._num_reserved_ids + + +class ClassLabelEncoder(TextEncoder): + """Encoder for class labels.""" + + def __init__(self, class_labels=None, class_labels_fname=None): + super(ClassLabelEncoder, self).__init__(num_reserved_ids=0) + + if class_labels_fname: + with tf.gfile.Open(class_labels_fname) as f: + class_labels = [label.strip() for label in f.readlines()] + + assert class_labels + self._class_labels = class_labels + + def encode(self, s): + label_str = s + return self._class_labels.index(label_str) + + def decode(self, ids, strip_extraneous=False): + del strip_extraneous + label_id = ids + if isinstance(label_id, list): + assert len(label_id) == 1 + label_id, = label_id + if isinstance(label_id, np.ndarray): + label_id = np.squeeze(label_id) + return self._class_labels[label_id] + + def decode_list(self, ids): + return [self._class_labels[i] for i in ids] + + @property + def vocab_size(self): + return len(self._class_labels) + + +class OneHotClassLabelEncoder(ClassLabelEncoder): + """One-hot encoder for class labels.""" + + def encode(self, label_str, on_value=1, off_value=0): # pylint: disable=arguments-differ + e = np.full(self.vocab_size, off_value, dtype=np.int32) + e[self._class_labels.index(label_str)] = on_value + return e.tolist() + + def decode(self, ids, strip_extraneous=False): + del strip_extraneous + label_id = ids + if isinstance(label_id, np.ndarray): + label_id = np.squeeze(label_id).astype(np.int8).tolist() + assert isinstance(label_id, list) + assert len(label_id) == self.vocab_size + return self._class_labels[label_id.index(1)] + + @property + def vocab_size(self): + return len(self._class_labels) + + +class TokenTextEncoder(TextEncoder): + """Encoder based on a user-supplied vocabulary (file or list).""" + + def __init__(self, + vocab_filename, + reverse=False, + vocab_list=None, + replace_oov=None, + num_reserved_ids=NUM_RESERVED_TOKENS): + """Initialize from a file or list, one token per line. + + Handling of reserved tokens works as follows: + - When initializing from a list, we add reserved tokens to the vocab. + - When initializing from a file, we do not add reserved tokens to the vocab. + - When saving vocab files, we save reserved tokens to the file. + + Args: + vocab_filename: If not None, the full filename to read vocab from. If this + is not None, then vocab_list should be None. 
+ reverse: Boolean indicating if tokens should be reversed during encoding + and decoding. + vocab_list: If not None, a list of elements of the vocabulary. If this is + not None, then vocab_filename should be None. + replace_oov: If not None, every out-of-vocabulary token seen when + encoding will be replaced by this string (which must be in vocab). + num_reserved_ids: Number of IDs to save for reserved tokens like . + """ + super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids) + self._reverse = reverse + self._replace_oov = replace_oov + if vocab_filename: + self._init_vocab_from_file(vocab_filename) + else: + assert vocab_list is not None + self._init_vocab_from_list(vocab_list) + + def encode(self, s): + """Converts a space-separated string of tokens to a list of ids.""" + sentence = s + tokens = sentence.strip().split() + if self._replace_oov is not None: + tokens = [t if t in self._token_to_id else self._replace_oov + for t in tokens] + ret = [self._token_to_id[tok] for tok in tokens] + return ret[::-1] if self._reverse else ret + + def decode(self, ids, strip_extraneous=False): + return " ".join(self.decode_list(ids)) + + def decode_list(self, ids): + seq = reversed(ids) if self._reverse else ids + return [self._safe_id_to_token(i) for i in seq] + + @property + def vocab_size(self): + return len(self._id_to_token) + + def _safe_id_to_token(self, idx): + return self._id_to_token.get(idx, "ID_%d" % idx) + + def _init_vocab_from_file(self, filename): + """Load vocab from a file. + + Args: + filename: The file to load vocabulary from. + """ + with tf.gfile.Open(filename) as f: + tokens = [token.strip() for token in f.readlines()] + + def token_gen(): + for token in tokens: + yield token + + self._init_vocab(token_gen(), add_reserved_tokens=False) + + def _init_vocab_from_list(self, vocab_list): + """Initialize tokens from a list of tokens. + + It is ok if reserved tokens appear in the vocab list. They will be + removed. The set of tokens in vocab_list should be unique. + + Args: + vocab_list: A list of tokens. + """ + def token_gen(): + for token in vocab_list: + if token not in RESERVED_TOKENS: + yield token + + self._init_vocab(token_gen()) + + def _init_vocab(self, token_generator, add_reserved_tokens=True): + """Initialize vocabulary with tokens from token_generator.""" + + self._id_to_token = {} + non_reserved_start_index = 0 + + if add_reserved_tokens: + self._id_to_token.update(enumerate(RESERVED_TOKENS)) + non_reserved_start_index = len(RESERVED_TOKENS) + + self._id_to_token.update( + enumerate(token_generator, start=non_reserved_start_index)) + + # _token_to_id is the reverse of _id_to_token + self._token_to_id = dict((v, k) + for k, v in six.iteritems(self._id_to_token)) + + def store_to_file(self, filename): + """Write vocab file to disk. + + Vocab files have one token per line. The file ends in a newline. Reserved + tokens are written to the vocab file as well. + + Args: + filename: Full path of the file to store the vocab to. + """ + with tf.gfile.Open(filename, "w") as f: + for i in range(len(self._id_to_token)): + f.write(self._id_to_token[i] + "\n") + + +def _escape_token(token, alphabet): + """Escape away underscores and OOV characters and append '_'. + + This allows the token to be expressed as the concatenation of a list + of subtokens from the vocabulary. The underscore acts as a sentinel + which allows us to invertibly concatenate multiple such lists. + + Args: + token: A unicode string to be escaped. 
+ alphabet: A set of all characters in the vocabulary's alphabet. + + Returns: + escaped_token: An escaped unicode string. + + Raises: + ValueError: If the provided token is not unicode. + """ + if not isinstance(token, six.text_type): + raise ValueError("Expected string type for token, got %s" % type(token)) + + token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u") + ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token] + return u"".join(ret) + "_" + + +def _unescape_token(escaped_token): + """Inverse of _escape_token(). + + Args: + escaped_token: a unicode string + + Returns: + token: a unicode string + """ + + def match(m): + if m.group(1) is None: + return u"_" if m.group(0) == u"\\u" else u"\\" + + try: + return six.unichr(int(m.group(1))) + except (ValueError, OverflowError) as _: + return u"\u3013" # Unicode for undefined character. + + trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token + return _UNESCAPE_REGEX.sub(match, trimmed) + + +class SubwordTextEncoder(TextEncoder): + """Class for invertibly encoding text using a limited vocabulary. + + Invertibly encodes a native string as a sequence of subtokens from a limited + vocabulary. + + A SubwordTextEncoder is built from a corpus (so it is tailored to the text in + the corpus), and stored to a file. See text_encoder_build_subword.py. + + It can then be loaded and used to encode/decode any text. + + Encoding has four phases: + + 1. Tokenize into a list of tokens. Each token is a unicode string of either + all alphanumeric characters or all non-alphanumeric characters. We drop + tokens consisting of a single space that are between two alphanumeric + tokens. + + 2. Escape each token. This escapes away special and out-of-vocabulary + characters, and makes sure that each token ends with an underscore, and + has no other underscores. + + 3. Represent each escaped token as a the concatenation of a list of subtokens + from the limited vocabulary. Subtoken selection is done greedily from + beginning to end. That is, we construct the list in order, always picking + the longest subtoken in our vocabulary that matches a prefix of the + remaining portion of the encoded token. + + 4. Concatenate these lists. This concatenation is invertible due to the + fact that the trailing underscores indicate when one list is finished. + + """ + + def __init__(self, filename=None): + """Initialize and read from a file, if provided. + + Args: + filename: filename from which to read vocab. If None, do not load a + vocab + """ + self._alphabet = set() + self.filename = filename + if filename is not None: + self._load_from_file(filename) + super(SubwordTextEncoder, self).__init__() + + def encode(self, s): + """Converts a native string to a list of subtoken ids. + + Args: + s: a native string. + Returns: + a list of integers in the range [0, vocab_size) + """ + return self._tokens_to_subtoken_ids( + tokenizer.encode(native_to_unicode(s))) + + def encode_without_tokenizing(self, token_text): + """Converts string to list of subtoken ids without calling tokenizer. + + This treats `token_text` as a single token and directly converts it + to subtoken ids. This may be useful when the default tokenizer doesn't + do what we want (e.g., when encoding text with tokens composed of lots of + nonalphanumeric characters). It is then up to the caller to make sure that + raw text is consistently converted into tokens. Only use this if you are + sure that `encode` doesn't suit your needs. 
+ + Args: + token_text: A native string representation of a single token. + Returns: + A list of subword token ids; i.e., integers in the range [0, vocab_size). + """ + return self._tokens_to_subtoken_ids([native_to_unicode(token_text)]) + + def decode(self, ids, strip_extraneous=False): + """Converts a sequence of subtoken ids to a native string. + + Args: + ids: a list of integers in the range [0, vocab_size) + strip_extraneous: bool, whether to strip off extraneous tokens + (EOS and PAD). + + Returns: + a native string + """ + if strip_extraneous: + ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) + return unicode_to_native( + tokenizer.decode(self._subtoken_ids_to_tokens(ids))) + + def decode_list(self, ids): + return [self._subtoken_id_to_subtoken_string(s) for s in ids] + + @property + def vocab_size(self): + """The subtoken vocabulary size.""" + return len(self._all_subtoken_strings) + + def _tokens_to_subtoken_ids(self, tokens): + """Converts a list of tokens to a list of subtoken ids. + + Args: + tokens: a list of strings. + Returns: + a list of integers in the range [0, vocab_size) + """ + ret = [] + for token in tokens: + ret.extend(self._token_to_subtoken_ids(token)) + return ret + + def _token_to_subtoken_ids(self, token): + """Converts token to a list of subtoken ids. + + Args: + token: a string. + Returns: + a list of integers in the range [0, vocab_size) + """ + cache_location = hash(token) % self._cache_size + cache_key, cache_value = self._cache[cache_location] + if cache_key == token: + return cache_value + ret = self._escaped_token_to_subtoken_ids( + _escape_token(token, self._alphabet)) + self._cache[cache_location] = (token, ret) + return ret + + def _subtoken_ids_to_tokens(self, subtokens): + """Converts a list of subtoken ids to a list of tokens. + + Args: + subtokens: a list of integers in the range [0, vocab_size) + Returns: + a list of strings. + """ + concatenated = "".join( + [self._subtoken_id_to_subtoken_string(s) for s in subtokens]) + split = concatenated.split("_") + ret = [] + for t in split: + if t: + unescaped = _unescape_token(t + "_") + if unescaped: + ret.append(unescaped) + return ret + + def _subtoken_id_to_subtoken_string(self, subtoken): + """Converts a subtoken integer ID to a subtoken string.""" + if 0 <= subtoken < self.vocab_size: + return self._all_subtoken_strings[subtoken] + return u"" + + def _escaped_token_to_subtoken_strings(self, escaped_token): + """Converts an escaped token string to a list of subtoken strings. + + Args: + escaped_token: An escaped token as a unicode string. + Returns: + A list of subtokens as unicode strings. + """ + # NOTE: This algorithm is greedy; it won't necessarily produce the "best" + # list of subtokens. + ret = [] + start = 0 + token_len = len(escaped_token) + while start < token_len: + for end in range( + min(token_len, start + self._max_subtoken_len), start, -1): + subtoken = escaped_token[start:end] + if subtoken in self._subtoken_string_to_id: + ret.append(subtoken) + start = end + break + + else: # Did not break + # If there is no possible encoding of the escaped token then one of the + # characters in the token is not in the alphabet. This should be + # impossible and would be indicative of a bug. + assert False, "Token substring not found in subtoken vocabulary." + + return ret + + def _escaped_token_to_subtoken_ids(self, escaped_token): + """Converts an escaped token string to a list of subtoken IDs. + + Args: + escaped_token: An escaped token as a unicode string. 
+ Returns: + A list of subtoken IDs as integers. + """ + return [ + self._subtoken_string_to_id[subtoken] + for subtoken in self._escaped_token_to_subtoken_strings(escaped_token) + ] + + @classmethod + def build_from_generator(cls, + generator, + target_size, + max_subtoken_length=None, + reserved_tokens=None): + """Builds a SubwordTextEncoder from the generated text. + + Args: + generator: yields text. + target_size: int, approximate vocabulary size to create. + max_subtoken_length: Maximum length of a subtoken. If this is not set, + then the runtime and memory use of creating the vocab is quadratic in + the length of the longest token. If this is set, then it is instead + O(max_subtoken_length * length of longest token). + reserved_tokens: List of reserved tokens. The global variable + `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this + argument is `None`, it will use `RESERVED_TOKENS`. + + Returns: + SubwordTextEncoder with `vocab_size` approximately `target_size`. + """ + token_counts = collections.defaultdict(int) + for item in generator: + for tok in tokenizer.encode(native_to_unicode(item)): + token_counts[tok] += 1 + encoder = cls.build_to_target_size( + target_size, token_counts, 1, 1e3, + max_subtoken_length=max_subtoken_length, + reserved_tokens=reserved_tokens) + return encoder + + @classmethod + def build_to_target_size(cls, + target_size, + token_counts, + min_val, + max_val, + max_subtoken_length=None, + reserved_tokens=None, + num_iterations=4): + """Builds a SubwordTextEncoder that has `vocab_size` near `target_size`. + + Uses simple recursive binary search to find a minimum token count that most + closely matches the `target_size`. + + Args: + target_size: Desired vocab_size to approximate. + token_counts: A dictionary of token counts, mapping string to int. + min_val: An integer; lower bound for the minimum token count. + max_val: An integer; upper bound for the minimum token count. + max_subtoken_length: Maximum length of a subtoken. If this is not set, + then the runtime and memory use of creating the vocab is quadratic in + the length of the longest token. If this is set, then it is instead + O(max_subtoken_length * length of longest token). + reserved_tokens: List of reserved tokens. The global variable + `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this + argument is `None`, it will use `RESERVED_TOKENS`. + num_iterations: An integer; how many iterations of refinement. + + Returns: + A SubwordTextEncoder instance. + + Raises: + ValueError: If `min_val` is greater than `max_val`. + """ + if min_val > max_val: + raise ValueError("Lower bound for the minimum token count " + "is greater than the upper bound.") + if target_size < 1: + raise ValueError("Target size must be positive.") + + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + def bisect(min_val, max_val): + """Bisection to find the right size.""" + present_count = (max_val + min_val) // 2 + tf.logging.info("Trying min_count %d" % present_count) + subtokenizer = cls() + subtokenizer.build_from_token_counts( + token_counts, present_count, num_iterations, + max_subtoken_length=max_subtoken_length, + reserved_tokens=reserved_tokens) + + # Being within 1% of the target size is ok. + is_ok = abs(subtokenizer.vocab_size - target_size) * 100 < target_size + # If min_val == max_val, we can't do any better than this. 
+ if is_ok or min_val >= max_val or present_count < 2: + return subtokenizer + + if subtokenizer.vocab_size > target_size: + other_subtokenizer = bisect(present_count + 1, max_val) + else: + other_subtokenizer = bisect(min_val, present_count - 1) + + if other_subtokenizer is None: + return subtokenizer + + if (abs(other_subtokenizer.vocab_size - target_size) < + abs(subtokenizer.vocab_size - target_size)): + return other_subtokenizer + return subtokenizer + + return bisect(min_val, max_val) + + def build_from_token_counts(self, + token_counts, + min_count, + num_iterations=4, + reserved_tokens=None, + max_subtoken_length=None): + """Train a SubwordTextEncoder based on a dictionary of word counts. + + Args: + token_counts: a dictionary of Unicode strings to int. + min_count: an integer - discard subtokens with lower counts. + num_iterations: an integer. how many iterations of refinement. + reserved_tokens: List of reserved tokens. The global variable + `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this + argument is `None`, it will use `RESERVED_TOKENS`. + max_subtoken_length: Maximum length of a subtoken. If this is not set, + then the runtime and memory use of creating the vocab is quadratic in + the length of the longest token. If this is set, then it is instead + O(max_subtoken_length * length of longest token). + + Raises: + ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it + is not clear what the space is being reserved for, or when it will be + filled in. + """ + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + else: + # There is not complete freedom in replacing RESERVED_TOKENS. + for default, proposed in zip(RESERVED_TOKENS, reserved_tokens): + if default != proposed: + raise ValueError("RESERVED_TOKENS must be a prefix of " + "reserved_tokens.") + + # Initialize the alphabet. Note, this must include reserved tokens or it can + # result in encoding failures. + alphabet_tokens = chain(six.iterkeys(token_counts), + [native_to_unicode(t) for t in reserved_tokens]) + + self._init_alphabet_from_tokens(alphabet_tokens) + + # Bootstrap the initial list of subtokens with the characters from the + # alphabet plus the escaping characters. + self._init_subtokens_from_list(list(self._alphabet), + reserved_tokens=reserved_tokens) + + # We build iteratively. On each iteration, we segment all the words, + # then count the resulting potential subtokens, keeping the ones + # with high enough counts for our new vocabulary. + if min_count < 1: + min_count = 1 + for i in range(num_iterations): + tf.logging.info("Iteration {0}".format(i)) + + # Collect all substrings of the encoded token that break along current + # subtoken boundaries. 
+ subtoken_counts = collections.defaultdict(int) + for token, count in six.iteritems(token_counts): + iter_start_time = time.time() + escaped_token = _escape_token(token, self._alphabet) + subtokens = self._escaped_token_to_subtoken_strings(escaped_token) + start = 0 + for subtoken in subtokens: + last_position = len(escaped_token) + 1 + if max_subtoken_length is not None: + last_position = min(last_position, start + max_subtoken_length) + + for end in range(start + 1, last_position): + new_subtoken = escaped_token[start:end] + subtoken_counts[new_subtoken] += count + start += len(subtoken) + iter_time_secs = time.time() - iter_start_time + if iter_time_secs > 0.1: + tf.logging.info(u"Processing token [{0}] took {1} seconds, consider " + "setting Text2TextProblem.max_subtoken_length to a " + "smaller value.".format(token, iter_time_secs)) + + # Array of sets of candidate subtoken strings, by length. + len_to_subtoken_strings = [] + for subtoken_string, count in six.iteritems(subtoken_counts): + lsub = len(subtoken_string) + if count >= min_count: + while len(len_to_subtoken_strings) <= lsub: + len_to_subtoken_strings.append(set()) + len_to_subtoken_strings[lsub].add(subtoken_string) + + # Consider the candidates longest to shortest, so that if we accept + # a longer subtoken string, we can decrement the counts of its prefixes. + new_subtoken_strings = [] + for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1): + subtoken_strings = len_to_subtoken_strings[lsub] + for subtoken_string in subtoken_strings: + count = subtoken_counts[subtoken_string] + if count >= min_count: + # Exclude alphabet tokens here, as they must be included later, + # explicitly, regardless of count. + if subtoken_string not in self._alphabet: + new_subtoken_strings.append((count, subtoken_string)) + for l in range(1, lsub): + subtoken_counts[subtoken_string[:l]] -= count + + # Include the alphabet explicitly to guarantee all strings are encodable. + new_subtoken_strings.extend((subtoken_counts.get(a, 0), a) + for a in self._alphabet) + new_subtoken_strings.sort(reverse=True) + + # Reinitialize to the candidate vocabulary. + new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings] + if reserved_tokens: + escaped_reserved_tokens = [ + _escape_token(native_to_unicode(t), self._alphabet) + for t in reserved_tokens + ] + new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings + + self._init_subtokens_from_list(new_subtoken_strings) + tf.logging.info("vocab_size = %d" % self.vocab_size) + + @property + def all_subtoken_strings(self): + return tuple(self._all_subtoken_strings) + + def dump(self): + """Debugging dump of the current subtoken vocabulary.""" + subtoken_strings = [(i, s) + for s, i in six.iteritems(self._subtoken_string_to_id)] + print(u", ".join(u"{0} : '{1}'".format(i, s) + for i, s in sorted(subtoken_strings))) + + def _init_subtokens_from_list(self, subtoken_strings, reserved_tokens=None): + """Initialize token information from a list of subtoken strings. + + Args: + subtoken_strings: a list of subtokens + reserved_tokens: List of reserved tokens. We must have `reserved_tokens` + as None or the empty list, or else the global variable `RESERVED_TOKENS` + must be a prefix of `reserved_tokens`. + + Raises: + ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it + is not clear what the space is being reserved for, or when it will be + filled in. 
+ """ + if reserved_tokens is None: + reserved_tokens = [] + + if reserved_tokens: + self._all_subtoken_strings = reserved_tokens + subtoken_strings + else: + self._all_subtoken_strings = subtoken_strings + + # we remember the maximum length of any subtoken to avoid having to + # check arbitrarily long strings. + self._max_subtoken_len = max([len(s) for s in subtoken_strings]) + self._subtoken_string_to_id = { + s: i + len(reserved_tokens) + for i, s in enumerate(subtoken_strings) if s + } + # Initialize the cache to empty. + self._cache_size = 2 ** 20 + self._cache = [(None, None)] * self._cache_size + + def _init_alphabet_from_tokens(self, tokens): + """Initialize alphabet from an iterable of token or subtoken strings.""" + # Include all characters from all tokens in the alphabet to guarantee that + # any token can be encoded. Additionally, include all escaping characters. + self._alphabet = {c for token in tokens for c in token} + self._alphabet |= _ESCAPE_CHARS + + def _load_from_file_object(self, f): + """Load from a file object. + + Args: + f: File object to load vocabulary from + """ + subtoken_strings = [] + for line in f: + s = line.rstrip() + # Some vocab files wrap words in single quotes, but others don't + if ((s.startswith("'") and s.endswith("'")) or + (s.startswith("\"") and s.endswith("\""))): + s = s[1:-1] + subtoken_strings.append(native_to_unicode(s)) + self._init_subtokens_from_list(subtoken_strings) + self._init_alphabet_from_tokens(subtoken_strings) + + def _load_from_file(self, filename): + """Load from a vocab file.""" + if not tf.gfile.Exists(filename): + raise ValueError("File %s not found" % filename) + with tf.gfile.Open(filename) as f: + self._load_from_file_object(f) + + def store_to_file(self, filename, add_single_quotes=True): + with tf.gfile.Open(filename, "w") as f: + for subtoken_string in self._all_subtoken_strings: + if add_single_quotes: + f.write("'" + unicode_to_native(subtoken_string) + "'\n") + else: + f.write(unicode_to_native(subtoken_string) + "\n") + + +class ImageEncoder(object): + """Encoder class for saving and loading images.""" + + def __init__(self, num_reserved_ids=0, height=None, width=None, channels=3): + assert num_reserved_ids == 0 + self._height = height + self._width = width + self._channels = channels + + @property + def num_reserved_ids(self): + return 0 + + def encode(self, s): + """Transform a string with a filename into a list of RGB integers. + + Args: + s: path to the file with an image. + + Returns: + ids: list of integers + """ + try: + import matplotlib.image as im # pylint: disable=g-import-not-at-top + except ImportError as e: + tf.logging.warning( + "Reading an image requires matplotlib to be installed: %s", e) + raise NotImplementedError("Image reading not implemented.") + return im.imread(s) + + def decode(self, ids, strip_extraneous=False): + """Transform a sequence of int ids into an image file. + + Args: + ids: list of integers to be converted. + strip_extraneous: unused + + Returns: + Path to the temporary file where the image was saved. + + Raises: + ValueError: if the ids are not of the appropriate size. 
+ """ + del strip_extraneous + _, tmp_file_path = tempfile.mkstemp("_decode.png") + if self._height is None or self._width is None: + size = int(math.sqrt(len(ids) / self._channels)) + length = size * size * self._channels + else: + size = None + length = self._height * self._width * self._channels + if len(ids) != length: + raise ValueError("Length of ids (%d) must be height (%d) x width (%d) x " + "channels (%d); %d != %d.\n Ids: %s" + % (len(ids), self._height, self._width, self._channels, + len(ids), length, " ".join([str(i) for i in ids]))) + with tf.Graph().as_default(): + raw = tf.constant(ids, dtype=tf.uint8) + if size is None: + img = tf.reshape(raw, [self._height, self._width, self._channels]) + else: + img = tf.reshape(raw, [size, size, self._channels]) + png = tf.image.encode_png(img) + op = tf.write_file(tmp_file_path, png) + with tf.Session() as sess: + sess.run(op) + return tmp_file_path + + def decode_list(self, ids): + """Transform a sequence of int ids into an image file. + + Args: + ids: list of integers to be converted. + + Returns: + Singleton list: path to the temporary file where the image was saved. + """ + return [self.decode(ids)] + + @property + def vocab_size(self): + return 256 + + +class RealEncoder(object): + """Encoder class for saving and loading float values.""" + + def encode(self, s): + """Transform a string (space separated float values) into a float array. + + Args: + s: space separated float values. + + Returns: + Array of float values. + """ + return [float(w) for w in s.split()] + + def decode(self, ids, strip_extraneous=False): + """Transform sequence of float values into string (float values). + + Args: + ids: array of floats to be converted. + strip_extraneous: unused + + Returns: + String having space separated float values. + + Raises: + ValueError: if the ids are not of the appropriate size. + """ + del strip_extraneous + return " ".join([str(i) for i in ids]) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/text_encoder_build_subword.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/text_encoder_build_subword.py new file mode 100644 index 0000000000000000000000000000000000000000..bc645a2721d2a49cb0eab0572b87a59a18cb4b48 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/text_encoder_build_subword.py @@ -0,0 +1,79 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Program to build a SubwordTextEncoder. + +The flags --min_count and --corpus_max_lines will affect the size of the +vocabulary. Try changing these flags until you get a vocabulary +of the size you want. 
+ +Example usage: + +python data_generators/text_encoder_build_subword.py \ + --corpus_filepattern=$DATA_DIR/my_problem-train-* \ + --corpus_max_lines=12345 \ + --output_filename=$DATA_DIR/my_problem.subword_text_encoder \ + --logtostderr + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from TensorFlow.nlp.transformer.data_generators import text_encoder +from TensorFlow.nlp.transformer.data_generators import tokenizer + +import tensorflow.compat.v1 as tf + +tf.flags.DEFINE_string('output_filename', '/tmp/my.subword_text_encoder', + 'where to store the SubwordTextEncoder') +tf.flags.DEFINE_string('corpus_filepattern', '', + 'Corpus of one or more text files') +tf.flags.DEFINE_string('vocab_filepattern', '', 'One or more vocabulary files ' + '(one word per line as "word,count")') +tf.flags.DEFINE_integer('min_count', 5, 'Minimum subtoken count in corpus') +tf.flags.DEFINE_integer('corpus_max_lines', 10000, + 'How many lines of corpus to read') +tf.flags.DEFINE_integer('num_iterations', 4, 'Number of iterations') +tf.flags.DEFINE_bool('split_on_newlines', True, 'Break corpus into lines.') +FLAGS = tf.flags.FLAGS + + +def main(unused_argv): + if FLAGS.corpus_filepattern and FLAGS.vocab_filepattern: + raise ValueError( + 'Must only provide one of --corpus_filepattern or --vocab_filepattern') + + elif FLAGS.corpus_filepattern: + token_counts = tokenizer.corpus_token_counts( + FLAGS.corpus_filepattern, + FLAGS.corpus_max_lines, + split_on_newlines=FLAGS.split_on_newlines) + + elif FLAGS.vocab_filepattern: + token_counts = tokenizer.vocab_token_counts(FLAGS.vocab_filepattern, + FLAGS.corpus_max_lines) + + else: + raise ValueError( + 'Must provide one of --corpus_filepattern or --vocab_filepattern') + + encoder = text_encoder.SubwordTextEncoder() + encoder.build_from_token_counts(token_counts, FLAGS.min_count, + FLAGS.num_iterations) + encoder.store_to_file(FLAGS.output_filename) + + +if __name__ == '__main__': + tf.app.run() diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/tokenizer.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..373432e8dd56d94946b2dd57356cef9d92bcb49f --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/tokenizer.py @@ -0,0 +1,194 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A simple invertible tokenizer. + +Converts from a unicode string to a list of tokens +(represented as Unicode strings). + +This tokenizer has the following desirable properties: + - It is invertible. + - Alphanumeric characters are broken away from non-alphanumeric characters. + - A single space between words does not produce an extra token. + - The full Unicode punctuation and separator set is recognized. + +The tokenization algorithm is as follows: + +1. 
Split the text into a list of tokens, splitting at every boundary of an + alphanumeric character and a non-alphanumeric character. This produces + a list which alternates between "alphanumeric tokens" + (strings of alphanumeric characters) and "non-alphanumeric tokens" + (strings of non-alphanumeric characters). + +2. Remove every token consisting of a single space, unless it is + the very first or very last token in the list. These tokens are now + implied by the fact that there are two adjacent alphanumeric tokens. + +e.g. u"Dude - that's so cool." + -> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."] +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import sys +import unicodedata +import six +from six.moves import range # pylint: disable=redefined-builtin +import tensorflow.compat.v1 as tf + +# Conversion between Unicode and UTF-8, if required (on Python2) +_native_to_unicode = (lambda s: s.decode("utf-8")) if six.PY2 else (lambda s: s) + + +# This set contains all letter and number characters. +_ALPHANUMERIC_CHAR_SET = set( + six.unichr(i) for i in range(sys.maxunicode) + if (unicodedata.category(six.unichr(i)).startswith("L") or + unicodedata.category(six.unichr(i)).startswith("N"))) + + +def encode(text): + """Encode a unicode string as a list of tokens. + + Args: + text: a unicode string + Returns: + a list of tokens as Unicode strings + """ + if not text: + return [] + ret = [] + token_start = 0 + # Classify each character in the input string + is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text] + for pos in range(1, len(text)): + if is_alnum[pos] != is_alnum[pos - 1]: + token = text[token_start:pos] + if token != u" " or token_start == 0: + ret.append(token) + token_start = pos + final_token = text[token_start:] + ret.append(final_token) + return ret + + +def decode(tokens): + """Decode a list of tokens to a unicode string. + + Args: + tokens: a list of Unicode strings + Returns: + a unicode string + """ + token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens] + ret = [] + for i, token in enumerate(tokens): + if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]: + ret.append(u" ") + ret.append(token) + return "".join(ret) + + +def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True): + """Reads files matching a wildcard pattern, yielding the contents. + + Args: + filepattern: A wildcard pattern matching one or more files. + max_lines: If set, stop reading after reading this many lines. + split_on_newlines: A boolean. If true, then split files by lines and strip + leading and trailing whitespace from each line. Otherwise, treat each + file as a single string. + + Yields: + The contents of the files as lines, if split_on_newlines is True, or + the entire contents of each file if False. + """ + filenames = sorted(tf.gfile.Glob(filepattern)) + lines_read = 0 + for filename in filenames: + with tf.gfile.Open(filename) as f: + if split_on_newlines: + for line in f: + yield line.strip() + lines_read += 1 + if max_lines and lines_read >= max_lines: + return + + else: + if max_lines: + doc = [] + for line in f: + doc.append(line) + lines_read += 1 + if max_lines and lines_read >= max_lines: + yield "".join(doc) + return + yield "".join(doc) + + else: + yield f.read() + + +def corpus_token_counts( + text_filepattern, corpus_max_lines, split_on_newlines=True): + """Read the corpus and compute a dictionary of token counts. 
+ + Args: + text_filepattern: A pattern matching one or more files. + corpus_max_lines: An integer; maximum total lines to read. + split_on_newlines: A boolean. If true, then split files by lines and strip + leading and trailing whitespace from each line. Otherwise, treat each + file as a single string. + + Returns: + a dictionary mapping token to count. + """ + counts = collections.Counter() + for doc in _read_filepattern( + text_filepattern, + max_lines=corpus_max_lines, + split_on_newlines=split_on_newlines): + counts.update(encode(_native_to_unicode(doc))) + + return counts + + +def vocab_token_counts(text_filepattern, max_lines): + """Read a vocab file and return a dictionary of token counts. + + Reads a two-column CSV file of tokens and their frequency in a dataset. The + tokens are presumed to be generated by encode() or the equivalent. + + Args: + text_filepattern: A pattern matching one or more files. + max_lines: An integer; maximum total lines to read. + + Returns: + a dictionary mapping token to count. + """ + ret = {} + for i, line in enumerate( + _read_filepattern(text_filepattern, max_lines=max_lines)): + if "," not in line: + tf.logging.warning("Malformed vocab line #%d '%s'", i, line) + continue + + token, count = line.rsplit(",", 1) + ret[_native_to_unicode(token)] = int(count) + + return ret diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_encs.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_encs.py new file mode 100644 index 0000000000000000000000000000000000000000..a969d797704e4172ccb44f9da8edce7ffdedf2b1 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_encs.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from TensorFlow.nlp.transformer.data_generators import problem +from TensorFlow.nlp.transformer.data_generators import text_encoder +from TensorFlow.nlp.transformer.data_generators import text_problems +from TensorFlow.nlp.transformer.data_generators import translate +from TensorFlow.nlp.transformer.utils import registry + + +# End-of-sentence marker. 
+EOS = text_encoder.EOS_ID + +_ENCS_TRAIN_DATASETS = [ + [("https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/" + "11234/1-1458/data-plaintext-format.tar"), + ("tsv", 3, 2, "data.plaintext-format/*train.gz")], + [ + "http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz", # pylint: disable=line-too-long + ("training-parallel-nc-v13/news-commentary-v13.cs-en.en", + "training-parallel-nc-v13/news-commentary-v13.cs-en.cs") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", + ("commoncrawl.cs-en.en", "commoncrawl.cs-en.cs") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", + ("training/europarl-v7.cs-en.en", "training/europarl-v7.cs-en.cs") + ], +] +_ENCS_TEST_DATASETS = [ + [ + "http://data.statmt.org/wmt17/translation-task/dev.tgz", + ("dev/newstest2013.en", "dev/newstest2013.cs") + ], +] + + +@registry.register_problem +class TranslateEncsWmt32k(translate.TranslateProblem): + """Problem spec for WMT English-Czech translation.""" + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS + + def vocab_data_files(self): + datasets = self.source_data_files(problem.DatasetSplit.TRAIN) + vocab_datasets = [] + if datasets[0][0].endswith("data-plaintext-format.tar"): + vocab_datasets.append([ + datasets[0][0], [ + "%s-compiled-train.lang1" % self.name, + "%s-compiled-train.lang2" % self.name + ] + ]) + datasets = datasets[1:] + vocab_datasets += [[item[0], [item[1][0], item[1][1]]] for item in datasets] + return vocab_datasets + + +@registry.register_problem +class TranslateEncsWmtCharacters(translate.TranslateProblem): + """Problem spec for WMT En-Cs character-based translation.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + datasets = _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS + tag = "train" if train else "dev" + data_path = translate.compile_data(tmp_dir, datasets, + "wmt_encs_chr_%s" % tag) + return text_problems.text2text_txt_iterator(data_path + ".lang1", + data_path + ".lang2") diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_ende.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_ende.py new file mode 100644 index 0000000000000000000000000000000000000000..dac26f8bac1a3c64dfe9007ea645f359809e68ec --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_ende.py @@ -0,0 +1,218 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from TensorFlow.nlp.transformer.data_generators import problem +from TensorFlow.nlp.transformer.data_generators import text_problems +from TensorFlow.nlp.transformer.data_generators import translate +from TensorFlow.nlp.transformer.utils import registry + + +_ENDE_TRAIN_DATASETS = [ + [ + "http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz", # pylint: disable=line-too-long + ("training-parallel-nc-v13/news-commentary-v13.de-en.en", + "training-parallel-nc-v13/news-commentary-v13.de-en.de") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", + ("commoncrawl.de-en.en", "commoncrawl.de-en.de") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", + ("training/europarl-v7.de-en.en", "training/europarl-v7.de-en.de") + ], +] + +_ENDE_EVAL_DATASETS = [ + [ + "http://data.statmt.org/wmt17/translation-task/dev.tgz", + ("dev/newstest2013.en", "dev/newstest2013.de") + ], +] + +_ENDE_RAPID_TRAIN_DATASET = [ + # additional training data available for WMT 18 news task training data + # as defined by http://www.statmt.org/wmt18/translation-task.html + [ + "http://data.statmt.org/wmt18/translation-task/rapid2016.tgz", + ("rapid2016.de-en.en", "rapid2016.de-en.de"), + ], +] + +_ENDE_PARACRAWL_DATASETS = [ + [ + "https://s3.amazonaws.com/web-language-models/paracrawl/release4/en-de.bicleaner07.tmx.gz", # pylint: disable=line-too-long + ("tmx", "en-de.bicleaner07.tmx.gz") + ] +] + + +@registry.register_problem +class TranslateEndeWmt32k(translate.TranslateProblem): + """En-de translation trained on WMT corpus.""" + + @property + def additional_training_datasets(self): + """Allow subclasses to add training datasets.""" + return [] + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + train_datasets = _ENDE_TRAIN_DATASETS + self.additional_training_datasets + return train_datasets if train else _ENDE_EVAL_DATASETS + + +@registry.register_problem +class TranslateEnde2018Wmt32k(translate.TranslateProblem): + """En-de translation trained on WMT18 corpus.""" + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt32k() + + @property + def additional_training_datasets(self): + """WMT18 adds rapid data.""" + return _ENDE_RAPID_TRAIN_DATASET + + +@registry.register_problem +class TranslateEndeWmtClean32k(TranslateEndeWmt32k): + """En-de translation trained on WMT with further cleaning.""" + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt32k() + + @property + def datatypes_to_clean(self): + return ["txt"] + + +@registry.register_problem +class TranslateEndePc32k(translate.TranslateProblem): + """En-de translation trained on Paracrawl (bicleaner corpus).""" + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt32k() + + @property + def additional_training_datasets(self): + """Allow subclasses to add training datasets.""" + return [] + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + train_datasets = ( + _ENDE_PARACRAWL_DATASETS + self.additional_training_datasets) + return train_datasets if train else _ENDE_EVAL_DATASETS + + +@registry.register_problem +class TranslateEndePcClean32k(TranslateEndePc32k): + """En-de translation trained on Paracrawl with further cleaning.""" + + @property + def 
datatypes_to_clean(self): + return ["tmx"] + + +@registry.register_problem +class TranslateEndeWmtPc32k(TranslateEndeWmt32k): + """En-de translation trained on WMT plus Paracrawl.""" + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt32k() + + @property + def additional_training_datasets(self): + return _ENDE_PARACRAWL_DATASETS + + +@registry.register_problem +class TranslateEndeWmtCleanPc32k(TranslateEndeWmtPc32k): + """En-de translation trained on cleaned WMT plus Paracrawl.""" + + @property + def datatypes_to_clean(self): + return ["txt"] + + +@registry.register_problem +class TranslateEndeWmtPcClean32k(TranslateEndeWmtPc32k): + """En-de translation trained on WMT plus cleaned Paracrawl.""" + + @property + def datatypes_to_clean(self): + return ["tmx"] + + +@registry.register_problem +class TranslateEndeWmtCleanPcClean32k(TranslateEndeWmtPcClean32k): + """En-de translation trained on cleaned WMT plus cleaned Paracrawl.""" + + @property + def datatypes_to_clean(self): + return ["txt", "tmx"] + + +@registry.register_problem +class TranslateEndeWmt32kPacked(TranslateEndeWmt32k): + + @property + def packed_length(self): + return 256 + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt32k() + + +@registry.register_problem +class TranslateEndeWmt8k(TranslateEndeWmt32k): + """Problem spec for WMT En-De translation.""" + + @property + def approx_vocab_size(self): + return 2**13 # 8192 + + +@registry.register_problem +class TranslateEndeWmt8kPacked(TranslateEndeWmt8k): + + @property + def packed_length(self): + return 256 + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt8k() + + +@registry.register_problem +class TranslateEndeWmtCharacters(TranslateEndeWmt8k): + """Problem spec for WMT En-De translation.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enfr.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enfr.py new file mode 100644 index 0000000000000000000000000000000000000000..1cf63c519417518cf29cc115bc44a4fa9c9663f8 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enfr.py @@ -0,0 +1,235 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from TensorFlow.nlp.transformer.data_generators import problem +from TensorFlow.nlp.transformer.data_generators import text_encoder +from TensorFlow.nlp.transformer.data_generators import text_problems +from TensorFlow.nlp.transformer.data_generators import translate +from TensorFlow.nlp.transformer.utils import registry + + +# End-of-sentence marker. 
+EOS = text_encoder.EOS_ID + +_ENFR_TRAIN_SMALL_DATA = [ + [ + "https://s3.amazonaws.com/opennmt-trainingdata/baseline-1M-enfr.tgz", + ("baseline-1M-enfr/baseline-1M_train.en", + "baseline-1M-enfr/baseline-1M_train.fr") + ], +] +_ENFR_TEST_SMALL_DATA = [ + [ + "https://s3.amazonaws.com/opennmt-trainingdata/baseline-1M-enfr.tgz", + ("baseline-1M-enfr/baseline-1M_valid.en", + "baseline-1M-enfr/baseline-1M_valid.fr") + ], +] +_ENFR_TRAIN_LARGE_DATA = [ + [ + "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", + ("commoncrawl.fr-en.en", "commoncrawl.fr-en.fr") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", + ("training/europarl-v7.fr-en.en", "training/europarl-v7.fr-en.fr") + ], + [ + "http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz", + ("training/news-commentary-v9.fr-en.en", + "training/news-commentary-v9.fr-en.fr") + ], + [ + "http://www.statmt.org/wmt10/training-giga-fren.tar", + ("giga-fren.release2.fixed.en.gz", + "giga-fren.release2.fixed.fr.gz") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-un.tgz", + ("un/undoc.2000.fr-en.en", "un/undoc.2000.fr-en.fr") + ], +] +_ENFR_TEST_LARGE_DATA = [ + [ + "http://data.statmt.org/wmt17/translation-task/dev.tgz", + ("dev/newstest2013.en", "dev/newstest2013.fr") + ], +] + + +@registry.register_problem +class TranslateEnfrWmtSmall8k(translate.TranslateProblem): + """Problem spec for WMT En-Fr translation.""" + + @property + def approx_vocab_size(self): + return 2**13 # 8192 + + @property + def use_small_dataset(self): + return True + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + if self.use_small_dataset: + datasets = _ENFR_TRAIN_SMALL_DATA if train else _ENFR_TEST_SMALL_DATA + else: + datasets = _ENFR_TRAIN_LARGE_DATA if train else _ENFR_TEST_LARGE_DATA + return datasets + + def vocab_data_files(self): + return (_ENFR_TRAIN_SMALL_DATA if self.use_small_dataset + else _ENFR_TRAIN_LARGE_DATA) + + +@registry.register_problem +class TranslateEnfrWmtSmall32k(TranslateEnfrWmtSmall8k): + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + +@registry.register_problem +class TranslateEnfrWmt8k(TranslateEnfrWmtSmall8k): + + @property + def use_small_dataset(self): + return False + + +@registry.register_problem +class TranslateEnfrWmt32k(TranslateEnfrWmtSmall32k): + + @property + def use_small_dataset(self): + return False + + +@registry.register_problem +class TranslateEnfrWmt32kPacked(TranslateEnfrWmt32k): + + @property + def packed_length(self): + return 256 + + @property + def use_vocab_from_other_problem(self): + return TranslateEnfrWmt32k() + + +@registry.register_problem +class TranslateEnfrWmt32kWithBacktranslateFr(TranslateEnfrWmt32k): + """En-Fr translation with added French data, back-translated.""" + + @property + def use_vocab_from_other_problem(self): + return TranslateEnfrWmt32k() + + @property + def already_shuffled(self): + return True + + @property + def skip_random_fraction_when_training(self): + return False + + @property + def backtranslate_data_filenames(self): + """List of pairs of files with matched back-translated data.""" + # Files must be placed in tmp_dir, each similar size to authentic data. + return [("fr_mono_en.txt", "fr_mono_fr.txt")] + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 1, # Use just 1 shard so as to not mix data. 
+ }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + datasets = self.source_data_files(dataset_split) + tag = "train" if dataset_split == problem.DatasetSplit.TRAIN else "dev" + data_path = translate.compile_data( + tmp_dir, datasets, "%s-compiled-%s" % (self.name, tag)) + # For eval, use authentic data. + if dataset_split != problem.DatasetSplit.TRAIN: + for example in text_problems.text2text_txt_iterator( + data_path + ".lang1", data_path + ".lang2"): + yield example + else: # For training, mix synthetic and authentic data as follows. + for (file1, file2) in self.backtranslate_data_filenames: + path1 = os.path.join(tmp_dir, file1) + path2 = os.path.join(tmp_dir, file2) + # Synthetic data first. + for example in text_problems.text2text_txt_iterator(path1, path2): + yield example + # Now authentic data. + for example in text_problems.text2text_txt_iterator( + data_path + ".lang1", data_path + ".lang2"): + yield example + + +@registry.register_problem +class TranslateEnfrWmt32kWithBacktranslateEn( + TranslateEnfrWmt32kWithBacktranslateFr): + """En-Fr translation with added English data, back-translated.""" + + @property + def backtranslate_data_filenames(self): + """List of pairs of files with matched back-translated data.""" + # Files must be placed in tmp_dir, each similar size to authentic data. + return [("en_mono_en.txt%d" % i, "en_mono_fr.txt%d" % i) for i in [0, 1, 2]] + + +@registry.register_problem +class TranslateEnfrWmtSmallCharacters(translate.TranslateProblem): + """Problem spec for WMT En-Fr translation.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + @property + def use_small_dataset(self): + return True + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + if self.use_small_dataset: + datasets = _ENFR_TRAIN_SMALL_DATA if train else _ENFR_TEST_SMALL_DATA + else: + datasets = _ENFR_TRAIN_LARGE_DATA if train else _ENFR_TEST_LARGE_DATA + return datasets + + +@registry.register_problem +class TranslateEnfrWmtCharacters(TranslateEnfrWmtSmallCharacters): + + @property + def use_small_dataset(self): + return False diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enro.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enro.py new file mode 100644 index 0000000000000000000000000000000000000000..fcafe53289cc99ba12bcdf2aaeaccbee209f6f5e --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enro.py @@ -0,0 +1,142 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import random + +from TensorFlow.nlp.transformer.data_generators import problem +from TensorFlow.nlp.transformer.data_generators import text_problems +from TensorFlow.nlp.transformer.data_generators import translate +from TensorFlow.nlp.transformer.utils import registry + + +_ENRO_TRAIN_DATASETS = [ + [ + "http://www.statmt.org/europarl/v7/ro-en.tgz", + ("europarl-v7.ro-en.en", "europarl-v7.ro-en.ro") + ], + [ + "http://opus.nlpl.eu/download.php?f=SETIMES/v2/moses/en-ro.txt.zip", + ("SETIMES.en-ro.en", "SETIMES.en-ro.ro") + ] +] +_ENRO_TEST_DATASETS = [ + [ + ("http://data.statmt.org/wmt16/translation-task/" + "dev-romanian-updated.tgz"), + ("dev/newsdev2016-roen-ref.en.sgm", "dev/newsdev2016-roen-src.ro.sgm") + ], +] + + +@registry.register_problem +class TranslateEnroWmt8k(translate.TranslateProblem): + """Problem spec for WMT En-Ro translation.""" + + @property + def approx_vocab_size(self): + return 2**13 # 8192 + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENRO_TRAIN_DATASETS if train else _ENRO_TEST_DATASETS + + +@registry.register_problem +class TranslateEnroWmt32k(TranslateEnroWmt8k): + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + +@registry.register_problem +class TranslateEnroWmtCharacters(TranslateEnroWmt8k): + """Problem spec for WMT En-Ro translation.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + +@registry.register_problem +class TranslateEnroWmtMulti64k(TranslateEnroWmt8k): + """Translation with muli-lingual vocabulary.""" + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + +@registry.register_problem +class TranslateEnroWmtMultiSmall64k(TranslateEnroWmt8k): + """Translation with muli-lingual vocabulary, small (6K) training data.""" + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 16, # It's a small dataset, TPUs like at least a few shards. + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + @property + def how_many_examples_to_sample(self): + return 6000 + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate just the first 6k samples for training.""" + # If not training, do the same as before. + if dataset_split != problem.DatasetSplit.TRAIN: + for x in super(TranslateEnroWmtMultiSmall64k, self).generate_samples( + data_dir, tmp_dir, dataset_split): + yield x + raise StopIteration + # Now we assume we're training. + counter = 0 + # The size of this data-set in total is around 614K, we want to sample so + # that in expectation we take the requested number of samples in 1 go. + sample_prob = self.how_many_examples_to_sample / float(614000) + # Let's sample. + for x in super(TranslateEnroWmtMultiSmall64k, self).generate_samples( + data_dir, tmp_dir, dataset_split): + if random.random() > sample_prob: + continue + counter += 1 + if counter > self.how_many_examples_to_sample: + raise StopIteration + yield x + # We do it again if we don't have enough samples. 
+ if counter < self.how_many_examples_to_sample: + for x in super(TranslateEnroWmtMultiSmall64k, self).generate_samples( + data_dir, tmp_dir, dataset_split): + if random.random() > sample_prob: + continue + counter += 1 + if counter > self.how_many_examples_to_sample: + raise StopIteration + yield x diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_envi.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_envi.py new file mode 100644 index 0000000000000000000000000000000000000000..74a27d19114718882489c39f8940845e3418e169 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_envi.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for En-Vi translation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from TensorFlow.nlp.transformer.data_generators import problem +from TensorFlow.nlp.transformer.data_generators import text_encoder +from TensorFlow.nlp.transformer.data_generators import translate +from TensorFlow.nlp.transformer.utils import registry + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + +# For English-Vietnamese the IWSLT'15 corpus +# from https://nlp.stanford.edu/projects/nmt/ is used. +# The original dataset has 133K parallel sentences. +_ENVI_TRAIN_DATASETS = [[ + "https://github.com/stefan-it/nmt-en-vi/raw/master/data/train-en-vi.tgz", # pylint: disable=line-too-long + ("train.en", "train.vi") +]] + +# For development 1,553 parallel sentences are used. +_ENVI_TEST_DATASETS = [[ + "https://github.com/stefan-it/nmt-en-vi/raw/master/data/dev-2012-en-vi.tgz", # pylint: disable=line-too-long + ("tst2012.en", "tst2012.vi") +]] + + +# See this PR on github for some results with Transformer on this Problem. +# https://github.com/tensorflow/tensor2tensor/pull/611 + + +@registry.register_problem +class TranslateEnviIwslt32k(translate.TranslateProblem): + """Problem spec for IWSLT'15 En-Vi translation.""" + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENVI_TRAIN_DATASETS if train else _ENVI_TEST_DATASETS diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enzh.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enzh.py new file mode 100644 index 0000000000000000000000000000000000000000..83aad9773476b1a80e0a1dec4c2d3bc37594364d --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enzh.py @@ -0,0 +1,280 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
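The small-data variant above keeps roughly how_many_examples_to_sample training examples by accepting each example with probability 6000/614000, so a single pass keeps the target count in expectation, and a second pass tops up if the first fell short. A stand-alone sketch of the same idea; note that on Python 3.7+ a bare raise StopIteration inside a generator surfaces as RuntimeError under PEP 479, so the sketch stops with return instead:

    import random

    def subsample(make_iterator, corpus_size, target_count):
        # make_iterator is a callable returning a fresh iterator over all examples.
        # Keep each example with probability target_count / corpus_size; run a
        # second pass only if the first one yielded fewer than target_count.
        sample_prob = target_count / float(corpus_size)
        kept = 0
        for _ in range(2):
            for example in make_iterator():
                if random.random() > sample_prob:
                    continue
                kept += 1
                if kept > target_count:
                    return  # ends the generator without raising StopIteration
                yield example
            if kept >= target_count:
                return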
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from TensorFlow.nlp.transformer.data_generators import generator_utils +from TensorFlow.nlp.transformer.data_generators import problem +from TensorFlow.nlp.transformer.data_generators import text_encoder +from TensorFlow.nlp.transformer.data_generators import text_problems +from TensorFlow.nlp.transformer.data_generators import translate +from TensorFlow.nlp.transformer.utils import registry + +import tensorflow.compat.v1 as tf + + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + +# This is far from being the real WMT18 task - only toyset here +# you need to register to get UN data and CWT data. Also, by convention, +# this is EN to ZH - use translate_enzh_wmt8k_rev for ZH to EN task +# +# News Commentary, around 252k lines +# This dataset is only a small fraction of full WMT18 task +_STAT_MT_URL = "http://data.statmt.org/wmt18/translation-task/" +_NC_TRAIN_DATASETS = [[ + _STAT_MT_URL + "training-parallel-nc-v13.tgz", [ + "training-parallel-nc-v13/news-commentary-v13.zh-en.en", + "training-parallel-nc-v13/news-commentary-v13.zh-en.zh" + ] +]] + +# Test set from News Commentary. 2000 lines +_NC_TEST_DATASETS = [[ + _STAT_MT_URL + "dev.tgz", + ("dev/newsdev2017-enzh-src.en.sgm", "dev/newsdev2017-enzh-ref.zh.sgm") +]] + +# UN parallel corpus. 15,886,041 lines +# Visit source website to download manually: +# https://conferences.unite.un.org/UNCorpus +# +# NOTE: You need to register to download dataset from official source +# place into tmp directory e.g. /tmp/t2t_datagen/dataset.tgz +_UN_TRAIN_DATASETS = [[ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/UNv1.0.en-zh.tar" + ".gz", ["en-zh/UNv1.0.en-zh.en", "en-zh/UNv1.0.en-zh.zh"] +]] + +# CWMT corpus +# Visit source website to download manually: +# http://nlp.nju.edu.cn/cwmt-wmt/ +# +# casia2015: 1,050,000 lines +# casict2015: 2,036,833 lines +# datum2015: 1,000,003 lines +# datum2017: 1,999,968 lines +# NEU2017: 2,000,000 lines +# +# NOTE: You need to register to download dataset from official source +# place into tmp directory e.g. 
/tmp/t2t_datagen/dataset.tgz + +_CWMT_TRAIN_DATASETS = [[ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/casia2015/casia2015_en.txt", "cwmt/casia2015/casia2015_ch.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/casict2015/casict2015_en.txt", "cwmt/casict2015/casict2015_ch.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/neu2017/NEU_en.txt", "cwmt/neu2017/NEU_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2015/datum_en.txt", "cwmt/datum2015/datum_ch.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book1_en.txt", "cwmt/datum2017/Book1_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book2_en.txt", "cwmt/datum2017/Book2_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book3_en.txt", "cwmt/datum2017/Book3_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book4_en.txt", "cwmt/datum2017/Book4_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book5_en.txt", "cwmt/datum2017/Book5_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book6_en.txt", "cwmt/datum2017/Book6_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book7_en.txt", "cwmt/datum2017/Book7_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book8_en.txt", "cwmt/datum2017/Book8_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book9_en.txt", "cwmt/datum2017/Book9_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book10_en.txt", "cwmt/datum2017/Book10_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book11_en.txt", "cwmt/datum2017/Book11_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book12_en.txt", "cwmt/datum2017/Book12_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book13_en.txt", "cwmt/datum2017/Book13_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book14_en.txt", "cwmt/datum2017/Book14_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book15_en.txt", "cwmt/datum2017/Book15_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book16_en.txt", "cwmt/datum2017/Book16_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book17_en.txt", "cwmt/datum2017/Book17_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book18_en.txt", "cwmt/datum2017/Book18_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book19_en.txt", "cwmt/datum2017/Book19_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book20_en.txt", "cwmt/datum2017/Book20_cn.txt"] +]] + + +def get_filename(dataset): + return 
dataset[0][0].split("/")[-1] + + +@registry.register_problem +class TranslateEnzhWmt32k(translate.TranslateProblem): + """Problem spec for WMT En-Zh translation. + + Attempts to use full training dataset, which needs website + registration and downloaded manually from official sources: + + CWMT: + - http://nlp.nju.edu.cn/cwmt-wmt/ + - Website contains instructions for FTP server access. + - You'll need to download CASIA, CASICT, DATUM2015, DATUM2017, + NEU datasets + + UN Parallel Corpus: + - https://conferences.unite.un.org/UNCorpus + - You'll need to register your to download the dataset. + + NOTE: place into tmp directory e.g. /tmp/t2t_datagen/dataset.tgz + """ + + @property + def approx_vocab_size(self): + return 2**15 # 32k + + @property + def source_vocab_name(self): + return "%s.en" % self.vocab_filename + + @property + def target_vocab_name(self): + return "%s.zh" % self.vocab_filename + + def get_training_dataset(self, tmp_dir): + """UN Parallel Corpus and CWMT Corpus need to be downloaded manually. + + Append to training dataset if available + + Args: + tmp_dir: path to temporary dir with the data in it. + + Returns: + paths + """ + full_dataset = _NC_TRAIN_DATASETS + for dataset in [_CWMT_TRAIN_DATASETS, _UN_TRAIN_DATASETS]: + filename = get_filename(dataset) + tmp_filepath = os.path.join(tmp_dir, filename) + if tf.gfile.Exists(tmp_filepath): + full_dataset += dataset + else: + tf.logging.info("[TranslateEzhWmt] dataset incomplete, you need to " + "manually download %s" % filename) + return full_dataset + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + train_dataset = self.get_training_dataset(tmp_dir) + datasets = train_dataset if train else _NC_TEST_DATASETS + source_datasets = [[item[0], [item[1][0]]] for item in train_dataset] + target_datasets = [[item[0], [item[1][1]]] for item in train_dataset] + source_vocab = generator_utils.get_or_generate_vocab( + data_dir, + tmp_dir, + self.source_vocab_name, + self.approx_vocab_size, + source_datasets, + file_byte_budget=1e8, + max_subtoken_length=self.max_subtoken_length) + target_vocab = generator_utils.get_or_generate_vocab( + data_dir, + tmp_dir, + self.target_vocab_name, + self.approx_vocab_size, + target_datasets, + file_byte_budget=1e8, + max_subtoken_length=self.max_subtoken_length) + tag = "train" if train else "dev" + filename_base = "wmt_enzh_%sk_tok_%s" % (self.approx_vocab_size, tag) + data_path = translate.compile_data(tmp_dir, datasets, filename_base) + return text_problems.text2text_generate_encoded( + text_problems.text2text_txt_iterator(data_path + ".lang1", + data_path + ".lang2"), + source_vocab, target_vocab) + + def feature_encoders(self, data_dir): + source_vocab_filename = os.path.join(data_dir, self.source_vocab_name) + target_vocab_filename = os.path.join(data_dir, self.target_vocab_name) + source_token = text_encoder.SubwordTextEncoder(source_vocab_filename) + target_token = text_encoder.SubwordTextEncoder(target_vocab_filename) + return { + "inputs": source_token, + "targets": target_token, + } + + +@registry.register_problem +class TranslateEnzhWmt8k(TranslateEnzhWmt32k): + """Problem spec for WMT En-Zh translation. 
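get_training_dataset() above only adds the UN and CWMT corpora when their archives have already been placed in tmp_dir by hand; otherwise training falls back to the small News Commentary set. A hedged sketch of the same availability check with plain os.path (directory and helper names are illustrative):

    import os

    def available_optional_corpora(tmp_dir, optional_datasets):
        # optional_datasets is a list like [_CWMT_TRAIN_DATASETS, _UN_TRAIN_DATASETS];
        # an entry is used only if its archive (named after the last URL component,
        # as in get_filename above) already exists in tmp_dir.
        available = []
        for dataset in optional_datasets:
            filename = dataset[0][0].split("/")[-1]
            if os.path.exists(os.path.join(tmp_dir, filename)):
                available.append(dataset)
            else:
                print("missing %s - download it manually into %s" % (filename, tmp_dir))
        return available

    # e.g. available_optional_corpora("/tmp/t2t_datagen",
    #                                 [_CWMT_TRAIN_DATASETS, _UN_TRAIN_DATASETS])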
+ + This is far from being the real WMT17 task - only toyset here + """ + + @property + def approx_vocab_size(self): + return 2**13 # 8192 + + @property + def dataset_splits(self): + return [ + { + "split": problem.DatasetSplit.TRAIN, + "shards": 10, # this is a small dataset + }, + { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + } + ] + + def get_training_dataset(self, tmp_dir): + """Uses only News Commentary Dataset for training.""" + return _NC_TRAIN_DATASETS diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/datagen.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/datagen.py new file mode 100644 index 0000000000000000000000000000000000000000..fbbe57cbdffe2fcef6139e09773d3ed289f013a0 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/datagen.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python3 + +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - added shebang +# - organized imports +# - removed unsupported problem generators +# - renamed from t2t_datagen.py to datagen.py + +"""Produces the training and dev data for --problem into --data_dir. + +Produces sharded and shuffled TFRecord files of tensorflow.Example protocol +buffers for a variety of registered datasets. + +All Problems are registered with @registry.register_problem or are in +_SUPPORTED_PROBLEM_GENERATORS in this file. Each entry maps a string name +(selectable on the command-line with --problem) to a function that takes 2 +arguments - input_directory and mode (one of "train" or "dev") - and yields for +each training example a dictionary mapping string feature names to lists of +{string, int, float}. The generator will be run once for each mode. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import multiprocessing +import os +import random +import tempfile + +import numpy as np + +from TensorFlow.nlp.transformer.utils import problems as problems_lib # pylint: disable=unused-import +from TensorFlow.nlp.transformer.data_generators import generator_utils +from TensorFlow.nlp.transformer.utils import registry +from TensorFlow.nlp.transformer.utils import usr_dir + +# Improrting here to prevent pylint from ungrouped-imports warning. 
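The datagen docstring above describes the contract a registered Problem has to satisfy: a generator that yields one dictionary of features per example. A minimal sketch of such a problem following the Text2TextProblem conventions used elsewhere in data_generators/ (the class name and sample sentences are made up; it would live in a module passed via --t2t_usr_dir so the registration is picked up):

    from TensorFlow.nlp.transformer.data_generators import text_problems
    from TensorFlow.nlp.transformer.utils import registry

    @registry.register_problem
    class TranslateToyEnfr(text_problems.Text2TextProblem):
        """Tiny illustrative problem; yields raw text pairs for datagen to encode."""

        @property
        def approx_vocab_size(self):
            return 2**13

        @property
        def is_generate_per_split(self):
            return False  # one generator; train/eval splits are carved out of it

        def generate_samples(self, data_dir, tmp_dir, dataset_split):
            for src, tgt in [("hello", "bonjour"), ("thank you", "merci")]:
                yield {"inputs": src, "targets": tgt}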
+import tensorflow.compat.v1 as tf # pylint: disable=g-import-not-at-top + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("data_dir", "", "Data directory.") +flags.DEFINE_string("tmp_dir", "/tmp/t2t_datagen", + "Temporary storage directory.") +flags.DEFINE_string("problem", "", + "The name of the problem to generate data for.") +flags.DEFINE_string("exclude_problems", "", + "Comma-separates list of problems to exclude.") +flags.DEFINE_integer( + "num_shards", 0, "How many shards to use. Ignored for " + "registered Problems.") +flags.DEFINE_integer("max_cases", 0, + "Maximum number of cases to generate (unbounded if 0).") +flags.DEFINE_integer( + "env_problem_max_env_steps", 0, + "Maximum number of steps to take for environment-based problems. " + "Actions are chosen randomly") +flags.DEFINE_integer( + "env_problem_batch_size", 0, + "Number of environments to simulate for environment-based problems.") +flags.DEFINE_bool("only_list", False, + "If true, we only list the problems that will be generated.") +flags.DEFINE_integer("random_seed", 429459, "Random seed to use.") +flags.DEFINE_integer("task_id", -1, "For distributed data generation.") +flags.DEFINE_integer("task_id_start", -1, "For distributed data generation.") +flags.DEFINE_integer("task_id_end", -1, "For distributed data generation.") +flags.DEFINE_integer( + "num_concurrent_processes", None, + "Applies only to problems for which multiprocess_generate=True.") +flags.DEFINE_string( + "t2t_usr_dir", "", "Path to a Python module that will be imported. The " + "__init__.py file should include the necessary imports. " + "The imported files should contain registrations, " + "e.g. @registry.register_problem calls, that will then be " + "available to t2t-datagen.") +flags.DEFINE_bool("with_padding", False, "If true dataset features will be padded") + +# Mapping from problems that we can generate data for to their generators. +_SUPPORTED_PROBLEM_GENERATORS = {} + + +def set_random_seed(): + """Set the random seed from flag everywhere.""" + tf.set_random_seed(FLAGS.random_seed) + random.seed(FLAGS.random_seed) + np.random.seed(FLAGS.random_seed) + + +def main(_): + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + + # Calculate the list of problems to generate. + problems = sorted( + list(_SUPPORTED_PROBLEM_GENERATORS) + registry.list_base_problems() + + registry.list_env_problems()) + for exclude in FLAGS.exclude_problems.split(","): + if exclude: + problems = [p for p in problems if exclude not in p] + if FLAGS.problem and FLAGS.problem[-1] == "*": + problems = [p for p in problems if p.startswith(FLAGS.problem[:-1])] + elif FLAGS.problem and "," in FLAGS.problem: + problems = [p for p in problems if p in FLAGS.problem.split(",")] + elif FLAGS.problem: + problems = [p for p in problems if p == FLAGS.problem] + else: + problems = [] + + # Remove TIMIT if paths are not given. + if getattr(FLAGS, "timit_paths", None): + problems = [p for p in problems if "timit" not in p] + # Remove parsing if paths are not given. 
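main() above resolves --problem and --exclude_problems in three ways: a trailing "*" acts as a prefix wildcard, a comma-separated value is an explicit list, and anything else must match exactly. The same rules as a small, self-contained helper (names are illustrative):

    def select_problems(all_problems, problem_flag, exclude_flag=""):
        # Mirrors the --problem / --exclude_problems handling in main() above.
        problems = sorted(all_problems)
        for exclude in exclude_flag.split(","):
            if exclude:
                problems = [p for p in problems if exclude not in p]
        if problem_flag and problem_flag[-1] == "*":       # prefix wildcard
            return [p for p in problems if p.startswith(problem_flag[:-1])]
        if problem_flag and "," in problem_flag:           # explicit list
            return [p for p in problems if p in problem_flag.split(",")]
        if problem_flag:                                   # exact match
            return [p for p in problems if p == problem_flag]
        return []

    # select_problems(["translate_enro_wmt8k", "translate_enro_wmt32k"],
    #                 "translate_enro_wmt*") keeps both registered En-Ro problems.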
+ if getattr(FLAGS, "parsing_path", None): + problems = [p for p in problems if "parsing_english_ptb" not in p] + + if not problems: + problems_str = "\n * ".join( + sorted( + list(_SUPPORTED_PROBLEM_GENERATORS) + + registry.list_base_problems() + registry.list_env_problems())) + error_msg = ("You must specify one of the supported problems to " + "generate data for:\n * " + problems_str + "\n") + error_msg += ("TIMIT and parsing need data_sets specified with " + "--timit_paths and --parsing_path.") + raise ValueError(error_msg) + + if not FLAGS.data_dir: + FLAGS.data_dir = tempfile.gettempdir() + tf.logging.warning( + "It is strongly recommended to specify --data_dir. " + "Data will be written to default data_dir=%s.", FLAGS.data_dir) + FLAGS.data_dir = os.path.expanduser(FLAGS.data_dir) + tf.gfile.MakeDirs(FLAGS.data_dir) + + tf.logging.info("Generating problems:\n%s" % + registry.display_list_by_prefix(problems, starting_spaces=4)) + if FLAGS.only_list: + return + for problem in problems: + set_random_seed() + + if problem in _SUPPORTED_PROBLEM_GENERATORS: + generate_data_for_problem(problem) + elif problem in registry.list_base_problems(): + generate_data_for_registered_problem(problem) + elif problem in registry.list_env_problems(): + generate_data_for_env_problem(problem) + else: + tf.logging.error("Problem %s is not a supported problem for datagen.", + problem) + + +def generate_data_for_problem(problem): + """Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS.""" + training_gen, dev_gen, test_gen = _SUPPORTED_PROBLEM_GENERATORS[problem] + + num_train_shards = FLAGS.num_shards or 10 + tf.logging.info("Generating training data for %s.", problem) + train_output_files = generator_utils.train_data_filenames( + problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir, + num_train_shards) + generator_utils.generate_files(training_gen(), train_output_files, + FLAGS.max_cases) + num_dev_shards = int(num_train_shards * 0.1) + tf.logging.info("Generating development data for %s.", problem) + dev_output_files = generator_utils.dev_data_filenames( + problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir, + num_dev_shards) + generator_utils.generate_files(dev_gen(), dev_output_files) + num_test_shards = int(num_train_shards * 0.1) + test_output_files = [] + test_gen_data = test_gen() + if test_gen_data is not None: + tf.logging.info("Generating test data for %s.", problem) + test_output_files = generator_utils.test_data_filenames( + problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir, + num_test_shards) + generator_utils.generate_files(test_gen_data, test_output_files) + all_output_files = train_output_files + dev_output_files + test_output_files + generator_utils.shuffle_dataset(all_output_files) + + +def generate_data_in_process(arg): + problem_name, data_dir, tmp_dir, task_id = arg + problem = registry.problem(problem_name) + problem.generate_data(data_dir, tmp_dir, task_id) + + +def generate_data_for_env_problem(problem_name): + """Generate data for `EnvProblem`s.""" + assert FLAGS.env_problem_max_env_steps > 0, ("--env_problem_max_env_steps " + "should be greater than zero") + assert FLAGS.env_problem_batch_size > 0, ("--env_problem_batch_size should be" + " greather than zero") + problem = registry.env_problem(problem_name) + task_id = None if FLAGS.task_id < 0 else FLAGS.task_id + data_dir = os.path.expanduser(FLAGS.data_dir) + tmp_dir = os.path.expanduser(FLAGS.tmp_dir) + # TODO(msaffar): Handle large values for env_problem_batch_size where we + # cannot create 
that many environments within the same process. + problem.initialize(batch_size=FLAGS.env_problem_batch_size) + env_problem_utils.play_env_problem_randomly( + problem, num_steps=FLAGS.env_problem_max_env_steps) + problem.generate_data(data_dir=data_dir, tmp_dir=tmp_dir, task_id=task_id) + + +def generate_data_for_registered_problem(problem_name): + """Generate data for a registered problem.""" + tf.logging.info("Generating data for %s.", problem_name) + if FLAGS.num_shards: + raise ValueError("--num_shards should not be set for registered Problem.") + problem = registry.problem(problem_name) + task_id = None if FLAGS.task_id < 0 else FLAGS.task_id + data_dir = os.path.expanduser(FLAGS.data_dir) + tmp_dir = os.path.expanduser(FLAGS.tmp_dir) + if task_id is None and problem.multiprocess_generate: + if FLAGS.task_id_start != -1: + assert FLAGS.task_id_end != -1 + task_id_start = FLAGS.task_id_start + task_id_end = FLAGS.task_id_end + else: + task_id_start = 0 + task_id_end = problem.num_generate_tasks + pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes) + problem.prepare_to_generate(data_dir, tmp_dir) + args = [(problem_name, data_dir, tmp_dir, task_id) + for task_id in range(task_id_start, task_id_end)] + pool.map(generate_data_in_process, args) + else: + problem.generate_data(data_dir, tmp_dir, task_id) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/decoder.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..5ae5eb702e5e4df46b14de004004c115cfff265a --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/decoder.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python3 + +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021-2022 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - renamed from t2t_decoder.py to decoder.py +# - added shebang +# - organized imports +# - added support for HPU +# - renamed t2t_trainer to trainer +# - added use_hpu hparam +# - added workarounds to run on HPU +# - added support for recipe cache +# - added support for fast inference +# - added support for horovod + +r"""Decode from trained T2T models. + +This binary performs inference using the Estimator API. + +Example usage to decode from dataset: + + ./decoder.py \ + --data_dir ~/data \ + --problem=algorithmic_identity_binary40 \ + --model=transformer + --hparams_set=transformer_base + +Set FLAGS.decode_interactive or FLAGS.decode_from_file for alternative decode +sources. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import shutil +from TensorFlow.nlp.transformer import trainer +from TensorFlow.nlp.transformer.data_generators import problem # pylint: disable=unused-import +from TensorFlow.nlp.transformer.data_generators import text_encoder +from TensorFlow.nlp.transformer.utils import decoding +from TensorFlow.nlp.transformer.utils import registry +from TensorFlow.nlp.transformer.utils import trainer_lib +from TensorFlow.nlp.transformer.utils import usr_dir + +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +# Additional flags in trainer.py and utils/flags.py +flags.DEFINE_string("checkpoint_path", None, + "Path to the model checkpoint. Overrides output_dir.") +flags.DEFINE_bool("keep_timestamp", False, + "Set the mtime of the decoded file to the " + "checkpoint_path+'.index' mtime.") +flags.DEFINE_bool("decode_interactive", False, + "Interactive local inference mode.") +flags.DEFINE_integer("decode_shards", 1, "Number of decoding replicas.") +flags.DEFINE_string("score_file", "", "File to score. Each line in the file " + "must be in the format input \t target.") +flags.DEFINE_bool("decode_in_memory", False, "Decode in memory.") +flags.DEFINE_bool("disable_grappler_optimizations", False, + "Disable Grappler if need be to avoid tensor format errors.") +flags.DEFINE_bool("use_fast_inference", True, "Use fast inference with static shapes") + +def create_hparams(): + hparams_path = None + if FLAGS.output_dir: + hparams_path = os.path.join(FLAGS.output_dir, "hparams.json") + return trainer_lib.create_hparams( + FLAGS.hparams_set, + FLAGS.hparams, + data_dir=os.path.expanduser(FLAGS.data_dir), + problem_name=FLAGS.problem, + hparams_path=hparams_path) + + +def create_decode_hparams(): + decode_hp = decoding.decode_hparams(FLAGS.decode_hparams) + decode_hp.shards = FLAGS.decode_shards + decode_hp.shard_id = FLAGS.worker_id + decode_in_memory = FLAGS.decode_in_memory or decode_hp.decode_in_memory + decode_hp.decode_in_memory = decode_in_memory + decode_hp.decode_to_file = FLAGS.decode_to_file + decode_hp.decode_reference = FLAGS.decode_reference + return decode_hp + + +def decode(estimator, hparams, decode_hp): + """Decode from estimator. Interactive, from file, or from dataset.""" + if FLAGS.decode_interactive: + if estimator.config.use_tpu: + raise ValueError("TPU can only decode from dataset.") + decoding.decode_interactively(estimator, hparams, decode_hp, + checkpoint_path=FLAGS.checkpoint_path) + elif FLAGS.decode_from_file: + decoding.decode_from_file(estimator, FLAGS.decode_from_file, hparams, + decode_hp, FLAGS.decode_to_file, + checkpoint_path=FLAGS.checkpoint_path) + if FLAGS.checkpoint_path and FLAGS.keep_timestamp: + ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + ".index") + os.utime(FLAGS.decode_to_file, (ckpt_time, ckpt_time)) + else: + decoding.decode_from_dataset( + estimator, + FLAGS.problem, + hparams, + decode_hp, + decode_to_file=FLAGS.decode_to_file, + dataset_split="test" if FLAGS.eval_use_test_set else None, + checkpoint_path=FLAGS.checkpoint_path) + + +def score_file(filename): + """Score each line in a file and return the scores.""" + # Prepare model. + hparams = create_hparams() + encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir) + has_inputs = "inputs" in encoders + + # Prepare features for feeding into the model. 
+ if has_inputs: + inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. + batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D. + targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. + batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D. + if has_inputs: + features = {"inputs": batch_inputs, "targets": batch_targets} + else: + features = {"targets": batch_targets} + + # Prepare the model and the graph when model runs on features. + model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL) + _, losses = model(features) + saver = tf.train.Saver() + + with tf.Session() as sess: + # Load weights from checkpoint. + if FLAGS.checkpoint_path is None: + ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir) + ckpt = ckpts.model_checkpoint_path + else: + ckpt = FLAGS.checkpoint_path + saver.restore(sess, ckpt) + # Run on each line. + with tf.gfile.Open(filename) as f: + lines = f.readlines() + results = [] + for line in lines: + tab_split = line.split("\t") + if len(tab_split) > 2: + raise ValueError("Each line must have at most one tab separator.") + if len(tab_split) == 1: + targets = tab_split[0].strip() + else: + targets = tab_split[1].strip() + inputs = tab_split[0].strip() + # Run encoders and append EOS symbol. + targets_numpy = encoders["targets"].encode( + targets) + [text_encoder.EOS_ID] + if has_inputs: + inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID] + # Prepare the feed. + if has_inputs: + feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy} + else: + feed = {targets_ph: targets_numpy} + # Get the score. + np_loss = sess.run(losses["training"], feed) + results.append(np_loss) + return results + +def get_workaround_flag(name): + return f'WA_{name}' + +def is_workaround_enabled(name): + flag = get_workaround_flag(name) + is_enabled = os.environ.get(flag, 'true') == 'true' + if is_enabled: + print(f"Warning! Workaround {flag} is enabled. Run with {flag}=false to disable it.") + return is_enabled + +def main(_): + tf.disable_v2_behavior() + tf.enable_resource_variables() + tf.logging.set_verbosity(tf.logging.INFO) + trainer_lib.set_random_seed(FLAGS.random_seed) + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + + if FLAGS.use_hpu: + from habana_frameworks.tensorflow import load_habana_module + load_habana_module() + + hvd = trainer.init_multinode() + + if FLAGS.use_hpu: + if FLAGS.recipe_cache: + trainer.prepare_recipe_cache() + if FLAGS.use_bf16: + if not is_workaround_enabled('FORCE_FP32'): + os.environ['TF_BF16_CONVERSION'] = FLAGS.bf16_config_path + else: + print("Warning! BF16 precision is not supported in inference mode. 
Switching back to fp32...") + if is_workaround_enabled('DISABLE_DYNAMIC_SHAPES'): + os.environ['TF_ENABLE_DYNAMIC_SHAPES'] = 'false' + + if FLAGS.score_file: + filename = os.path.expanduser(FLAGS.score_file) + if not tf.gfile.Exists(filename): + raise ValueError("The file to score doesn't exist: %s" % filename) + results = score_file(filename) + if not FLAGS.decode_to_file: + raise ValueError("To score a file, specify --decode_to_file for results.") + write_file = tf.gfile.Open(os.path.expanduser(FLAGS.decode_to_file), "w") + for score in results: + write_file.write("%.6f\n" % score) + write_file.close() + return + + hp = create_hparams() + hp.add_hparam("use_hpu", FLAGS.use_hpu) + hp.add_hparam("use_horovod", FLAGS.use_horovod) + decode_hp = create_decode_hparams() + decode_hp.add_hparam("use_horovod", hp.use_horovod) + + if FLAGS.use_horovod: + hp.add_hparam("hvd_worker_id", hvd.rank()) + hp.add_hparam("hvd_size", hvd.size()) + decode_hp.add_hparam("hvd_worker_id", hp.hvd_worker_id) + decode_hp.add_hparam("hvd_size", hp.hvd_size) + run_config = trainer.create_run_config(hp) + if FLAGS.disable_grappler_optimizations: + run_config.session_config.graph_options.rewrite_options.disable_meta_optimizer = True + + assert FLAGS.use_fast_inference or not FLAGS.use_horovod, "Multinode inference is only supported with use_fast_inference=True" + + # summary-hook in tf.estimator.EstimatorSpec requires + # hparams.model_dir to be set. + hp.add_hparam("model_dir", run_config.model_dir) + + estimator = trainer_lib.create_estimator( + FLAGS.model, + hp, + run_config, + decode_hparams=decode_hp, + use_tpu=FLAGS.use_tpu) + + decode(estimator, hp, decode_hp) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/__init__.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b0a2368a3e35eb4db097267915c30a0dc569953 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/area_attention.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/area_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f4b9089a0ac7f3673c9a2ba5545002c4ca5540 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/area_attention.py @@ -0,0 +1,433 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
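decoder.py above gates two HPU workarounds behind environment variables derived as "WA_" plus the workaround name, with anything other than the string "false" treated as enabled. A sketch of turning them off (values shown are illustrative; the variables must be set before the script starts):

    import os

    # Disable the fp32 fallback so TF_BF16_CONVERSION from --bf16_config_path applies.
    os.environ["WA_FORCE_FP32"] = "false"
    # Keep TF dynamic shapes enabled instead of forcing TF_ENABLE_DYNAMIC_SHAPES=false.
    os.environ["WA_DISABLE_DYNAMIC_SHAPES"] = "false"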
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for area attention.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from six.moves import range # pylint: disable=redefined-builtin +from TensorFlow.nlp.transformer.layers import common_layers +import tensorflow.compat.v1 as tf + + +def lengths_to_area_mask(feature_length, length, max_area_size): + """Generates a non-padding mask for areas based on lengths. + + Args: + feature_length: a tensor of [batch_size] + length: the length of the batch + max_area_size: the maximum area size considered + Returns: + mask: a tensor in shape of [batch_size, num_areas] + """ + + paddings = tf.cast(tf.expand_dims( + tf.logical_not( + tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32) + _, _, area_sum, _, _ = compute_area_features(paddings, + max_area_width=max_area_size) + mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2]) + return mask + + +def _pool_one_shape(features_2d, area_width, area_height, batch_size, + width, height, depth, fn=tf.reduce_max, name=None): + """Pools for an area in features_2d. + + Args: + features_2d: a Tensor in a shape of [batch_size, height, width, depth]. + area_width: the max width allowed for an area. + area_height: the max height allowed for an area. + batch_size: the batch size. + width: the width of the memory. + height: the height of the memory. + depth: the depth of the features. + fn: the TF function for the pooling. + name: the op name. + Returns: + pool_tensor: A Tensor of shape [batch_size, num_areas, depth] + """ + with tf.name_scope(name, default_name="pool_one_shape"): + images = [] + for y_shift in range(area_height): + image_height = tf.maximum(height - area_height + 1 + y_shift, 0) + for x_shift in range(area_width): + image_width = tf.maximum(width - area_width + 1 + x_shift, 0) + area = features_2d[:, y_shift:image_height, x_shift:image_width, :] + flatten_area = tf.reshape(area, [batch_size, -1, depth, 1]) + images.append(flatten_area) + image_tensor = tf.concat(images, axis=3) + max_tensor = fn(image_tensor, axis=3) + return max_tensor + + +def basic_pool(features, max_area_width, max_area_height=1, height=1, + fn=tf.reduce_max, name=None): + """Pools for each area based on a given pooling function (fn). + + Args: + features: a Tensor in a shape of [batch_size, height * width, depth]. + max_area_width: the max width allowed for an area. + max_area_height: the max height allowed for an area. + height: the height of the image. + fn: the TF function for the pooling. + name: the namescope. 
+ Returns: + pool_results: A Tensor of shape [batch_size, num_areas, depth] + area_heights: A Tensor of shape [batch_size, num_areas, 1] + area_widths: A Tensor of shape [batch_size, num_areas, 1] + """ + with tf.name_scope(name, default_name="basic_pool"): + feature_shape = common_layers.shape_list(features) + batch_size = feature_shape[0] + length = feature_shape[-2] + depth = feature_shape[-1] + width = length // height + features_2d = tf.reshape(features, [batch_size, height, width, depth]) + height_list = [] + width_list = [] + pool_list = [] + size_tensor = tf.ones_like(features_2d[:, :, :, 0], dtype=tf.int32) + for area_height in range(max_area_height): + for area_width in range(max_area_width): + pool_tensor = _pool_one_shape(features_2d, + area_width=area_width + 1, + area_height=area_height + 1, + batch_size=batch_size, + width=width, + height=height, + depth=depth, + fn=fn) + pool_list.append( + tf.reshape(pool_tensor, [batch_size, -1, depth])) + height_list.append( + tf.reshape( + size_tensor[:, area_height:, area_width:] *\ + (area_height + 1), [batch_size, -1])) + width_list.append( + tf.reshape( + size_tensor[:, area_height:, area_width:] *\ + (area_width + 1), [batch_size, -1])) + pool_results = tf.concat(pool_list, axis=1) + area_heights = tf.expand_dims(tf.concat(height_list, axis=1), 2) + area_widths = tf.expand_dims(tf.concat(width_list, axis=1), 2) + return pool_results, area_heights, area_widths + + +def _compute_sum_image(features, max_area_width, max_area_height=1, height=1, + name=None): + """Computes area sums for features. + + Args: + features: a Tensor in a shape of [batch_size, height * width, depth]. + max_area_width: the max width allowed for an area. + max_area_height: the max height allowed for an area. + height: the height of the image. + name: the namescope. 
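For a flat memory (height = 1), an "area" is simply every contiguous span of up to max_area_width positions, and basic_pool above reduces each span with the given pooling function. A small numpy sketch of that enumeration, assuming nothing beyond the shapes described in the docstrings:

    import numpy as np

    def enumerate_area_max(features, max_area_width):
        # features: [length, depth]; returns [num_areas, depth] where
        # num_areas = sum_{w=1..W} (length - w + 1), in the same order as
        # basic_pool with height=1 (all width-1 areas, then width-2, and so on).
        length = features.shape[0]
        areas = []
        for width in range(1, max_area_width + 1):
            for start in range(length - width + 1):
                areas.append(features[start:start + width].max(axis=0))
        return np.stack(areas)

    x = np.arange(8, dtype=np.float32).reshape(4, 2)      # length 4, depth 2
    print(enumerate_area_max(x, max_area_width=2).shape)  # (7, 2): 4 + 3 areas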
+ Returns: + sum_image: A Tensor of shape [batch_size, num_areas, depth] + area_heights: A Tensor of shape [batch_size, num_areas, 1] + area_widths: A Tensor of shape [batch_size, num_areas, 1] + """ + with tf.name_scope(name, default_name="compute_sum_image"): + feature_shape = common_layers.shape_list(features) + batch_size = feature_shape[0] + length = feature_shape[-2] + depth = feature_shape[-1] + width = length // height + features_2d = tf.reshape(features, [batch_size, height, width, depth]) + width_cum = tf.cumsum(features_2d, axis=-2, name="compute_integral_h") + integral_image = tf.cumsum(width_cum, axis=-3, name="compute_integral_v") + padded_image = tf.pad( + integral_image, [[0, 0], [1, 0], [1, 0], [0, 0]], constant_values=0) + height_list = [] + width_list = [] + dst_images = [] + src_images_diag = [] + src_images_h = [] + src_images_v = [] + size_tensor = tf.ones_like(padded_image[:, :, :, 0], + dtype=tf.int32) + for area_height in range(max_area_height): + for area_width in range(max_area_width): + dst_images.append( + tf.reshape( + padded_image[:, area_height + 1:, area_width + 1:, :], + [batch_size, -1, depth])) + src_images_diag.append( + tf.reshape( + padded_image[:, :-area_height - 1, :-area_width - 1, :], + [batch_size, -1, depth])) + src_images_h.append( + tf.reshape( + padded_image[:, area_height + 1:, :-area_width - 1, :], + [batch_size, -1, depth])) + src_images_v.append( + tf.reshape( + padded_image[:, :-area_height - 1, area_width + 1:, :], + [batch_size, -1, depth])) + height_list.append( + tf.reshape( + size_tensor[:, area_height + 1:, area_width + 1:] *\ + (area_height + 1), [batch_size, -1])) + width_list.append( + tf.reshape( + size_tensor[:, area_height + 1:, area_width + 1:] *\ + (area_width + 1), [batch_size, -1])) + sum_image = tf.subtract( + tf.concat(dst_images, axis=1) + tf.concat(src_images_diag, axis=1), + tf.concat(src_images_v, axis=1) + tf.concat(src_images_h, axis=1)) + area_heights = tf.expand_dims(tf.concat(height_list, axis=1), 2) + area_widths = tf.expand_dims(tf.concat(width_list, axis=1), 2) + return sum_image, area_heights, area_widths + + +def compute_area_features(features, max_area_width, max_area_height=1, height=1, + epsilon=1e-6): + """Computes features for each area. + + Args: + features: a Tensor in a shape of [batch_size, height * width, depth]. + max_area_width: the max width allowed for an area. + max_area_height: the max height allowed for an area. + height: the height of the image. + epsilon: the epsilon added to the variance for computing standard deviation. 
+ Returns: + area_mean: A Tensor of shape [batch_size, num_areas, depth] + area_std: A Tensor of shape [batch_size, num_areas, depth] + area_sum: A Tensor of shape [batch_size, num_areas, depth] + area_heights: A Tensor of shape [batch_size, num_areas, 1] + area_widths: A Tensor of shape [batch_size, num_areas, 1] + """ + with tf.name_scope("compute_area_features"): + tf.logging.info("area_attention compute_area_features: %d x %d", + max_area_height, max_area_width) + area_sum, area_heights, area_widths = _compute_sum_image( + features, max_area_width=max_area_width, + max_area_height=max_area_height, height=height) + area_squared_sum, _, _ = _compute_sum_image( + tf.pow(features, 2), max_area_width=max_area_width, + max_area_height=max_area_height, height=height) + sizes = tf.multiply(area_heights, area_widths) + float_area_sizes = tf.cast(sizes, tf.float32) + area_mean = tf.div(area_sum, float_area_sizes) + s2_n = tf.div(area_squared_sum, float_area_sizes) + area_variance = tf.subtract(s2_n, tf.pow(area_mean, 2)) + area_std = tf.sqrt(tf.abs(area_variance) + epsilon) + return area_mean, area_std, area_sum, area_heights, area_widths + + +def compute_area_key(features, max_area_width, max_area_height=1, height=1, + mode="mean", training=True, name=None): + """Computes the key for each area. + + Args: + features: a Tensor in a shape of [batch_size, height * width, depth]. + max_area_width: the max width allowed for an area. + max_area_height: the max height allowed for an area. + height: the height of the image. + mode: whether to combine different area features or only use + the vector mean of each area, which can be "mean", "concat", "sum", + "sample_concat", and "sample_sum". + training: indicating if it is in the training mode. + name: the name for setting the variable scope. 
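The area statistics above come from a summed-area table (integral image): after two cumulative sums, the sum over any rectangle is a four-corner lookup, and the mean and standard deviation then follow from sum(x) and sum(x**2) exactly as in compute_area_features. A small numpy check of that identity (the values are arbitrary):

    import numpy as np

    def rect_sum(padded_integral, top, left, h, w):
        # padded_integral has one extra leading row and column of zeros, so
        # padded_integral[r, c] == x[:r, :c].sum().
        return (padded_integral[top + h, left + w] - padded_integral[top, left + w]
                - padded_integral[top + h, left] + padded_integral[top, left])

    x = np.arange(12, dtype=np.float64).reshape(3, 4)
    integral = np.pad(x.cumsum(axis=1).cumsum(axis=0), ((1, 0), (1, 0)))
    integral_sq = np.pad((x ** 2).cumsum(axis=1).cumsum(axis=0), ((1, 0), (1, 0)))

    h, w, top, left = 2, 3, 1, 0                     # the 2x3 area x[1:3, 0:3]
    area_sum = rect_sum(integral, top, left, h, w)   # 42.0
    area_mean = area_sum / (h * w)                   # 7.0
    area_var = rect_sum(integral_sq, top, left, h, w) / (h * w) - area_mean ** 2
    area_std = np.sqrt(np.abs(area_var) + 1e-6)      # ~2.16, as in compute_area_features
    assert area_sum == x[1:3, 0:3].sum()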
+ Returns: + area_key: a Tensor in the shape of [batch_size, num_areas, depth] + """ + + tf.logging.info("area_attention mode=%s", mode) + area_mean, area_std, _, area_heights, area_widths =\ + compute_area_features(features, max_area_width=max_area_width, + max_area_height=max_area_height, height=height) + if mode == "mean": + return area_mean + elif mode == "max": + area_max, _, _ = basic_pool(features, max_area_width=max_area_width, + max_area_height=max_area_height, height=height) + return area_max + elif mode == "sample": + if training: + area_mean += (area_std * tf.random_normal(tf.shape(area_std))) + return area_mean + with tf.variable_scope( + name, default_name="combine_area_features", + values=[area_mean, area_std, area_heights, area_widths]): + depth = common_layers.shape_list(area_mean)[-1] + height_embed = tf.nn.embedding_lookup( + params=tf.get_variable("area_height_emb", + [max_area_height, depth // 2]), + ids=area_heights[:, :, 0] - 1) + width_embed = tf.nn.embedding_lookup( + params=tf.get_variable("area_width_emb", + [max_area_width, depth // 2]), + ids=area_widths[:, :, 0] - 1) + size_embed = tf.concat([height_embed, width_embed], -1) + if mode == "concat": + feature_concat = tf.concat([area_mean, area_std, size_embed], -1) + elif mode == "max_concat": + area_max, _, _ = basic_pool(features, max_area_width=max_area_width, + max_area_height=max_area_height, + height=height) + feature_concat = tf.concat([area_max, size_embed], -1) + elif mode == "sum": + feature_concat = size_embed + area_mean + area_std + elif mode == "sample_concat": + if training: + area_mean += (area_std * tf.random_normal(tf.shape(area_std))) + feature_concat = tf.concat([area_mean, size_embed], -1) + elif mode == "sample_sum": + if training: + area_mean += (area_std * tf.random_normal(tf.shape(area_std))) + feature_concat = area_mean + size_embed + else: + raise ValueError("Unsupported area key mode=%s" % mode) + feature_hidden = tf.layers.dense(inputs=feature_concat, + units=depth, + activation=tf.nn.relu) + area_key = tf.layers.dense(feature_hidden, units=depth) + return area_key + + +def dot_product_area_attention(q, + k, + v, + bias, + dropout_rate=0.0, + image_shapes=None, + name=None, + attention_image_summary=None, + save_weights_to=None, + dropout_broadcast_dims=None, + max_area_width=1, + max_area_height=1, + memory_height=1, + area_key_mode="mean", + area_value_mode="sum", + top_k_areas=0, + area_temperature=1.0, + training=True): + """Dot-product area attention. + + Args: + q: Tensor with shape [..., length_q, depth_k]. + k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must + match with q. + v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must + match with q. + bias: bias Tensor (see attention_bias()) + dropout_rate: a float. + image_shapes: optional tuple of integer scalars. + see comments for attention_image_summary() + name: an optional string + attention_image_summary: the callback for making image summary of attention. + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + dropout_broadcast_dims: an optional list of integers less than rank of q. + Specifies in which dimensions to broadcast the dropout decisions. + max_area_width: the max width allowed for an area. + max_area_height: the max height allowed for an area. + memory_height: the height of the memory. 
+ area_key_mode: the mode for computing area keys, which can be "mean", + "concat", "sum", "sample_concat", and "sample_sum". + area_value_mode: the mode for computing area values, which can be either + "mean", or "sum". + top_k_areas: Use the top key areas for attention. + area_temperature: the temperature for attention softmax. + training: indicating if it is in the training mode. + Returns: + Tensor with shape [..., length_q, depth_v]. + """ + + tf.logging.info("dot_product_area_attention: " + "area_h=%d, area_w=%d, mem_h=%d, " + "area_key_mode=%s, area_value_mode=%s, " + "area_temperature=%f", + max_area_height, max_area_width, memory_height, + area_key_mode, area_value_mode, + area_temperature) + with tf.variable_scope( + name, default_name="dot_product_area_attention", + values=[q, k, v]) as scope: + mem_shape = common_layers.shape_list(k) + batch_size = mem_shape[0] + head_size = mem_shape[1] + length = mem_shape[2] + depth = mem_shape[3] + k_area = compute_area_key( + tf.reshape(k, [-1, length, depth]), + max_area_width=max_area_width, + max_area_height=max_area_height, + height=memory_height, + mode=area_key_mode, + training=training) + if area_value_mode == "mean": + v_area, _, _, _, _ = compute_area_features( + tf.reshape(v, [-1, length, depth]), max_area_width=max_area_width, + max_area_height=max_area_height, height=memory_height) + elif area_value_mode == "max": + v_area, _, _ = basic_pool(tf.reshape(v, [-1, length, depth]), + max_area_width=max_area_width, + max_area_height=max_area_height, + height=memory_height, + fn=tf.reduce_max) + elif area_value_mode == "sum": + _, _, v_area, _, _ = compute_area_features( + tf.reshape(v, [-1, length, depth]), max_area_width=max_area_width, + max_area_height=max_area_height, height=memory_height) + else: + raise ValueError("Unsupported area value mode=%s" % area_value_mode) + k = tf.reshape(k_area, [batch_size, head_size, -1, depth]) + v = tf.reshape(v_area, [batch_size, head_size, -1, depth]) + logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv] + if bias is not None: + bias = common_layers.cast_like(bias, logits) + with tf.name_scope("compute_area_att_bias", values=[bias]): + bias_shape = common_layers.shape_list(bias) + mem_length = bias_shape[-1] + bias_values = tf.reshape( + tf.cast(tf.less(bias, -1), tf.float32), [-1, mem_length, 1]) + _, _, padding_sum, _, _ = compute_area_features( + bias_values, max_area_width=max_area_width, + max_area_height=max_area_height, height=memory_height) + bias = tf.where( + tf.cast(tf.to_int32(padding_sum), tf.bool), + tf.fill(tf.shape(padding_sum), -np.inf), + tf.zeros_like(padding_sum, dtype=tf.float32)) + bias = tf.reshape(bias, + [bias_shape[0], bias_shape[1], + bias_shape[2], -1]) + logits += bias + logits = logits / area_temperature + weights = tf.nn.softmax(logits, name="attention_weights") + if top_k_areas > 0: + tf.logging.info("area_attention top_k_areas=%d", top_k_areas) + top_k = tf.minimum(common_layers.shape_list(weights)[-1], top_k_areas) + top_weights, _ = tf.nn.top_k(weights, k=top_k) + min_values = tf.reduce_min(top_weights, -1, keepdims=True) + weights = tf.where(tf.greater_equal(weights, min_values), + weights, tf.zeros_like(weights)) + weights = tf.div(weights, tf.reduce_sum(weights, -1, keepdims=True)) + if save_weights_to is not None: + save_weights_to[scope.name] = weights + save_weights_to[scope.name + "/logits"] = logits + # Drop out attention links for each head. 
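When top_k_areas is positive, the softmax weights above are pruned so that each query attends only to its k strongest areas, and the surviving weights are renormalized. A numpy sketch of that filtering step:

    import numpy as np

    def keep_top_k(weights, k):
        # Keep each row's k largest weights, zero the rest, renormalize to sum to 1.
        top_k = np.sort(weights, axis=-1)[..., -k:]
        min_kept = top_k.min(axis=-1, keepdims=True)
        filtered = np.where(weights >= min_kept, weights, 0.0)
        return filtered / filtered.sum(axis=-1, keepdims=True)

    w = np.array([[0.05, 0.40, 0.15, 0.30, 0.10]])
    print(keep_top_k(w, k=2))   # only 0.40 and 0.30 survive: [0, 0.571, 0, 0.429, 0]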
+ weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and attention_image_summary: + attention_image_summary(weights, image_shapes) + return tf.matmul(weights, v) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/common_attention.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/common_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..a75d94082c5897283e412576e2b9c402889c149d --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/common_attention.py @@ -0,0 +1,6233 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2022 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - changed tf.python.ops.alias_inplace_update to tf.add + tf.scatter_nd + +"""Utilities for attention.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import functools +import itertools +import math +import operator + +import numpy as np + +from six.moves import range # pylint: disable=redefined-builtin +from six.moves import zip # pylint: disable=redefined-builtin + +from TensorFlow.nlp.transformer.layers import area_attention +from TensorFlow.nlp.transformer.layers import common_layers +from TensorFlow.nlp.transformer.utils import contrib +from TensorFlow.nlp.transformer.utils import expert_utils + +import tensorflow.compat.v1 as tf +import tensorflow_probability as tfp + +# pylint: disable=g-direct-tensorflow-import +from tensorflow.python.framework import function +from tensorflow.python.ops import inplace_ops +# pylint: enable=g-direct-tensorflow-import + + +# TODO(lukaszkaiser): remove this function when not needed any more. +def layers(): + return common_layers.layers() + +def to_float(x): + """Cast x to float; created because tf.to_float is deprecated.""" + return tf.cast(x, tf.float32) + +def large_compatible_negative(tensor_type): + """Large negative number as Tensor. + + This function is necessary because the standard value for epsilon + in this module (-1e9) cannot be represented using tf.float16 + + Args: + tensor_type: a dtype to determine the type. + + Returns: + a large negative number. 
+ """ + if tensor_type == tf.float16: + return tf.float16.min + return -1e9 + + +def mixed_precision_is_enabled( + activation_dtype=None, weight_dtype=None, hparams=None): + assert not (hparams and (activation_dtype or weight_dtype)), ( + "Provide only hparams or activation_dtype and weight_dtype") + if (hparams and hasattr(hparams, "activation_dtype") and + hasattr(hparams, "weight_dtype")): + activation_dtype = hparams.activation_dtype + weight_dtype = hparams.weight_dtype + return activation_dtype == tf.float16 and weight_dtype == tf.float32 + + +def maybe_upcast(logits, + activation_dtype=None, weight_dtype=None, hparams=None): + if mixed_precision_is_enabled(activation_dtype, weight_dtype, hparams): + return tf.cast(logits, tf.float32) + return logits + + +# Struct containing the sequences ids and order on a batch (are send to the +# expert to allow them to compute the bias mask) +BatchInfo = collections.namedtuple("BatchInfo", "coordinates, order") + +_expert_count = 0 + + +def get_standardized_layers(hparams, dp=None): + """Get the common attention and feed-forward layers. + + The returned layer functions will have the following signature: + + y, extra_loss = fct(x) + + extra_loss is set to 0.0 if the layer doesn't have extra loss. + If dp is provided, the layers will be distributed within the devices. + If moe wants to be used, both dp and model need to be set. + + Args: + hparams (tf.HParams): the model hparameters + dp (expert_utils.Parallelism): A data parallelism object. If not given, + the dp calls are simply ignored. + + Returns: + dict[str:fct]: A dictionary containing the standardized functions + """ + + def partial(fct, *args, **kwargs): + """Same as functools.partial but with functools.wraps.""" + return functools.wraps(fct)(functools.partial(fct, *args, **kwargs)) + + def register_layer( + fct_in, + default_args=None, + default_kwargs=None, + use_dp=True, + recompute_grad=False, + ): + """Turn a function into its standardized version. + + Args: + fct_in (fct): The function to register + default_args (list): The default parameters to add to the function. + default_kwargs (dict): The default parameters to add to the function. + Those arguments can be overwritten when calling the function. + use_dp (bool): Wrap the function call within a dataparallelism object if + dp is available. Some layers (like MOE) must be called without dp. + recompute_grad (bool): If True, recompute the function during the + backward pass to save memory + + Returns: + fct: the standardized layer function. + """ + # The kwargs given when calling the function overwrite the default ones + fct_in = partial(fct_in, *(default_args or []), **(default_kwargs or {})) + + @functools.wraps(fct_in) + def decorator(x, *args, **kwargs): + """Call the layer function.""" + fct = fct_in # For closure. 
Could use nonlocal with Python 3 + # Eventually create the memory optimized version of the function + if recompute_grad: + fct = partial(fct, **kwargs) # recompute_grad only accept args + fct = common_layers.recompute_grad(fct) + kwargs = {} + + # Eventually use dp (if given and not MoE) + if use_dp and dp is not None: + y = dp(fct, x, *args, **kwargs) + else: + y = fct(x, *args, **kwargs) + + # Eventually capture the extra loss + extra_loss = 0.0 + if isinstance(y, tuple): + y, extra_loss = y + + return y, extra_loss + + return decorator + + total_key_depth = hparams.attention_key_channels or hparams.hidden_size + total_value_depth = hparams.attention_value_channels or hparams.hidden_size + + # Attention layers: + + # === Multi-head full attention layer === + multihead_attention_fn = register_layer( + multihead_attention, + default_kwargs=dict( + memory_antecedent=None, # Self-attention by default + bias=None, + total_key_depth=total_key_depth, + total_value_depth=total_value_depth, + output_depth=hparams.hidden_size, + num_heads=hparams.num_heads, + dropout_rate=hparams.attention_dropout, + )) + + # === Memory efficient full-attention layer === + # Save memory by not storing the activations and + # recomputing them during the backward pass + memeff_attention_base_fn = register_layer( + multihead_attention, + default_kwargs=dict( + total_key_depth=total_key_depth, + total_value_depth=total_value_depth, + output_depth=hparams.hidden_size, + num_heads=hparams.num_heads, + dropout_rate=hparams.attention_dropout, + ), + recompute_grad=True, + ) + + def memeff_attention_fn(*args, **kwargs): + """Modify args/kwargs for compatibility with recompute_grad.""" + kwargs = kwargs.copy() + assert len(args) == 1 + x = args[0] + memory_antecedent = kwargs.pop("memory_antecedent", x) # Same as x if None + if kwargs.get("bias", None) is not None: # Case where bias has been set + args = (x, memory_antecedent, kwargs.pop("bias")) + else: + # Otherwise, only 2 args. This is necessary as recompute_grad does not + # support None values. + args = (x, memory_antecedent) + return memeff_attention_base_fn(*args, **kwargs) + + # === Local attention (unmasked) layer === + # Reuse same parameters as multihead_attention + # Don't mask the future + local_attention_fn = partial( + multihead_attention_fn, + block_length=hparams.attention_loc_block_length, + block_width=hparams.attention_loc_block_width, + attention_type="local_unmasked", + ) + + # === Local attention (masked) layer === + # Reuse same parameters as multihead_attention + # Only works for self attention. Always mask the future. + local_attention_masked_fn = partial( + multihead_attention_fn, + block_length=hparams.attention_loc_block_length, + attention_type="local_mask_right", + ) + + # === Masked memory-compressed multihead self attention layer === + # Only works for self attention. Always mask the future. + compressed_attention_masked_fn = register_layer( + multihead_self_attention_reduced, + default_kwargs=dict( + factor=hparams.attention_red_factor, + nonlinearity=hparams.attention_red_nonlinearity, + reduction_type=hparams.attention_red_type, + multihead_params=dict( + total_key_depth=total_key_depth, + total_value_depth=total_value_depth, + num_heads=hparams.num_heads, + dropout_rate=hparams.attention_dropout, + ), + ), + ) + + # === Unmasked memory-compressed multihead self attention layer === + # Only works for self attention. Never mask the future. 
Bias never added + compressed_attention_fn = partial( + compressed_attention_masked_fn, + add_mask=False, + ) + + # Feed-forwards layers: + + # === FC layer === + conv_hidden_relu = register_layer( + common_layers.conv_hidden_relu, + default_kwargs=dict( + hidden_size=hparams.filter_size, + output_size=hparams.hidden_size, + dropout=hparams.relu_dropout, + ), + ) + + # === Separable convolution layer === + # No mask applied + sep_conv_relu = partial( + conv_hidden_relu, + padding="SAME", + # Parameters copied from the transformer model, could add hparams + kernel_size=(3, 1), + second_kernel_size=(31, 1), + ) + + # === Separable convolution layer (masked version) === + # Mask the future + sep_conv_relu_masked = partial( + sep_conv_relu, + padding="LEFT", # Mask future for decoder + ) + + # Define all available layers + + cur_layers = dict( + # Attention layers: + a=multihead_attention_fn, # Multihead full attention + loc=local_attention_fn, # Local attention + locm=local_attention_masked_fn, # Local attention (masked) + red=compressed_attention_fn, # Memory-compressed attention + redm=compressed_attention_masked_fn, # Memory-compressed att (masked) + mem=memeff_attention_fn, # Memory efficient + # Feed-forward layers: + fc=conv_hidden_relu, # Fully connected + sep=sep_conv_relu, # Separable convolution (unmasked) + sepm=sep_conv_relu_masked, # Separable convolution (masked) + ) + return cur_layers + + +def add_standard_attention_hparams(hparams): + """Adds the hparams used by get_standardized_layers.""" + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. + + # hparams used and which should have been defined outside (in + # common_hparams): + # Global flags + # hparams.mode + # hparams.hidden_size + # Pre-post processing flags + # hparams.layer_preprocess_sequence + # hparams.layer_postprocess_sequence + # hparams.layer_prepostprocess_dropout + # hparams.norm_type + # hparams.norm_epsilon + # Mixture-of-Expert flags + # hparams.moe_hidden_sizes + # hparams.moe_num_experts + # hparams.moe_k + # hparams.moe_loss_coef + + # Attention layers flags + hparams.add_hparam("num_heads", 8) + hparams.add_hparam("attention_key_channels", 0) + hparams.add_hparam("attention_value_channels", 0) + hparams.add_hparam("attention_dropout", 0.0) + # Attention: Local + hparams.add_hparam("attention_loc_block_length", 256) + # Attention: Local (unmasked only): How much to look left. + hparams.add_hparam("attention_loc_block_width", 128) + # Attention: Memory-compressed + hparams.add_hparam("attention_red_factor", 3) + hparams.add_hparam("attention_red_type", "conv") + hparams.add_hparam("attention_red_nonlinearity", "none") + + # Fully connected layers flags + # To be more consistent, should use filter_size to also control the MOE + # size if moe_hidden_sizes not set. + hparams.add_hparam("filter_size", 2048) + hparams.add_hparam("relu_dropout", 0.0) + + return hparams + + +def encoder_decoder_attention_loss(expected_attention_logits, + actual_attentions, + loss_type="kl_divergence", + loss_multiplier=1.0): + """Computes encdec attention loss between expected and actual attentions. + + Args: + expected_attention_logits: Tensor storing the expected encoder-decoder + attention logits with shape [batch_size, target_length, input_length]. + actual_attentions: Dictionary with actual attention logits for different + attention types and hidden layers. + loss_type: type of the loss function. + loss_multiplier: multiplier for the attention loss. 
+ + Returns: + KL_divergence loss between the actual and expected attention logits. + """ + + def combine_attentions(attention_list): + """Combine different layer attentions and then average over layers/heads.""" + # Stack all hidden layer attention tensors to get a tensor with shape + # [num_hidden_layers, batch_size, num_heads, target_length, input_length]. + attentions = tf.stack(attention_list) + # Reduce mean across all layers (axis=0) and all heads (axis=2) to get a + # tensor with shape [batch_size, target_length, input_length]. + return tf.reduce_mean(attentions, [0, 2]) + + def kl_divergence_loss(expected_logits, actual_logits): + p = tfp.distributions.Categorical(logits=expected_logits) + q = tfp.distributions.Categorical(logits=actual_logits) + return tfp.distributions.kl_divergence(p, q) + + def mse_loss(expected_logits, actual_weights): + expected_weights = tf.nn.softmax(expected_logits) + return tf.losses.mean_squared_error(expected_weights, actual_weights) + + # For each hidden layer, we have attention-logit and attention-weight tensors + # with shape [batch_size, num_heads, target_length, input_length]. + loss = 0.0 + if loss_type == "mse": + actual_encdec_attention_weights = [ + t for layer_key, t in actual_attentions.items() + if "encdec_attention" in layer_key and not layer_key.endswith("/logits") + ] + actual_attention_weights = combine_attentions( + actual_encdec_attention_weights) + loss = mse_loss(expected_attention_logits, actual_attention_weights) + else: + actual_encdec_attention_logits = [ + t for layer_key, t in actual_attentions.items() + if "encdec_attention" in layer_key and layer_key.endswith("/logits") + ] + actual_attention_logits = combine_attentions(actual_encdec_attention_logits) + loss = kl_divergence_loss(expected_attention_logits, + actual_attention_logits) + return loss * loss_multiplier + + +@expert_utils.add_name_scope() +def get_timing_signal_1d(length, + channels, + min_timescale=1.0, + max_timescale=1.0e4, + start_index=0): + """Gets a bunch of sinusoids of different frequencies. + + Each channel of the input Tensor is incremented by a sinusoid of a different + frequency and phase. + + This allows attention to learn to use absolute and relative positions. + Timing signals should be added to some precursors of both the query and the + memory inputs to attention. + + The use of relative position is possible because sin(x+y) and cos(x+y) can be + expressed in terms of y, sin(x) and cos(x). + + In particular, we use a geometric sequence of timescales starting with + min_timescale and ending with max_timescale. The number of different + timescales is equal to channels / 2. For each timescale, we + generate the two sinusoidal signals sin(timestep/timescale) and + cos(timestep/timescale). All of these sinusoids are concatenated in + the channels dimension. + + Args: + length: scalar, length of timing signal sequence. + channels: scalar, size of timing embeddings to create. The number of + different timescales is equal to channels / 2. 
+ min_timescale: a float + max_timescale: a float + start_index: index of first position + + Returns: + a Tensor of timing signals [1, length, channels] + """ + position = tf.cast(tf.range(length) + start_index, tf.float32) + num_timescales = channels // 2 + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + tf.maximum(to_float(num_timescales) - 1, 1)) + inv_timescales = min_timescale * tf.exp( + to_float(tf.range(num_timescales)) * -log_timescale_increment) + scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0) + # Please note that this slightly differs from the published paper. + # See a discussion here: https://github.com/tensorflow/tensor2tensor/pull/177 + signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) + signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]]) + signal = tf.reshape(signal, [1, length, channels]) + return signal + + +@expert_utils.add_name_scope() +def add_timing_signal_1d(x, + min_timescale=1.0, + max_timescale=1.0e4, + start_index=0): + """Adds a bunch of sinusoids of different frequencies to a Tensor. + + Each channel of the input Tensor is incremented by a sinusoid of a different + frequency and phase. + + This allows attention to learn to use absolute and relative positions. + Timing signals should be added to some precursors of both the query and the + memory inputs to attention. + + The use of relative position is possible because sin(x+y) and cos(x+y) can be + expressed in terms of y, sin(x) and cos(x). + + In particular, we use a geometric sequence of timescales starting with + min_timescale and ending with max_timescale. The number of different + timescales is equal to channels / 2. For each timescale, we + generate the two sinusoidal signals sin(timestep/timescale) and + cos(timestep/timescale). All of these sinusoids are concatenated in + the channels dimension. + + Args: + x: a Tensor with shape [batch, length, channels] + min_timescale: a float + max_timescale: a float + start_index: index of first position + + Returns: + a Tensor the same shape as x. + """ + length = common_layers.shape_list(x)[1] + channels = common_layers.shape_list(x)[2] + signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale, + start_index) + return x + common_layers.cast_like(signal, x) + + +@expert_utils.add_name_scope() +def get_layer_timing_signal_learned_1d(channels, layer, num_layers): + """get n-dimensional embedding as the layer (vertical) timing signal. + + Adds embeddings to represent the position of the layer in the tower. + + Args: + channels: dimension of the timing signal + layer: layer num + num_layers: total number of layers + + Returns: + a Tensor of timing signals [1, 1, channels]. + """ + shape = [num_layers, 1, 1, channels] + layer_embedding = ( + tf.get_variable( + "layer_embedding", + shape, + initializer=tf.random_normal_initializer(0, channels**-0.5)) * + (channels**0.5)) + return layer_embedding[layer, :, :, :] + + +@expert_utils.add_name_scope() +def add_layer_timing_signal_learned_1d(x, layer, num_layers): + """Add n-dimensional embedding as the layer (vertical) timing signal. + + Adds embeddings to represent the position of the layer in the tower. + + Args: + x: a tensor with shape [batch, length, depth] + layer: layer num + num_layers: total number of layers + + Returns: + a Tensor the same shape as x. 
+ """ + channels = common_layers.shape_list(x)[-1] + signal = get_layer_timing_signal_learned_1d(channels, layer, num_layers) + x += signal + return x + + +@expert_utils.add_name_scope() +def get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers): + """Add sinusoids of different frequencies as layer (vertical) timing signal. + + Args: + channels: dimension of the timing signal + layer: layer num + num_layers: total number of layers + + Returns: + a Tensor of timing signals [1, 1, channels]. + """ + + signal = get_timing_signal_1d(num_layers, channels) + layer_signal = tf.expand_dims(signal[:, layer, :], axis=1) + + return layer_signal + + +@expert_utils.add_name_scope() +def add_layer_timing_signal_sinusoid_1d(x, layer, num_layers): + """Add sinusoids of different frequencies as layer (vertical) timing signal. + + Args: + x: a Tensor with shape [batch, length, channels] + layer: layer num + num_layers: total number of layers + + Returns: + a Tensor the same shape as x. + """ + + channels = common_layers.shape_list(x)[-1] + signal = get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers) + + return x + signal + + +@expert_utils.add_name_scope() +def add_timing_signals_given_positions(x, + positions, + min_timescale=1.0, + max_timescale=1.0e4): + """Adds sinusoids of diff frequencies to a Tensor, with timing positions given. + + Args: + x: a Tensor with shape [batch, length, channels] + positions: a list of positions, each of which can either be a Tensor of + shape [batch, length] or None for a default of (0..length] + min_timescale: a float + max_timescale: a float + + Returns: + a Tensor the same shape as x. + """ + shape = common_layers.shape_list(x) + batch = shape[0] + length = shape[1] + channels = shape[2] + num_dims = len(positions) + num_timescales = channels // (num_dims * 2) + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + (to_float(num_timescales) - 1)) + inv_timescales = min_timescale * tf.exp( + to_float(tf.range(num_timescales)) * -log_timescale_increment) + for dim, position in enumerate(positions): + if position is None: + # Create a [batch, length] Tensor of incrementing positions 0..length-1. + position = tf.tile( + tf.transpose(tf.expand_dims(tf.range(0, length), axis=1)), [batch, 1]) + scaled_time = ( + tf.expand_dims(to_float(position), 2) * + tf.expand_dims(tf.expand_dims(inv_timescales, 0), 0)) + signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2) + prepad = dim * 2 * num_timescales + postpad = channels - (dim + 1) * 2 * num_timescales + signal = tf.pad(signal, [[0, 0], [0, 0], [prepad, postpad]]) + signal = common_layers.cast_like(signal, x) + x += signal + return x + + +@expert_utils.add_name_scope() +def add_timing_signals_from_features(x, + features, + position_features, + min_timescale=1.0, + max_timescale=1.0e4): + """Adds timing signals from features named in `position_features`. + + Args: + x: a Tensor with shape [batch, length, channels] + features: a features dictionary + position_features: a comma-delimited string where each item is either a + feature key or the empty string (which denotes a default position tensor + of [0..length]) + min_timescale: a float + max_timescale: a float + + Returns: + a Tensor the same shape as x. 
+ """ + return add_timing_signals_given_positions(x, [ + features.get(position_feature) + for position_feature in position_features.split(",") + ], min_timescale, max_timescale) + + +@expert_utils.add_name_scope() +def add_timing_signal_1d_given_position(x, + position, + min_timescale=1.0, + max_timescale=1.0e4): + """Adds sinusoids of diff frequencies to a Tensor, with timing position given. + + Args: + x: a Tensor with shape [batch, length, channels] + position: a Tensor with shape [batch, length] + min_timescale: a float + max_timescale: a float + + Returns: + a Tensor the same shape as x. + """ + channels = common_layers.shape_list(x)[2] + num_timescales = channels // 2 + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + (tf.cast(num_timescales, tf.float32) - 1)) + inv_timescales = min_timescale * tf.exp( + tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment) + scaled_time = ( + tf.expand_dims(to_float(position), 2) * tf.expand_dims( + tf.expand_dims(inv_timescales, 0), 0)) + signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2) + signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]]) + signal = common_layers.cast_like(signal, x) + return x + signal + + +@expert_utils.add_name_scope() +def add_timing_signal_nd(x, min_timescale=1.0, max_timescale=1.0e4): + """Adds a bunch of sinusoids of different frequencies to a Tensor. + + Each channel of the input Tensor is incremented by a sinusoid of a different + frequency and phase in one of the positional dimensions. + + This allows attention to learn to use absolute and relative positions. + Timing signals should be added to some precursors of both the query and the + memory inputs to attention. + + The use of relative position is possible because sin(a+b) and cos(a+b) can be + expressed in terms of b, sin(a) and cos(a). + + x is a Tensor with n "positional" dimensions, e.g. one dimension for a + sequence or two dimensions for an image + + We use a geometric sequence of timescales starting with + min_timescale and ending with max_timescale. The number of different + timescales is equal to channels // (n * 2). For each timescale, we + generate the two sinusoidal signals sin(timestep/timescale) and + cos(timestep/timescale). All of these sinusoids are concatenated in + the channels dimension. + + Args: + x: a Tensor with shape [batch, d1 ... dn, channels] + min_timescale: a float + max_timescale: a float + + Returns: + a Tensor the same shape as x. 
+ """ + num_dims = len(x.get_shape().as_list()) - 2 + channels = common_layers.shape_list(x)[-1] + num_timescales = channels // (num_dims * 2) + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + (to_float(num_timescales) - 1)) + inv_timescales = min_timescale * tf.exp( + to_float(tf.range(num_timescales)) * -log_timescale_increment) + for dim in range(num_dims): + length = common_layers.shape_list(x)[dim + 1] + position = to_float(tf.range(length)) + scaled_time = tf.expand_dims(position, 1) * tf.expand_dims( + inv_timescales, 0) + signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) + prepad = dim * 2 * num_timescales + postpad = channels - (dim + 1) * 2 * num_timescales + signal = tf.pad(signal, [[0, 0], [prepad, postpad]]) + for _ in range(1 + dim): + signal = tf.expand_dims(signal, 0) + for _ in range(num_dims - 1 - dim): + signal = tf.expand_dims(signal, -2) + x += signal + return x + + +def add_positional_embedding(x, max_length, name=None, positions=None): + """Adds positional embedding. + + Args: + x: Tensor with shape [batch, length, depth]. + max_length: int representing static maximum size of any dimension. + name: str representing name of the embedding tf.Variable. + positions: Tensor with shape [batch, length]. + + Returns: + Tensor of same shape as x. + """ + with tf.name_scope("add_positional_embedding"): + _, length, depth = common_layers.shape_list(x) + var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype) + if positions is None: + pad_length = tf.maximum(0, length - max_length) + sliced = tf.cond( + tf.less(length, max_length), + lambda: tf.slice(var, [0, 0], [length, -1]), + lambda: tf.pad(var, [[0, pad_length], [0, 0]])) + return x + tf.expand_dims(sliced, 0) + else: + return x + tf.gather(var, tf.to_int32(positions)) + + +def add_positional_embedding_nd(x, max_length, name=None): + """Adds n-dimensional positional embedding. + + The embeddings add to all positional dimensions of the tensor. + + Args: + x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional + dimensions, i.e., 1 for text, 2 for images, 3 for video, etc. + max_length: int representing static maximum size of any dimension. + name: str representing name of the embedding tf.Variable. + + Returns: + Tensor of same shape as x. + """ + with tf.name_scope("add_positional_embedding_nd"): + x_shape = common_layers.shape_list(x) + num_dims = len(x_shape) - 2 + depth = x_shape[-1] + base_shape = [1] * (num_dims + 1) + [depth] + base_start = [0] * (num_dims + 2) + base_size = [-1] + [1] * num_dims + [depth] + for i in range(num_dims): + shape = base_shape[:] + start = base_start[:] + size = base_size[:] + shape[i + 1] = max_length + size[i + 1] = x_shape[i + 1] + var = tf.get_variable( + name + "_%d" % i, + shape, + initializer=tf.random_normal_initializer(0, depth**-0.5)) + var = var * depth**0.5 + x += tf.slice(var, start, size) + return x + + +def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None): + """Gets edge vectors for the edge types in the adjacency matrix. + + Args: + adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints. 
+ num_edge_types: Number of different edge types + depth: Number of channels + name: a string + Returns: + A [batch, num_nodes, num_nodes, depth] vector of tensors + """ + with tf.variable_scope(name, default_name="edge_vectors"): + att_adj_vectors_shape = [num_edge_types, depth] + adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix) + adj_vectors = ( + tf.get_variable( + "adj_vectors", + att_adj_vectors_shape, + initializer=tf.random_normal_initializer(0, depth**-0.5)) * + (depth**0.5)) + # Avoiding gathers so that it works on TPUs + # adjacency_matrix_one_hot has shape + # [batch, num_nodes, num_nodes, num_edge_types] + + adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types) + + att_adj_vectors = tf.matmul( + tf.reshape(to_float(adjacency_matrix_one_hot), [-1, num_edge_types]), + adj_vectors) + return tf.reshape(att_adj_vectors, + [adjacency_matrix_shape[0], adjacency_matrix_shape[1], + adjacency_matrix_shape[2], depth]) + + +class LshGating(object): + """Class to split key/queries into separate buckets.""" + + def __init__(self, depth, nb_hyperplanes, nb_replicat=1, trainable=False): + """Construct the gating function parameters. + + Compute the gates for a single head. + + Args: + depth (int): Dimension of the key/queries to dispatch + nb_hyperplanes (int): Nb of vectors use to split the space. Will determine + the number of buckets (2^nb_hyperplanes - 1). + nb_replicat (int): Redundancy to avoid the edge cases (to be in one bucket + the input should be in a majority) + trainable (bool): If True, a balance loss is added to force the hyperplane + to divide the key/query space evenly + """ + self.depth = depth + self.nb_hyperplanes = nb_hyperplanes + self.nb_buckets = 2**nb_hyperplanes + self.nb_replicat = nb_replicat # Unused for now + self.trainable = trainable # Unused for now + + self.dispatchers = {} + + assert self.nb_replicat == 1 # For now + + with tf.variable_scope("lsh_gating"): + # Vectors defining the hyperplanes + self.t_vectors = tf.get_variable( + "vector", + shape=(self.depth, self.nb_hyperplanes * self.nb_replicat), + dtype=tf.float32, + trainable=self.trainable, + ) + # Projection vector from the bit space to similarity score space + self.t_group = tf.constant( + [self._idx_to_bits(i) for i in range(self.nb_buckets)], + dtype=tf.float32, + name="group") + + def _idx_to_bits(self, i): + """Convert an group index to its bit representation.""" + bits = bin(i)[2:].zfill(self.nb_hyperplanes) # Pad the bits str with 0 + return [-1.0 if b == "0" else 1.0 for b in bits] + + @expert_utils.add_name_scope("lsh_gating") + def get_gates(self, x): + """Return the bucket id of the given tensor. + + Args: + x (tf.Tensor): float32 of shape [length, depth] + + Returns: + tf.Tensor: One-hot vector int64 of shape [heads, length, nb_buckets] + containing the id of the bucket + """ + + # The balance loss don't propagate to the rest of the network + x = tf.stop_gradient(x) + # [length, depth] * [depth, nb_vectors * replicat] + x = tf.matmul(x, self.t_vectors) + # [length, nb_vector * replicat] + x = tf.sign(x) # Get on which side of the hyperplane the keys are. 
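The bucketing performed by LshGating.get_gates can be traced with a few lines of NumPy. This is only an illustrative sketch; the sizes, the random hyperplanes, and the variable names below are invented and are not part of this module:

import numpy as np

depth, nb_hyperplanes = 4, 3
rng = np.random.default_rng(0)
hyperplanes = rng.normal(size=(depth, nb_hyperplanes))           # plays the role of t_vectors
codes = np.array([[-1.0 if b == "0" else 1.0
                   for b in bin(i)[2:].zfill(nb_hyperplanes)]
                  for i in range(2 ** nb_hyperplanes)])           # plays the role of t_group
x = rng.normal(size=(5, depth))                                   # 5 keys/queries of dimension depth
signs = np.sign(x @ hyperplanes)                                  # which side of each hyperplane
scores = signs @ codes.T / nb_hyperplanes                         # similarity to each bucket code
bucket = scores.argmax(axis=-1)                                   # bucket id per position

Each position lands in the bucket whose +/-1 code best matches its pattern of hyperplane signs, which is what the tf.matmul / tf.argmax / tf.one_hot sequence in get_gates computes.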
+ + # x = tf.reshape(x, [-1, nb_replicat, nb_vector]) + # [length, replicat, nb_vector] * [nb_vector, 2^nb_vector - 1] + + x = tf.matmul(x, self.t_group, transpose_b=True) / self.nb_hyperplanes + # We get a similarity score for each of the group between [-1, 1] + # [length, (replicat,) 2^nb_vector - 1] + # Do an argmax to get the most likely group for each replicat + x = tf.argmax(x, axis=-1) + # [length(, replicat)] + # One-hot for compatibility with the sparse dispatcher + x = tf.one_hot(x, self.nb_buckets) + # TODO(epot): Use a loss to force an even distribution + return x + + +@expert_utils.add_name_scope() +def embedding_to_padding(emb): + """Calculates the padding mask based on which embeddings are all zero. + + We have hacked symbol_modality to return all-zero embeddings for padding. + + Args: + emb: a Tensor with shape [..., depth]. + + Returns: + a float Tensor with shape [...]. Each element is 1 if its corresponding + embedding vector is all zero, and is 0 otherwise. + """ + emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1) + return to_float(tf.equal(emb_sum, 0.0)) + + +@expert_utils.add_name_scope() +def padding_to_length(padding): + """Calculate the length of mask based on padding. + + Args: + padding: a Tensor with shape [..., length]. + Returns: + a Tensor with shape [...]. + """ + non_padding = 1.0 - padding + return tf.to_int32(tf.reduce_sum(non_padding, axis=-1)) + + +@expert_utils.add_name_scope() +def attention_bias_local(length, max_backward, max_forward): + """Create an bias tensor to be added to attention logits. + + A position may attend to positions at most max_distance from it, + forward and backwards. + + This does not actually save any computation. + + Args: + length: int + max_backward: int, maximum distance backward to attend. Negative values + indicate unlimited. + max_forward: int, maximum distance forward to attend. Negative values + indicate unlimited. + + Returns: + a `Tensor` with shape [1, 1, length, length]. + """ + band = common_layers.ones_matrix_band_part( + length, + length, + max_backward, + max_forward, + out_shape=[1, 1, length, length]) + return -1e9 * (1.0 - band) + + +@expert_utils.add_name_scope() +def attention_bias_lower_triangle(length): + """Create an bias tensor to be added to attention logits. + + Allows a query to attend to all positions up to and including its own. + + Args: + length: a Scalar. + + Returns: + a `Tensor` with shape [1, 1, length, length]. + """ + return attention_bias_local(length, -1, 0) + + +@expert_utils.add_name_scope() +def attention_bias_same_segment(query_segment_id, memory_segment_id): + """Create an bias tensor to be added to attention logits. + + Positions with the same segment_ids can see each other. + + Args: + query_segment_id: a float `Tensor` with shape [batch, query_length]. + memory_segment_id: a float `Tensor` with shape [batch, memory_length]. + + Returns: + a `Tensor` with shape [batch, 1, query_length, memory_length]. + """ + ret = (tf.cast( + tf.not_equal( + tf.expand_dims(query_segment_id, 2), + tf.expand_dims(memory_segment_id, 1)), tf.float32) * + large_compatible_negative(memory_segment_id.dtype)) + return tf.expand_dims(ret, axis=1) + + +@expert_utils.add_name_scope() +def attention_bias_ignore_padding(memory_padding): + """Create an bias tensor to be added to attention logits. + + Args: + memory_padding: a float `Tensor` with shape [batch, memory_length]. + + Returns: + a `Tensor` with shape [batch, 1, 1, memory_length]. 
+ """ + ret = memory_padding * large_compatible_negative(memory_padding.dtype) + return tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1) + + +@expert_utils.add_name_scope() +def attention_bias_to_padding(attention_bias, + cast_fn=(lambda x: tf.cast(x, tf.float32))): + """Inverse of attention_bias_ignore_padding(). + + Args: + attention_bias: a `Tensor` with shape [batch, 1, 1, memory_length], as + returned by attention_bias_ignore_padding(). + cast_fn: function used to cast to output type. + + Returns: + a Tensor with shape [batch, memory_length] with 1.0 in padding positions + and 0.0 in non-padding positions. Type is determined by cast_fn. + """ + # `attention_bias` is a large negative number in padding positions and 0.0 + # elsewhere. + return tf.squeeze(cast_fn(tf.less(attention_bias, -1)), axis=[1, 2]) + + +@expert_utils.add_name_scope() +def attention_bias_prepend_inputs_full_attention(padding): + """Create a bias tensor for prepend_mode="prepend_inputs_full_attention". + + Produces a bias tensor to be used in self-attention. + + This bias tensor allows for full connectivity in the "inputs" part of + the sequence and masked connectivity in the targets part. + + Args: + padding: a float `Tensor` with shape [batch, length] with + ones in positions corresponding to padding. In each row, a single + padding position separates the input part from the target part. + + Returns: + a `Tensor` with shape [batch, 1, length, length]. + """ + # Everything past the first padding position is part of the target. + # This Tensor has zeros for the source portion and separator, + # and ones for the target portion. + in_target = tf.cumsum(padding, axis=1, exclusive=True) + # The position within the target, or 0 if part of the source. + target_pos = tf.cumsum(in_target, axis=1) + # A position with a lesser target_pos cannot see a position with greater + # target_pos. + illegal_connections = tf.greater( + tf.expand_dims(target_pos, 1), tf.expand_dims(target_pos, 2)) + bias = to_float(illegal_connections) * -1e9 + bias = tf.expand_dims(bias, 1) + return bias + + +@expert_utils.add_name_scope() +def attention_bias_proximal(length): + """Bias for self-attention to encourage attention to close positions. + + Args: + length: an integer scalar. + + Returns: + a Tensor with shape [1, 1, length, length] + """ + r = to_float(tf.range(length)) + diff = tf.expand_dims(r, 0) - tf.expand_dims(r, 1) + return tf.expand_dims(tf.expand_dims(-tf.log1p(tf.abs(diff)), 0), 0) + + +@expert_utils.add_name_scope() +def attention_bias_batch(batch_coordinates_q, + batch_coordinates_k=None, + condition_fn=None): + """Generate a mask to prevent the batch to attend to each others. + + Args: + batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the + coordinates of the batches + batch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the + coordinates of the batches. If None, do self-attention. + condition_fn: Callable defining the attention mask. + + Returns: + Float-like Tensor of shape [length_q, length_k] containing either 0 or + -infinity (-1e9). + """ + if batch_coordinates_k is None: + batch_coordinates_k = batch_coordinates_q + + # Convert to float first because of b/25387198. + def to_float(bc): + bc = tf.squeeze(bc, 1) + bc = to_float(bc) + return bc + + # Broadcast to create [length_q, length_k] mask. 
+ bc_v = tf.expand_dims(to_float(batch_coordinates_q), 1) + bc_h = tf.expand_dims(to_float(batch_coordinates_k), 0) + bias_batch = bc_h - bc_v + bias_batch = condition_fn(bias_batch) + bias_batch *= -1e9 + return bias_batch + + +# Mask to prevent individual sequences of the same batch to attend to each other +attention_bias_coordinates = functools.partial( + attention_bias_batch, + condition_fn=lambda bias: tf.minimum(1.0, tf.abs(bias)), +) + +# Mask similar to upper triangular mask, but allow dispatching +attention_bias_future = functools.partial( + attention_bias_batch, + # Elems can attend to themselves (otherwise would use bias_batch + 1.0). + # No tf.abs to consider the order, + # tf.maximum and tf.minimum to threshold the values. + condition_fn=lambda bias: tf.maximum(0.0, tf.minimum(1.0, bias)), +) + + +@expert_utils.add_name_scope() +def split_last_dimension(x, n): + """Reshape x so that the last dimension becomes two dimensions. + + The first of these two dimensions is n. + + Args: + x: a Tensor with shape [..., m] + n: an integer. + + Returns: + a Tensor with shape [..., n, m/n] + """ + x_shape = common_layers.shape_list(x) + m = x_shape[-1] + if isinstance(m, int) and isinstance(n, int): + assert m % n == 0 + return tf.reshape(x, x_shape[:-1] + [n, m // n]) + + +@expert_utils.add_name_scope() +def combine_last_two_dimensions(x): + """Reshape x so that the last two dimension become one. + + Args: + x: a Tensor with shape [..., a, b] + + Returns: + a Tensor with shape [..., ab] + """ + x_shape = common_layers.shape_list(x) + a, b = x_shape[-2:] + return tf.reshape(x, x_shape[:-2] + [a * b]) + + +@expert_utils.add_name_scope() +def combine_first_two_dimensions(x): + """Reshape x so that the first two dimension become one. + + Args: + x: a Tensor with shape [a, b, ...] + + Returns: + a Tensor with shape [ab, ...] + """ + ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0)) + old_shape = x.get_shape().dims + a, b = old_shape[:2] + new_shape = [a * b if a and b else None] + old_shape[2:] + ret.set_shape(new_shape) + return ret + + +@expert_utils.add_name_scope() +def split_heads(x, num_heads): + """Split channels (dimension 2) into multiple heads (becomes dimension 1). + + Args: + x: a Tensor with shape [batch, length, channels] + num_heads: an integer + + Returns: + a Tensor with shape [batch, num_heads, length, channels / num_heads] + """ + return tf.transpose(split_last_dimension(x, num_heads), [0, 2, 1, 3]) + + +@expert_utils.add_name_scope() +def split_heads_2d(x, num_heads): + """Split channels (dimension 3) into multiple heads (becomes dimension 1). + + Args: + x: a Tensor with shape [batch, height, width, channels] + num_heads: an integer + + Returns: + a Tensor with shape [batch, num_heads, height, width, channels / num_heads] + """ + return tf.transpose(split_last_dimension(x, num_heads), [0, 3, 1, 2, 4]) + + +def split_heads_nd(x, num_heads): + """Split the depth dimension (last dimension) into multiple heads. + + Args: + x: a [batch, d1, ..., dn, depth] tensor + num_heads: an integer + + Returns: + a [batch, num_heads, d1, ..., dn, depth // num_heads] + """ + num_dimensions = len(common_layers.shape_list(x)) - 2 + return tf.transpose( + split_last_dimension(x, num_heads), [0, num_dimensions + 1] + + list(range(1, num_dimensions + 1)) + [num_dimensions + 2]) + + +@expert_utils.add_name_scope() +def combine_heads(x): + """Inverse of split_heads. 
+ + Args: + x: a Tensor with shape [batch, num_heads, length, channels / num_heads] + + Returns: + a Tensor with shape [batch, length, channels] + """ + return combine_last_two_dimensions(tf.transpose(x, [0, 2, 1, 3])) + + +@expert_utils.add_name_scope() +def combine_heads_2d(x): + """Inverse of split_heads_2d. + + Args: + x: a Tensor with shape + [batch, num_heads, height, width, channels / num_heads] + + Returns: + a Tensor with shape [batch, height, width, channels] + """ + return combine_last_two_dimensions(tf.transpose(x, [0, 2, 3, 1, 4])) + + +def combine_heads_nd(x): + """Inverse of split_heads_nd. + + Args: + x: a [batch, num_heads, d1, ..., dn, depth // num_heads] tensor + + Returns: + a [batch, d1, ...., dn, depth] tensor + """ + num_dimensions = len(common_layers.shape_list(x)) - 3 + return combine_last_two_dimensions( + tf.transpose(x, [0] + list(range(2, num_dimensions + 2)) + + [1, num_dimensions + 2])) + + +def attention_image_summary(attn, image_shapes=None): + """Compute color image summary. + + Args: + attn: a Tensor with shape [batch, num_heads, query_length, memory_length] + image_shapes: optional tuple of integer scalars. + If the query positions and memory positions represent the + pixels of flattened images, then pass in their dimensions: + (query_rows, query_cols, memory_rows, memory_cols). + If the query positions and memory positions represent the + pixels x channels of flattened images, then pass in their dimensions: + (query_rows, query_cols, query_channels, + memory_rows, memory_cols, memory_channels). + """ + attn = tf.cast(attn, tf.float32) + num_heads = common_layers.shape_list(attn)[1] + # [batch, query_length, memory_length, num_heads] + image = tf.transpose(attn, [0, 2, 3, 1]) + image = tf.pow(image, 0.2) # for high-dynamic-range + # Each head will correspond to one of RGB. + # pad the heads to be a multiple of 3 + image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, tf.mod(-num_heads, 3)]]) + image = split_last_dimension(image, 3) + image = tf.reduce_max(image, 4) + if image_shapes is not None: + if len(image_shapes) == 4: + q_rows, q_cols, m_rows, m_cols = list(image_shapes) + image = tf.reshape(image, [-1, q_rows, q_cols, m_rows, m_cols, 3]) + image = tf.transpose(image, [0, 1, 3, 2, 4, 5]) + image = tf.reshape(image, [-1, q_rows * m_rows, q_cols * m_cols, 3]) + else: + assert len(image_shapes) == 6 + q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels = list( + image_shapes) + image = tf.reshape( + image, + [-1, q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels, 3]) + image = tf.transpose(image, [0, 1, 4, 3, 2, 5, 6, 7]) + image = tf.reshape( + image, + [-1, q_rows * m_rows * q_channnels, q_cols * m_cols * m_channels, 3]) + tf.summary.image("attention", image, max_outputs=1) + + +def grouped_attention_multihead(query_antecedent, + memory_antecedent, + total_key_depth, + total_value_depth, + output_depth, + num_heads, + num_groups, + memory_target_density=2.0, + multiplicative_overhead=1.25, + additive_overhead=8.0, + mask_right=False, + make_image_summary=True, + name=None): + """Multi-head dot-product attention with sparsity. + + For each attention head, the queries are partitioned into groups. + For each group, only a subset of the key-value pairs are considered. + + The choices of groups are selected based on trained predictors of + the total attention given the group inclusion. + + memory_target_density indicates the average how many groups in which + a key-value pair should participate. 
+ + We use auxiliary losses to ensure that each group contains roughly + the same number of queries and the same number of key-value pairs. + If for a given sequence, the actual number of queries/pairs sent to + an expert exceeds this target by a factor of more than + multiplicative_overhead, then the last ones are dropped. We use + this drop-last policy to avoid bleeding information backwards, which + is necessary when using this function with autoregressive + prediction. + + Args: + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: a Tensor with shape [batch, length_m, channels] + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + num_groups: an integer + memory_target_density: a floating point scalar + multiplicative_overhead: a floating point scalar + additive_overhead: a floating point scalar + mask_right: a boolean + make_image_summary: a boolean + name: an optional string + + Returns: + A Tensor with shape [batch, length_q, output_depth] + + Raises: + ValueError: if the key depth or value depth are not divisible by the + number of attention heads. + """ + batch = common_layers.shape_list(query_antecedent)[0] + length_q = common_layers.shape_list(query_antecedent)[1] + length_kv = common_layers.shape_list(memory_antecedent)[1] + + if total_key_depth % num_heads != 0: + raise ValueError("Key depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_key_depth, num_heads)) + depth_qk = total_key_depth // num_heads + if total_value_depth % num_heads != 0: + raise ValueError("Value depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_value_depth, num_heads)) + depth_v = total_value_depth // num_heads + with tf.variable_scope( + name, default_name="multihead_attention_sparse", + values=[query_antecedent, memory_antecedent]): + q = common_layers.dense( + query_antecedent, total_key_depth, use_bias=False, name="q_transform") + kv = common_layers.dense( + memory_antecedent, + total_key_depth + total_value_depth, + use_bias=False, + name="kv_transform") + q = split_heads(q, num_heads) + kv = split_heads(kv, num_heads) + # Make predictions about q_total and m_total. + # These are used to determine group inclusion. + # We will train these by auxiliary losses. We use stop_gradient here + # to keep these losses from back-propagating to the rest of the model. + # We add biases that help balance the usage of the experts. + q_pred = common_layers.dense( + tf.stop_gradient(query_antecedent), + num_heads * num_groups, + use_bias=False, + name="q_pred") + q_pred = split_heads(q_pred, num_heads) + q_bias = tf.get_variable("q_bias", [1, num_heads, 1, num_groups]) + q_pred_biased = q_pred + q_bias + m_pred = common_layers.dense( + tf.stop_gradient(memory_antecedent), + num_heads * num_groups, + use_bias=False, + name="m_pred") + m_pred = split_heads(m_pred, num_heads) + m_bias = tf.get_variable("m_bias", [1, num_heads, 1, num_groups]) + m_pred_biased = m_pred + m_bias + q *= depth_qk**-0.5 + # q, kv, q_pred, m_pred are all [batch, heads, length_[q/m], ?] + # now reshape them all to [batch * heads, length, ?] 
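The reshape performed by the next few lines is easiest to see on concrete shapes. A NumPy sketch with invented sizes (not taken from any model configuration):

import numpy as np

batch, heads, length, depth = 2, 4, 7, 8
x = np.zeros((batch, length, heads * depth))
# split_heads: [batch, length, heads * depth] -> [batch, heads, length, depth]
per_head = x.reshape(batch, length, heads, depth).transpose(0, 2, 1, 3)
# combine_first_two_dimensions: [batch, heads, length, depth] -> [batch * heads, length, depth]
flat = per_head.reshape(batch * heads, length, depth)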
+ q = combine_first_two_dimensions(q) + kv = combine_first_two_dimensions(kv) + q_pred = combine_first_two_dimensions(q_pred) + m_pred = combine_first_two_dimensions(m_pred) + q_pred_biased = combine_first_two_dimensions(q_pred_biased) + m_pred_biased = combine_first_two_dimensions(m_pred_biased) + q_group = tf.argmax(q_pred_biased, axis=2) + q_requests = tf.one_hot(q_group, num_groups, axis=-1) + m_requests = to_float(tf.greater(m_pred_biased, 0.0)) + # include first memory position in all groups, to avoid division by zero. + m_requests = tf.maximum( + m_requests, tf.reshape(tf.one_hot([0], length_kv), [1, length_kv, 1])) + q_group_size = tf.reduce_sum(q_requests, 1) + m_group_size = tf.reduce_sum(m_requests, 1) + q_group_target_size = to_float(length_q) / to_float(num_groups) + m_group_target_size = ( + to_float(length_kv) * memory_target_density / + to_float(num_groups)) + capacity_q = tf.minimum( + length_q, + tf.to_int32(q_group_target_size * multiplicative_overhead + + additive_overhead)) + capacity_m = tf.minimum( + length_kv, + tf.to_int32(m_group_target_size * multiplicative_overhead + + additive_overhead)) + q_dispatcher = expert_utils.TruncatingDispatcher(q_requests, capacity_q) + m_dispatcher = expert_utils.TruncatingDispatcher(m_requests, capacity_m) + q_gates = q_dispatcher.gates() + m_gates = m_dispatcher.gates() + dispatched_q = q_dispatcher.dispatch(q) + dispatched_kv = m_dispatcher.dispatch(kv) + # dispatched_q: [batch * num_heads, num_groups, capacity_q, depth_qk] + # dispatched_kv: + # [batch * num_heads, num_groups, capacity_m, depth_qk + depth_v] + k, v = tf.split(dispatched_kv, [depth_qk, depth_v], axis=3) + logits = tf.matmul(dispatched_q, k, transpose_b=True) + bias = tf.expand_dims((m_dispatcher.nonpadding() - 1.0) * 1e9, 2) + if mask_right: + q_coordinate = to_float( + tf.expand_dims(q_dispatcher.length_coordinate(), 3)) + m_coordinate = to_float( + tf.expand_dims(m_dispatcher.length_coordinate(), 2)) + bias += to_float(tf.greater(m_coordinate, q_coordinate)) * -1e9 + logits += bias + log_weights = tf.nn.log_softmax(logits) + weights = tf.exp(log_weights) + # For each query, this is the log of the sum of the unnormalized weights. + q_total = tf.stop_gradient(logits[:, :, :, :1] - log_weights[:, :, :, :1]) + # For each key, this is the sum of the normalized weights. + m_total = tf.expand_dims( + tf.reduce_sum(tf.stop_gradient(weights), axis=2), -1) + o = tf.matmul(weights, v) + o = q_dispatcher.combine(o) + + o = tf.reshape(o, [batch, num_heads, length_q, depth_v]) + o = combine_heads(o) + o = common_layers.dense( + o, output_depth, use_bias=False, name="output_transform") + + m_total = m_dispatcher.combine(m_total) + q_total = q_dispatcher.combine(q_total) + q_total = tf.squeeze(q_total, -1) + m_total = tf.squeeze(m_total, -1) + # Compute summed m predictions for all groups + m_pred_used = tf.reduce_sum(tf.exp(m_pred) * m_dispatcher.gates(), axis=2) + q_pred_used = tf.reduce_sum(q_pred * q_dispatcher.gates(), axis=2) + epsilon = 1e-3 + m_pred_used = tf.log(m_pred_used + epsilon) + m_total = tf.log(m_total + epsilon) + m_loss = tf.nn.l2_loss(m_total - m_pred_used) + q_loss = tf.nn.l2_loss( + (q_total - q_pred_used) * tf.reduce_sum(q_gates, axis=2)) + + q_loss /= to_float(batch * length_q) + m_loss /= to_float(batch * length_kv) + + # We would like the query groups to be equal sized. The group + # size is discrete, so we need some trick here. We add a loss + # proportional to the product of the group size and the + # predictions for that group. 
This encourages the predictions to + # decrease for groups that are too big. + q_group_deviation = (q_group_size / q_group_target_size) - 1.0 + q_balance_loss = tf.reduce_sum( + tf.reduce_mean(q_pred_biased, axis=1) * + q_group_deviation) / to_float(batch) + m_group_deviation = (m_group_size / m_group_target_size) - 1.0 + m_balance_loss = tf.reduce_sum( + tf.reduce_mean(m_pred_biased, axis=1) * + m_group_deviation) / to_float(batch) + + # The losses in this function only propagate back to variables + # defined in this function, and the losses outside of this + # function only propagate back to variables outside of this + # function. Assuming some kind of adaptive learning algorithm, + # it should not matter how much we scale the losses in this function. + # Still we scale them down a lot so that they should not show up + # much in the overall loss for the model. + extra_loss_multiplier = 1e-3 + extra_loss = q_loss + m_loss + q_balance_loss + m_balance_loss + extra_loss *= extra_loss_multiplier + + # Show a bunch of summaries. + if common_layers.should_generate_summaries() and make_image_summary: + tf.summary.histogram("q_group_size", q_group_size) + tf.summary.histogram("m_group_size", m_group_size) + tf.summary.scalar("q_loss", q_loss) + tf.summary.scalar("m_loss", m_loss) + tf.summary.scalar("q_balance_loss", q_balance_loss) + tf.summary.scalar("m_balance_loss", m_balance_loss) + tf.summary.histogram("m_pred_used", m_pred_used) + tf.summary.histogram("m_total", m_total) + tf.summary.histogram("q_pred_used", q_pred_used) + tf.summary.histogram("q_total", q_total) + if make_image_summary: + # image summaries are expensive. + # So we restrict them to head_num<4, query_position<512, batch_index=0. + trunc_heads = min(4, num_heads) + trunc_length_q = tf.minimum(length_q, 512) + # We recompute the attention for the first example, in an inefficient + # way - masking. This lets us show pretty pictures. + # [trunc_heads, length_q, group] + q_gates_trunc = q_gates[:trunc_heads, :trunc_length_q, :] + # [trunc_heads, length_kv, group] + m_gates_trunc = m_gates[:trunc_heads, :, :] + grouping_mask = tf.matmul( + q_gates_trunc, m_gates_trunc, transpose_b=True) + q_trunc = q[:trunc_heads, :trunc_length_q, :] + k_trunc = kv[:trunc_heads, :, :depth_qk] + logits_trunc = tf.matmul(q_trunc, k_trunc, transpose_b=True) + if mask_right: + band = common_layers.ones_matrix_band_part(trunc_length_q, length_kv, + -1, 0) + trunc_bias = tf.expand_dims((1.0 - band) * -1e9, 0) + logits_trunc += trunc_bias + att_trunc = tf.nn.softmax(logits_trunc) + mask_coverage = tf.reduce_sum(grouping_mask * att_trunc) / ( + to_float(trunc_length_q) * trunc_heads) + tf.summary.scalar("coverage", mask_coverage) + att_trunc_hdr = tf.pow(att_trunc, 0.2) # for high-dynamic-range + mask_channel = grouping_mask * tf.maximum(att_trunc_hdr, 0.3) + image = tf.stack([att_trunc_hdr, mask_channel, mask_channel], axis=3) + tf.summary.image("att", image, max_outputs=trunc_heads) + # show one group for each head. 
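For orientation, the group target sizes and capacities that these losses try to balance work out as follows; the dimensions below are a worked example, not values from any config:

length_q, length_kv, num_groups = 1024, 1024, 8
memory_target_density = 2.0
multiplicative_overhead, additive_overhead = 1.25, 8.0

q_group_target_size = length_q / num_groups                            # 128.0 queries per group
m_group_target_size = length_kv * memory_target_density / num_groups   # 256.0 key/value pairs per group
capacity_q = min(length_q, int(q_group_target_size * multiplicative_overhead + additive_overhead))   # 168
capacity_m = min(length_kv, int(m_group_target_size * multiplicative_overhead + additive_overhead))  # 328
# Queries/pairs dispatched to a group beyond its capacity are dropped by the TruncatingDispatcher.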
+ att_per_group = tf.expand_dims(weights[:trunc_heads, 0, :, :], -1) + tf.summary.image( + "att_per_group_%d", + tf.pow(att_per_group, 0.2), + max_outputs=trunc_heads) + return o, extra_loss + + +def harden_attention_weights(weights, k, gumbel_noise_weight): + """Make attention weights non-0 only on the top k ones.""" + if gumbel_noise_weight > 0.: + gumbel_noise = -tf.log(-tf.log(tf.random_uniform(tf.shape(weights), + minval=1e-5, + maxval=1 - 1e-5))) + weights += gumbel_noise * gumbel_noise_weight + + # Subtract the top-kth weight and zero-out all lower ones. + # Note that currently in case of numerical ties it will retain more + # than k elements. In the future, we may want to avoid this. + weights -= common_layers.top_kth_iterative(weights, k) + weights = tf.nn.relu(weights) + # Re-normalize the weights. + weights_sum = tf.reduce_sum(weights, axis=-1, keep_dims=True) + weights_sum = tf.maximum(weights_sum, 1e-6) # Avoid division by 0. + weights /= weights_sum + return weights + + +def dot_product_attention(q, + k, + v, + bias, + dropout_rate=0.0, + image_shapes=None, + name=None, + make_image_summary=True, + save_weights_to=None, + dropout_broadcast_dims=None, + activation_dtype=None, + weight_dtype=None, + hard_attention_k=0, + gumbel_noise_weight=0.0): + """Dot-product attention. + + Args: + q: Tensor with shape [..., length_q, depth_k]. + k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must + match with q. + v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must + match with q. + bias: bias Tensor (see attention_bias()) + dropout_rate: a float. + image_shapes: optional tuple of integer scalars. + see comments for attention_image_summary() + name: an optional string + make_image_summary: True if you want an image summary. + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + dropout_broadcast_dims: an optional list of integers less than rank of q. + Specifies in which dimensions to broadcast the dropout decisions. + activation_dtype: Used to define function activation dtype when using + mixed precision. + weight_dtype: The dtype weights are stored in when using mixed precision + hard_attention_k: integer, if > 0 triggers hard attention (picking top-k) + gumbel_noise_weight: if > 0, apply Gumbel noise with weight + `gumbel_noise_weight` before picking top-k. This is a no op if + hard_attention_k <= 0. + + Returns: + Tensor with shape [..., length_q, depth_v]. + """ + with tf.variable_scope( + name, default_name="dot_product_attention", values=[q, k, v]) as scope: + logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv] + if bias is not None: + bias = common_layers.cast_like(bias, logits) + logits += bias + # If logits are fp16, upcast before softmax + logits = maybe_upcast(logits, activation_dtype, weight_dtype) + weights = tf.nn.softmax(logits, name="attention_weights") + if hard_attention_k > 0: + weights = harden_attention_weights(weights, hard_attention_k, + gumbel_noise_weight) + weights = common_layers.cast_like(weights, q) + if save_weights_to is not None: + save_weights_to[scope.name] = weights + save_weights_to[scope.name + "/logits"] = logits + # Drop out attention links for each head. 
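The core computation above (add bias, softmax, weighted sum of values) can be reproduced in a few lines of NumPy; the shapes and the causal bias here are illustrative assumptions, not part of the module:

import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

batch, heads, length, depth = 1, 2, 4, 8
rng = np.random.default_rng(0)
q = rng.normal(size=(batch, heads, length, depth))
k = rng.normal(size=(batch, heads, length, depth))
v = rng.normal(size=(batch, heads, length, depth))
bias = -1e9 * (1.0 - np.tril(np.ones((length, length))))    # e.g. a causal mask
logits = q @ k.transpose(0, 1, 3, 2) + bias                  # [batch, heads, length_q, length_kv]
weights = softmax(logits)
out = weights @ v                                            # [batch, heads, length_q, depth]
# The depth**-0.5 scaling of q is applied by callers elsewhere in this module (as in
# grouped_attention_multihead above), and dropout is omitted from this sketch.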
+ weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(weights, image_shapes) + return tf.matmul(weights, v) + + +def _generate_relative_positions_matrix(length_q, length_k, + max_relative_position, + cache=False): + """Generates matrix of relative positions between inputs.""" + if not cache: + if length_q == length_k: + range_vec_q = range_vec_k = tf.range(length_q) + else: + range_vec_k = tf.range(length_k) + range_vec_q = range_vec_k[-length_q:] + distance_mat = range_vec_k[None, :] - range_vec_q[:, None] + else: + distance_mat = tf.expand_dims(tf.range(-length_k+1, 1, 1), 0) + distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position, + max_relative_position) + # Shift values to be >= 0. Each integer still uniquely identifies a relative + # position difference. + final_mat = distance_mat_clipped + max_relative_position + return final_mat + + +def _generate_relative_positions_embeddings(length_q, length_k, depth, + max_relative_position, name, + cache=False): + """Generates tensor of size [1 if cache else length_q, length_k, depth].""" + with tf.variable_scope(name): + relative_positions_matrix = _generate_relative_positions_matrix( + length_q, length_k, max_relative_position, cache=cache) + vocab_size = max_relative_position * 2 + 1 + # Generates embedding for each relative position of dimension depth. + embeddings_table = tf.get_variable("embeddings", [vocab_size, depth]) + embeddings = tf.gather(embeddings_table, relative_positions_matrix) + return embeddings + + +def _relative_attention_inner(x, y, z, transpose): + """Relative position-aware dot-product attention inner calculation. + + This batches matrix multiply calculations to avoid unnecessary broadcasting. + + Args: + x: Tensor with shape [batch_size, heads, length or 1, length or depth]. + y: Tensor with shape [batch_size, heads, length or 1, depth]. + z: Tensor with shape [length or 1, length, depth]. + transpose: Whether to transpose inner matrices of y and z. Should be true if + last dimension of x is depth, not length. + + Returns: + A Tensor with shape [batch_size, heads, length, length or depth]. + """ + batch_size = tf.shape(x)[0] + heads = x.get_shape().as_list()[1] + length = tf.shape(x)[2] + + # xy_matmul is [batch_size, heads, length or 1, length or depth] + xy_matmul = tf.matmul(x, y, transpose_b=transpose) + # x_t is [length or 1, batch_size, heads, length or depth] + x_t = tf.transpose(x, [2, 0, 1, 3]) + # x_t_r is [length or 1, batch_size * heads, length or depth] + x_t_r = tf.reshape(x_t, [length, heads * batch_size, -1]) + # x_tz_matmul is [length or 1, batch_size * heads, length or depth] + x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose) + # x_tz_matmul_r is [length or 1, batch_size, heads, length or depth] + x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, -1]) + # x_tz_matmul_r_t is [batch_size, heads, length or 1, length or depth] + x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3]) + return xy_matmul + x_tz_matmul_r_t + + +def dot_product_attention_relative(q, + k, + v, + bias, + max_relative_position, + dropout_rate=0.0, + image_shapes=None, + save_weights_to=None, + name=None, + make_image_summary=True, + cache=False, + allow_memory=False, + hard_attention_k=0, + gumbel_noise_weight=0.0): + """Calculate relative position-aware dot-product self-attention. 
+ + The attention calculation is augmented with learned representations for the + relative position between each element in q and each element in k and v. + + Args: + q: a Tensor with shape [batch, heads, length, depth]. + k: a Tensor with shape [batch, heads, length, depth]. + v: a Tensor with shape [batch, heads, length, depth]. + bias: bias Tensor. + max_relative_position: an integer specifying the maximum distance between + inputs that unique position embeddings should be learned for. + dropout_rate: a floating point number. + image_shapes: optional tuple of integer scalars. + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + name: an optional string. + make_image_summary: Whether to make an attention image summary. + cache: whether use cache mode + allow_memory: whether to assume that recurrent memory is in use. If True, + the length dimension of k/v/bias may be longer than the queries, and it is + assumed that the extra memory entries precede the non-memory entries. + hard_attention_k: integer, if > 0 triggers hard attention (picking top-k) + gumbel_noise_weight: if > 0, apply Gumbel noise with weight + `gumbel_noise_weight` before picking top-k. This is a no op if + hard_attention_k <= 0. + + Returns: + A Tensor. + + Raises: + ValueError: if max_relative_position is not > 0. + """ + if not max_relative_position: + raise ValueError("Max relative position (%s) should be > 0 when using " + "relative self attention." % (max_relative_position)) + with tf.variable_scope( + name, default_name="dot_product_attention_relative", + values=[q, k, v]) as scope: + + # This calculation only works for self attention. + # q, k and v must therefore have the same shape, unless memory is enabled. + if not cache and not allow_memory: + q.get_shape().assert_is_compatible_with(k.get_shape()) + q.get_shape().assert_is_compatible_with(v.get_shape()) + + # Use separate embeddings suitable for keys and values. + depth = k.get_shape().as_list()[3] + length_k = common_layers.shape_list(k)[2] + length_q = common_layers.shape_list(q)[2] if allow_memory else length_k + relations_keys = _generate_relative_positions_embeddings( + length_q, length_k, depth, max_relative_position, + "relative_positions_keys", cache=cache) + relations_values = _generate_relative_positions_embeddings( + length_q, length_k, depth, max_relative_position, + "relative_positions_values", cache=cache) + + # Compute self attention considering the relative position embeddings. + logits = _relative_attention_inner(q, k, relations_keys, True) + if bias is not None: + logits += bias + weights = tf.nn.softmax(logits, name="attention_weights") + if hard_attention_k > 0: + weights = harden_attention_weights(weights, hard_attention_k, + gumbel_noise_weight) + if save_weights_to is not None: + save_weights_to[scope.name] = weights + save_weights_to[scope.name + "/logits"] = logits + weights = tf.nn.dropout(weights, 1.0 - dropout_rate) + if (not tf.get_variable_scope().reuse and + common_layers.should_generate_summaries() and + make_image_summary): + attention_image_summary(weights, image_shapes) + return _relative_attention_inner(weights, v, relations_values, False) + + +def _relative_position_to_absolute_position_masked(x): + """Helper to dot_product_self_attention_relative_v2. + + Rearrange an attention logits or weights Tensor. 
+ + The dimensions of the input represent: + [batch, heads, query_position, memory_position - query_position + length - 1] + + The dimensions of the output represent: + [batch, heads, query_position, memory_position] + + Only works with masked_attention. Undefined behavior for regions of the + input where memory_position > query_position. + + Args: + x: a Tensor with shape [batch, heads, length, length] + + Returns: + a Tensor with shape [batch, heads, length, length] + """ + batch, heads, length, _ = common_layers.shape_list(x) + x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]]) + x = tf.reshape(x, [batch, heads, 1 + length, length]) + x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1]) + return x + + +def _absolute_position_to_relative_position_masked(x): + """Helper to dot_product_self_attention_relative_v2. + + Rearrange an attention logits or weights Tensor. + + The dimensions of the input represent: + [batch, heads, query_position, memory_position] + + The dimensions of the output represent: + [batch, heads, query_position, memory_position - query_position + length - 1] + + Only works with masked_attention. Undefined behavior for regions of the + input where memory_position > query_position. + + Args: + x: a Tensor with shape [batch, heads, length, length] + + Returns: + a Tensor with shape [batch, heads, length, length] + """ + batch, heads, length, _ = common_layers.shape_list(x) + x = tf.pad(x, [[0, 0], [0, 0], [1, 0], [0, 0]]) + x = tf.reshape(x, [batch, heads, length, length + 1]) + x = tf.slice(x, [0, 0, 0, 1], [batch, heads, length, length]) + return x + + +def get_relative_embeddings_left(max_relative_position, length, depth, + num_heads, heads_share_relative_embedding, + name): + """Instantiate or retrieve relative embeddings, sliced according to length. + + Use for masked case where the relative attention is only looking left. + + Args: + max_relative_position: an Integer for the number of entries in the relative + embedding, which corresponds to the max relative distance that is + considered. + length: an Integer, specifies the length of the input sequence for which + this relative embedding is retrieved for. + depth: an Integer, specifies the depth for relative embeddings. + num_heads: an Integer, specifies the number of heads. + heads_share_relative_embedding: a Boolean specifying if the relative + embedding is shared across heads. + name: a string giving the name of the embedding variables. + + Returns: + a Tensor with shape [length, depth] + """ + initializer_stddev = depth**-0.5 + if heads_share_relative_embedding: + embedding_shape = (max_relative_position, depth) + else: + embedding_shape = (num_heads, max_relative_position, depth) + relative_embeddings = tf.get_variable( + name=name, shape=embedding_shape, + initializer=tf.random_normal_initializer(stddev=initializer_stddev)) + # Pad first before slice to avoid using tf.cond. 
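+  # If length <= max_relative_position, nothing is padded and the last
+  # `length` rows of the table are sliced out; otherwise the table is
+  # left-padded with `length - max_relative_position` rows of zeros, so
+  # positions farther away than the maximum distance fall on the zero padding.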
+ pad_length = tf.maximum(length - max_relative_position, 0) + start_slice_position = tf.maximum(max_relative_position - length, 0) + if heads_share_relative_embedding: + padded_relative_embeddings = tf.pad( + relative_embeddings, + [[pad_length, 0], [0, 0]]) + used_relative_embeddings = tf.slice( + padded_relative_embeddings, + [start_slice_position, 0], [length, -1]) + else: + padded_relative_embeddings = tf.pad( + relative_embeddings, + [[0, 0], [pad_length, 0], [0, 0]]) + used_relative_embeddings = tf.slice( + padded_relative_embeddings, + [0, start_slice_position, 0], [-1, length, -1]) + return used_relative_embeddings + + +def dot_product_self_attention_relative_v2(q, + k, + v, + bias, + max_relative_position=None, + dropout_rate=0.0, + image_shapes=None, + save_weights_to=None, + name=None, + make_image_summary=True, + dropout_broadcast_dims=None, + heads_share_relative_embedding=False, + add_relative_to_values=False): + """Calculate relative position-aware dot-product self-attention. + + Only works for masked self-attention (no looking forward). + + The attention calculation is augmented with learned representations for the + relative position between each element in q and each element in k and v. + + Args: + q: a Tensor with shape [batch, heads, length, depth]. + k: a Tensor with shape [batch, heads, length, depth]. + v: a Tensor with shape [batch, heads, length, depth]. + bias: bias Tensor. + max_relative_position: an integer indicating the maximum relative distance + to look back - changing this invalidates checkpoints + dropout_rate: a floating point number. + image_shapes: optional tuple of integer scalars. + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + name: an optional string. + make_image_summary: Whether to make an attention image summary. + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + heads_share_relative_embedding: a boolean indicating wheather to share + relative embeddings between attention heads. + add_relative_to_values: a boolean for whether to add relative component to + values. + + Returns: + A Tensor. + + Raises: + ValueError: if max_relative_position is not > 0. + """ + if not max_relative_position: + raise ValueError("Max relative position (%s) should be > 0 when using " + "relative self attention." % (max_relative_position)) + with tf.variable_scope( + name, + default_name="dot_product_self_attention_relative_v2", + values=[q, k, v]) as scope: + + # This calculation only works for self attention. + # q, k and v must therefore have the same shape. + # (Except v can have different depth.) + q.get_shape().assert_is_compatible_with(k.get_shape()) + q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) + + # Use separate embeddings suitable for keys and values. 
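+    # The key relative embeddings below feed into the logits; the value
+    # relative embeddings are only used further down when
+    # add_relative_to_values is True.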
+ _, num_heads, length, depth_k = common_layers.shape_list(k) + + # [batch, num_heads, query_length, memory_length] + logits = tf.matmul(q, k, transpose_b=True) + key_relative_embeddings = get_relative_embeddings_left( + max_relative_position, length, depth_k, num_heads, + heads_share_relative_embedding, "key_relative_embeddings") + + rel_logits = matmul_with_relative_keys(q, key_relative_embeddings, + heads_share_relative_embedding) + rel_logits = _relative_position_to_absolute_position_masked(rel_logits) + logits += rel_logits + if bias is not None: + logits += bias + + weights = tf.nn.softmax(logits, name="attention_weights") + if save_weights_to is not None: + save_weights_to[scope.name] = weights + save_weights_to[scope.name + "/logits"] = logits + # Dropping out the attention links for each of the heads. + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(weights, image_shapes) + output = tf.matmul(weights, v) + if add_relative_to_values: + # [batch, num_heads, query_length, memory_length] + relative_weights = _absolute_position_to_relative_position_masked(weights) + depth_v = common_layers.shape_list(v)[3] + value_relative_embeddings = get_relative_embeddings_left( + max_relative_position, length, depth_v, num_heads, + heads_share_relative_embedding, "value_relative_embeddings") + output += matmul_with_relative_values( + relative_weights, value_relative_embeddings, + heads_share_relative_embedding) + return output + + +def _absolute_position_to_relative_position_unmasked(x): + """Helper function for dot_product_unmasked_self_attention_relative_v2. + + Rearrange an attention logits or weights Tensor. + + The dimensions of the input represent: + [batch, heads, query_position, memory_position] + + The dimensions of the output represent: + [batch, heads, query_position, memory_position - query_position + length - 1] + + Only works with unmasked_attention. + + Args: + x: a Tensor with shape [batch, heads, length, length] + + Returns: + a Tensor with shape [batch, heads, length, 2*length-1] + """ + batch, heads, length, _ = common_layers.shape_list(x) + # padd along column + x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, length-1]]) + x_flat = tf.reshape(x, [batch, heads, length**2 + length*(length -1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = tf.pad(x_flat, [[0, 0], [0, 0], [length, 0]]) + x = tf.reshape(x_flat, [batch, heads, length, 2*length]) + x = tf.slice(x, [0, 0, 0, 1], [batch, heads, length, + 2*length -1]) + return x + + +def get_relative_embeddings_left_right(max_relative_position, length, depth, + num_heads, + heads_share_relative_embedding, + name): + """Instantiate or retrieve relative embeddings, sliced according to length. + + Use for unmasked case where the relative attention looks both left and right. + + Args: + max_relative_position: an Integer for the number of entries in the relative + embedding, which corresponds to the max relative distance that is + considered. + length: an Integer, specifies the length of the input sequence for which + this relative embedding is retrieved for. + depth: an Integer, specifies the depth for relative embeddings. + num_heads: an Integer, specifies the number of heads. + heads_share_relative_embedding: a Boolean specifying if the relative + embedding is shared across heads. 
+    name: a string giving the name of the embedding variables.
+
+  Returns:
+    a Tensor with shape [2 * length - 1, depth], or
+    [num_heads, 2 * length - 1, depth] if heads do not share the embedding.
+  """
+  initializer_stddev = depth**-0.5
+  max_relative_position_unmasked = 2 * max_relative_position - 1
+  if heads_share_relative_embedding:
+    embedding_shape = (max_relative_position_unmasked, depth)
+  else:
+    embedding_shape = (num_heads, max_relative_position_unmasked, depth)
+  relative_embeddings = tf.get_variable(
+      name=name, shape=embedding_shape,
+      initializer=tf.random_normal_initializer(stddev=initializer_stddev))
+  # Pad first before slice to avoid using tf.cond.
+  pad_length = tf.maximum(length - max_relative_position, 0)
+  slice_start_position = tf.maximum(max_relative_position-length, 0)
+  if heads_share_relative_embedding:
+    padded_relative_embeddings = tf.pad(
+        relative_embeddings,
+        [[pad_length, pad_length], [0, 0]])
+    used_relative_embeddings = tf.slice(
+        padded_relative_embeddings,
+        [slice_start_position, 0], [2 * length - 1, -1])
+  else:
+    padded_relative_embeddings = tf.pad(
+        relative_embeddings,
+        [[0, 0], [pad_length, pad_length], [0, 0]])
+    used_relative_embeddings = tf.slice(
+        padded_relative_embeddings,
+        [0, slice_start_position, 0], [-1, 2 * length - 1, -1])
+  return used_relative_embeddings
+
+
+def dot_product_unmasked_self_attention_relative_v2(
+    q, k, v, bias, max_relative_position=None, dropout_rate=0.0,
+    image_shapes=None, save_weights_to=None, name=None, make_image_summary=True,
+    dropout_broadcast_dims=None, heads_share_relative_embedding=False,
+    add_relative_to_values=False):
+  """Calculate relative position-aware dot-product self-attention.
+
+  The attention calculation is augmented with learned representations for the
+  relative position between each element in q and each element in k and v.
+
+  Args:
+    q: a Tensor with shape [batch, heads, length, depth].
+    k: a Tensor with shape [batch, heads, length, depth].
+    v: a Tensor with shape [batch, heads, length, depth].
+    bias: bias Tensor.
+    max_relative_position: an integer, the maximum relative distance
+      considered. Changing this invalidates checkpoints.
+    dropout_rate: a floating point number.
+    image_shapes: optional tuple of integer scalars.
+    save_weights_to: an optional dictionary to capture attention weights
+      for visualization; the weights tensor will be appended there under
+      a string key created from the variable scope (including name).
+    name: an optional string.
+    make_image_summary: Whether to make an attention image summary.
+    dropout_broadcast_dims: an optional list of integers less than 4
+      specifying in which dimensions to broadcast the dropout decisions.
+      Saves memory.
+    heads_share_relative_embedding: a boolean indicating whether to share
+      relative embeddings between attention heads.
+    add_relative_to_values: a boolean for whether to add relative component to
+      values.
+
+  Returns:
+    A Tensor.
+
+  Raises:
+    ValueError: if max_relative_position is not > 0.
+  """
+  if not max_relative_position:
+    raise ValueError("Max relative position (%s) should be > 0 when using "
+                     "relative self attention." % (max_relative_position))
+
+  with tf.variable_scope(
+      name,
+      default_name="dot_product_unmasked_self_attention_relative_v2",
+      values=[q, k, v]) as scope:
+
+    # This calculation only works for self attention.
+    # q, k and v must therefore have the same shape.
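+    # Unlike the masked variant, queries here attend both left and right, so
+    # get_relative_embeddings_left_right provides a table with
+    # 2 * max_relative_position - 1 entries.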
+    q.get_shape().assert_is_compatible_with(k.get_shape())
+    q.get_shape().assert_is_compatible_with(v.get_shape())
+
+    # [batch, num_heads, query_length, memory_length]
+    logits = tf.matmul(q, k, transpose_b=True)
+
+    length = common_layers.shape_list(q)[2]
+    k_shape = common_layers.shape_list(k)
+    num_heads = k_shape[1]
+    depth_k = k_shape[-1]
+
+    key_relative_embeddings = get_relative_embeddings_left_right(
+        max_relative_position, length, depth_k, num_heads,
+        heads_share_relative_embedding,
+        "key_relative_embeddings")
+    unmasked_rel_logits = matmul_with_relative_keys(
+        q, key_relative_embeddings, heads_share_relative_embedding)
+    unmasked_rel_logits = _relative_position_to_absolute_position_unmasked(
+        unmasked_rel_logits)
+    logits += unmasked_rel_logits
+
+    if bias is not None:
+      logits += bias
+    weights = tf.nn.softmax(logits, name="attention_weights")
+    if save_weights_to is not None:
+      save_weights_to[scope.name] = weights
+      save_weights_to[scope.name + "/logits"] = logits
+    # dropping out the attention links for each of the heads
+    weights = common_layers.dropout_with_broadcast_dims(
+        weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
+    # relative_weights.set_shape([None, None, None, max_length])
+    if common_layers.should_generate_summaries() and make_image_summary:
+      attention_image_summary(weights, image_shapes)
+    ret = tf.matmul(weights, v)
+    if add_relative_to_values:
+      # Adds the contribution of the weighted relative embeddings to the values.
+      # [batch, num_heads, query_length, 2*memory_length-1]
+      relative_weights = _absolute_position_to_relative_position_unmasked(
+          weights)
+      depth_v = common_layers.shape_list(v)[3]
+      value_relative_embeddings = get_relative_embeddings_left_right(
+          max_relative_position, length, depth_v, num_heads,
+          heads_share_relative_embedding, "value_relative_embeddings")
+      ret += matmul_with_relative_values(
+          relative_weights, value_relative_embeddings,
+          heads_share_relative_embedding)
+    return ret
+
+
+def _matmul_with_relative_keys_2d(x, y, heads_share_relative_embedding):
+  """Helper function for dot_product_unmasked_self_attention_relative_2d."""
+  if heads_share_relative_embedding:
+    ret = tf.einsum("bhxyd,md->bhxym", x, y)
+  else:
+    ret = tf.einsum("bhxyd,hmd->bhxym", x, y)
+  return ret
+
+
+def dot_product_unmasked_self_attention_relative_2d(
+    q, k, v, bias, max_relative_position=None, dropout_rate=0.0,
+    image_shapes=None, name=None, make_image_summary=True,
+    dropout_broadcast_dims=None, heads_share_relative_embedding=False,
+    add_relative_to_values=False):
+  """Calculate relative position unmasked dot-product self-attention 2d.
+
+  The attention calculation is augmented with learned representations for the
+  relative position between each element in q and each element in k and v in
+  height and width dimensions. For query index (i, j) and key index (l, m),
+  the logit is q_{ij} k_{lm}^T + q_{ij} rh_{l-i}^T + q_{ij} rw_{m-j}^T, where
+  rh and rw are the sets of relative embeddings in the height and width
+  spatial dimensions, respectively.
+
+  Args:
+    q: a Tensor with shape [batch, heads, height, width, depth].
+    k: a Tensor with shape [batch, heads, height, width, depth].
+    v: a Tensor with shape [batch, heads, height, width, depth].
+    bias: bias Tensor.
+    max_relative_position: an integer, the maximum relative distance
+      considered. Changing this invalidates checkpoints.
+    dropout_rate: a floating point number.
+    image_shapes: optional tuple of integer scalars.
+    name: an optional string.
+ make_image_summary: Whether to make an attention image summary. + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + heads_share_relative_embedding: a boolean indicating wheather to share + relative embeddings between attention heads. + add_relative_to_values: a boolean for adding relative embeddings to values. + + Returns: + [batch, heads, height, width, depth] tensor, the output of attention. + height_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing + settings, which are the relative embeddings for height. + width_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing + settings, which are the relative embeddings for width. + + Raises: + ValueError: if max_relative_position is not > 0. + """ + if not max_relative_position: + raise ValueError("Max relative position (%s) should be > 0 when using " + "relative self attention." % (max_relative_position)) + + if add_relative_to_values: + raise ValueError("Adding relative embeddings to values is not implemented") + + with tf.variable_scope( + name, + default_name="dot_product_self_attention_relative_v2", + values=[q, k, v]): + + # This calculation only works for self attention. + # q, k and v must therefore have the same shape. + q.get_shape().assert_is_compatible_with(k.get_shape()) + q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) + + (height, width) = (common_layers.shape_list(q)[2], + common_layers.shape_list(q)[3]) + k_shape = common_layers.shape_list(k) + num_heads = k_shape[1] + depth_k = k_shape[-1] + depth_v = common_layers.shape_list(v)[-1] + # flatten height width + flatten_hw = lambda x, d: tf.reshape(x, [-1, num_heads, height*width, d]) + # [batch, num_heads, query_length, memory_length] + logits = tf.matmul(flatten_hw(q, depth_k), flatten_hw(k, depth_k), + transpose_b=True) + + def _compute_2d_relative_logits( + query, key_relative_embeddings, height, width, + heads_share_relative_embedding, transpose_mask): + """compute relative logits.""" + unmasked_rel_logits = _matmul_with_relative_keys_2d( + query, key_relative_embeddings, heads_share_relative_embedding) + # collapse height and heads + unmasked_rel_logits = tf.reshape(unmasked_rel_logits, + [-1, num_heads*height, width, + 2*width-1]) + unmasked_rel_logits = ( + _relative_position_to_absolute_position_unmasked( + unmasked_rel_logits)) + # shape it back for tiling + unmasked_rel_logits = tf.reshape( + unmasked_rel_logits, [-1, num_heads, height, width, width]) + # tiling it height times + unmasked_rel_logits = tf.expand_dims( + unmasked_rel_logits, axis=3) + unmasked_rel_logits = tf.tile(unmasked_rel_logits, + [1, 1, 1, height, 1, 1]) + # bringing it to the right shape for adding to the logits. + unmasked_rel_logits = tf.transpose(unmasked_rel_logits, transpose_mask) + unmasked_rel_logits = tf.reshape(unmasked_rel_logits, + [-1, num_heads, height*width, + height*width]) + return unmasked_rel_logits + + # Relative logits in width dimension first. + width_key_relative_embeddings = get_relative_embeddings_left_right( + max_relative_position, width, depth_k, num_heads, + heads_share_relative_embedding, + "width_key_relative_embeddings") + # [batch, heads, height, 2*width-1, 2*width-1] + width_unmasked_rel_logits = _compute_2d_relative_logits( + q, width_key_relative_embeddings, height, width, + heads_share_relative_embedding, [0, 1, 2, 4, 3, 5]) + logits += width_unmasked_rel_logits + # Relative logits in height dimension next. 
For ease, we transpose + # height and width and repeat the above steps, and transpose to eventually + # put the logits in their right positions. + # [batch, heads, height, 2*height-1, 2*width-1] + height_key_relative_embeddings = get_relative_embeddings_left_right( + max_relative_position, height, depth_k, num_heads, + heads_share_relative_embedding, + "height_key_relative_embeddings") + + height_unmasked_rel_logits = _compute_2d_relative_logits( + tf.transpose(q, [0, 1, 3, 2, 4]), + height_key_relative_embeddings, + width, + height, + heads_share_relative_embedding, [0, 1, 4, 2, 5, 3]) + logits += height_unmasked_rel_logits + if bias is not None: + logits += bias + weights = tf.nn.softmax(logits, name="attention_weights") + # dropping out the attention links for each of the heads + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(weights, image_shapes) + ret = tf.matmul(weights, flatten_hw(v, depth_v)) + # reshape back the same spatial dimensions as q + return ( + tf.reshape(ret, [-1, num_heads, height, width, depth_v]), + height_key_relative_embeddings, + width_key_relative_embeddings) + + +def _split_along_width(x_left_right_blocks): + """Helper function for local 2d attention. + + Takes a tensor of [batch, heads, num_h_blocks, num_w_blocks, + height, width, depth] and returns two tensors which contain every alternate + position along the width + + + Args: + x_left_right_blocks: A [batch, num_h_blocks, num_w_blocks, + height, width, depth] tensor + + Returns: + x_left_blocks, x_right_blocks: two [batch, num_h_blocks, + (num_w_blocks-2)/2, height, width, + depth] tensors + + """ + (_, x_num_h_blocks, x_num_outer_w_blocks, x_memory_flange_h, + x_memory_flange_w, depth) = common_layers.shape_list(x_left_right_blocks) + x_num_w_blocks = (x_num_outer_w_blocks-1)//2 + # get it ready for splitting the left and right memory blocks + x_left_right_blocks = tf.reshape(x_left_right_blocks, + [-1, + x_num_h_blocks, + x_num_outer_w_blocks//2, 2, + x_memory_flange_h, + x_memory_flange_w, depth]) + + x_left_blocks, x_right_blocks = tf.split(x_left_right_blocks, + num_or_size_splits=2, axis=3) + x_left_blocks = tf.squeeze(x_left_blocks, axis=3) + x_right_blocks = tf.squeeze(x_right_blocks, axis=3) + x_left_blocks = tf.slice(x_left_blocks, [0, 0, 0, 0, 0, 0], + [-1, -1, x_num_w_blocks, -1, -1, -1]) + x_right_blocks = tf.slice(x_right_blocks, [0, 0, 1, 0, 0, 0], + [-1, -1, x_num_w_blocks, -1, -1, -1]) + return x_left_blocks, x_right_blocks + + +def _get_left_right_blocks(x): + """Helper function. Assumes that memory_flange is half of query sizes. + + This function splits the tensor of width 'n' into two halves, where the + first half gets the width indices 0, 2, 4.. and the second half gets the + width indices 3, 5, ... We also fuse two blocks along the h dimension. + + Args: + x: a 6-d tensor. 
+
+  Returns:
+    x_left_blocks, x_right_blocks: Two 6-d tensors
+  """
+  (_, x_num_outer_h_blocks, x_num_outer_w_blocks, x_memory_flange_h,
+   x_memory_flange_w, depth) = common_layers.shape_list(x)
+  x_left_right_blocks = tf.slice(x,
+                                 [0, 1, 0, 0, 0, 0],
+                                 [-1, x_num_outer_h_blocks-2, -1, -1,
+                                  -1, -1])
+  num_blocks_h = (x_num_outer_h_blocks-2)//2
+  x_left_right_blocks = tf.reshape(x_left_right_blocks,
+                                   [-1,
+                                    num_blocks_h,
+                                    2, x_num_outer_w_blocks,
+                                    x_memory_flange_h,
+                                    x_memory_flange_w, depth])
+  x_left_right_blocks = tf.transpose(x_left_right_blocks,
+                                     [0, 1, 3, 2, 4, 5, 6])
+  x_left_right_blocks = tf.reshape(x_left_right_blocks,
+                                   [-1, num_blocks_h,
+                                    x_num_outer_w_blocks, 2*x_memory_flange_h,
+                                    x_memory_flange_w, depth])
+  # get it ready for splitting the left and right memory blocks
+  x_left_blocks, x_right_blocks = _split_along_width(x_left_right_blocks)
+
+  return x_left_blocks, x_right_blocks
+  # return x_left_right_blocks
+
+
+def _extract_blocks(x, block_h, block_w):
+  """Helper function for local 2d attention.
+
+  Args:
+    x: a [batch, height, width, depth] tensor
+    block_h: An integer. block height
+    block_w: An integer. block width
+
+  Returns:
+    a [batch, height/block_h, width/block_w, block_h, block_w, depth] tensor
+  """
+  (_, height, width, depth) = common_layers.shape_list(x)
+  assert height % block_h == 0
+  assert width % block_w == 0
+  x = tf.reshape(x, [-1, height//block_h, block_h,
+                     width//block_w, block_w, depth])
+  return tf.transpose(x, [0, 1, 3, 2, 4, 5])
+
+
+def get_2d_local_memory(x, query_shape, memory_flange):
+  """Stitches together the local 2d memory blocks.
+
+  Args:
+    x: a [batch, height, width, depth] tensor
+    query_shape: 2-d integer list of query shape
+    memory_flange: 2-d integer list of memory flanges
+
+  Returns:
+    x: A [batch, num_h_blocks, num_w_blocks,
+      query_shape[0]+2*memory_flange[0], query_shape[1]+2*memory_flange[1],
+      depth] tensor.
+ """ + (_, height, width, depth_x) = common_layers.shape_list(x) + x_center_blocks = _extract_blocks(x, query_shape[0], query_shape[1]) + # add extra padding to x so that we can extract the memory region + # around the center + paddings = [[0, 0], [memory_flange[0], memory_flange[0]], + [memory_flange[1], memory_flange[1]], [0, 0]] + padded_x = tf.pad(x, paddings) + padded_x.set_shape([None, height+2*memory_flange[0], + width+2*memory_flange[1], depth_x]) + x_outer_memory_blocks = _extract_blocks(padded_x, + memory_flange[0], memory_flange[1]) + # We'll extract left and right memory blocks, top and bottom memory blocks, + # and then the corner memory blocks + + # Each of these after will have shape + # [batch, num_h_blocks, num_w_blocks, query_shape[0], + # memory_flange[1], depth] + x_left_blocks, x_right_blocks = _get_left_right_blocks( + x_outer_memory_blocks) + t_hw_block = lambda x: tf.transpose(x, [0, 2, 1, 4, 3, 5]) + # now to get top and bottom blocks, we should just transpose the outer + # blocks, call the same function and transpose back to get shape + # [batch, num_h_blocks, num_w_blocks, memory_flange[0], + # query_shape[1], depth] + x_top_center_blocks, x_bottom_center_blocks = ( + map(t_hw_block, _get_left_right_blocks( + t_hw_block(x_outer_memory_blocks)))) + + # now to get the corner blocks + x_left_corner_blocks, x_right_corner_blocks = _split_along_width( + x_outer_memory_blocks) + # now to extract top and bottom for both k and v + # we need to transpose because _split_along_width separates along + # the width + # each of these should have shape [batch, num_h_blocks, + # num_w_blocks, memory_flange[0], memory_flange[1], depth] + + t_hw = lambda x: tf.transpose(x, [0, 2, 1, 3, 4, 5]) + x_top_left_corner_blocks, x_bottom_left_corner_blocks = ( + map(t_hw, _split_along_width(t_hw(x_left_corner_blocks)))) + x_top_right_corner_blocks, x_bottom_right_corner_blocks = ( + map(t_hw, _split_along_width(t_hw(x_right_corner_blocks)))) + + # The memory is top_left top_center top_right + # left_center middle right_center + # bottom_left bottom_center bottom_right + # Assembling the above row by row + # first [x_top_left, x_top, x_top_right] + # to get [batch, num_h_blocks, num_w_blocks, memory_flange[0], + # query_shape[1]+2*memory_flange[1], depth] + # then [x_left, x_center, x_right] + # then [x_bottom_left, x_bottom, x_bottom_right] + x_top_memory = tf.concat( + [x_top_left_corner_blocks, + x_top_center_blocks, + x_top_right_corner_blocks], axis=4) + x_middle_memory = tf.concat( + [x_left_blocks, x_center_blocks, x_right_blocks], axis=4) + x_bottom_memory = tf.concat( + [x_bottom_left_corner_blocks, + x_bottom_center_blocks, + x_bottom_right_corner_blocks], axis=4) + + # concat along height + x = tf.concat([x_top_memory, x_middle_memory, x_bottom_memory], axis=3) + return x + + +def get_2d_local_memory_v2(x, query_shape, memory_flange): + """Gathering memory blocks around query blocks. flange is half of query . + + Only works if memory flanges are half of query sizes. + + Args: + x: a [batch, height, width, depth tensor] + query_shape: 2-d integer list of query shape + memory_flange: 2-d integer list of memory flanges + + Returns: + x: A [batch, num_h_blocks, num_w_blocks, + query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]] + tensor. 
+ """ + (_, height, width, depth_x) = common_layers.shape_list(x) + # add extra padding to x so that we can extract the memory region + # around the center + paddings = [[0, 0], [memory_flange[0], memory_flange[0]], + [memory_flange[1], memory_flange[1]], [0, 0]] + padded_x = tf.pad(x, paddings) + padded_x.set_shape([None, height+2*memory_flange[0], + width+2*memory_flange[1], depth_x]) + num_h_memory_blocks = height//query_shape[0] + 1 + num_w_memory_blocks = width//query_shape[1] + 1 + x_memory_blocks = _extract_blocks(padded_x, + query_shape[0], query_shape[1]) + x_width_blocks = tf.split(x_memory_blocks, num_w_memory_blocks, + 2) + x_left_width = tf.concat(x_width_blocks[:num_w_memory_blocks - 1], axis=2) + x_right_width = tf.concat(x_width_blocks[1:], axis=2) + x_memory_blocks = tf.concat([x_left_width, x_right_width], axis=4) + + x_height_blocks = tf.split(x_memory_blocks, num_h_memory_blocks, 1) + x_top_height = tf.concat(x_height_blocks[:num_h_memory_blocks - 1], axis=1) + x_bottom_height = tf.concat(x_height_blocks[1:], axis=1) + x = tf.concat([x_top_height, x_bottom_height], axis=3) + + return x + + +def dot_product_unmasked_attention_local_2d_tpu( + q, k, v, bias, max_relative_position=None, query_shape=(8, 8), + dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=False, + dropout_broadcast_dims=None): + """Calculate unmasked dot-product local self-attention 2d on tpu. + + Args: + q: a Tensor with shape [batch, heads, height, width, depth]. + k: a Tensor with shape [batch, heads, height, width, depth]. + v: a Tensor with shape [batch, heads, height, width, depth]. + bias: bias Tensor. + max_relative_position: an integer the max relative embedding considered. + Changing this invalidates checkpoints. + query_shape: a two tuple indicating query shape + dropout_rate: a floating point number. + image_shapes: optional tuple of integer scalars. + name: an optional string. + make_image_summary: Whether to make an attention image summary. + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + + Returns: + [batch, heads, height, width, depth] tensor, the output of attention. + + """ + if max_relative_position: + raise ValueError("Relative local 2d attention not implemented") + + with tf.variable_scope( + name, + default_name="dot_product_unmasked_attention_local_2d_tpu", + values=[q, k, v]): + + # This calculation only works for self attention. + # q, k and v must therefore have the same shape. + q.get_shape().assert_is_compatible_with(k.get_shape()) + q.get_shape().assert_is_compatible_with(v.get_shape()) + orig_q_shape = common_layers.shape_list(q) + # Pad query, key, value to ensure multiple of corresponding lengths. 
+ memory_flange = [int(query_shape[0]//2), int(query_shape[1]//2)] + q = pad_to_multiple_2d(q, query_shape) + k = pad_to_multiple_2d(k, query_shape) + v = pad_to_multiple_2d(v, query_shape) + q_shape = common_layers.shape_list(q) + (height, width) = (q_shape[2], + q_shape[3]) + _, num_heads, height, width, depth_k = common_layers.shape_list(k) + depth_v = common_layers.shape_list(v)[-1] + num_h_blocks = height//query_shape[0] + num_w_blocks = width//query_shape[1] + # Extract center queries, keys, and values + q = tf.reshape(q, [-1, height, width, depth_k]) + queries = _extract_blocks( + q, query_shape[0], query_shape[1]) + k = tf.reshape(k, [-1, height, width, depth_k]) + keys = get_2d_local_memory_v2( + k, query_shape, memory_flange) + v = tf.reshape(v, [-1, height, width, depth_v]) + values = get_2d_local_memory_v2( + v, query_shape, memory_flange) + memory_h = query_shape[0] + 2*memory_flange[0] + memory_w = query_shape[1] + 2*memory_flange[1] + queries = tf.reshape(queries, [-1, num_heads, num_h_blocks, num_w_blocks, + query_shape[0]*query_shape[1], depth_k]) + keys = tf.reshape(keys, [-1, num_heads, num_h_blocks, num_w_blocks, + memory_h*memory_w, depth_k]) + values = tf.reshape(values, [-1, num_heads, num_h_blocks, num_w_blocks, + memory_h*memory_w, depth_v]) + logits = tf.matmul(queries, keys, transpose_b=True) + if bias is not None: + logits += bias + + weights = tf.nn.softmax(logits, name="attention_weights") + # Dropping out the attention links for each of the heads + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(weights, image_shapes) + ret = tf.matmul(weights, values) + # we need to get it back to shape [batch, heads, height, width] + ret = tf.reshape(ret, [-1, num_heads, num_h_blocks, num_w_blocks, + query_shape[0], query_shape[1], depth_v]) + ret = tf.transpose(ret, [0, 1, 2, 4, 3, 5, 6]) + ret = tf.reshape(ret, [-1, num_heads, num_h_blocks*query_shape[0], + num_w_blocks*query_shape[1], depth_v]) + # slice if padding was introduced + ret = tf.slice(ret, [0, 0, 0, 0, 0], [-1, -1, orig_q_shape[2], + orig_q_shape[3], -1]) + return ret + + +def dot_product_unmasked_attention_local_2d_tpu_simple( + x, bias, total_key_depth, total_value_depth, num_heads, + query_shape=(8, 8), + dropout_rate=0.0, image_shapes=None, make_image_summary=False, + dropout_broadcast_dims=None): + + """Calculate simple unmasked dot-product local self-attention 2d on tpu. + + The query, key, and value blocks are the same. We do not do a second linear + transformation after computing the values + + Args: + x: a Tensor with shape [batch, height, width, depth]. + bias: bias Tensor. + total_key_depth: the dimensions of the keys + total_value_depth: the dimensions of the values + num_heads: number of heads + query_shape: a two tuple indicating query shape + dropout_rate: a floating point number. + image_shapes: optional tuple of integer scalars. + make_image_summary: Whether to make an attention image summary. + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + + Returns: + ret: [batch, height, width, total_value_depth] tensor, + the output of attention. 
+ q: [batch, height, width, total_key_depth] query tensor + k: [batch, height, width, total_key_depth] key tensor + v: [batch, height, width, total_value_depth] value tensor + + """ + # This calculation only works for self attention. + # q, k and v must therefore have the same shape. + orig_x_shape = common_layers.shape_list(x) + # Pad query, key, value to ensure multiple of corresponding lengths if + # necessary + is_padded = False + if (orig_x_shape[1]%query_shape[0]) != 0 or ( + orig_x_shape[2]%query_shape[1]) != 0: + x = pad_to_multiple_2d(x, query_shape) + is_padded = True + _, height, width, depth = common_layers.shape_list(x) + assert depth%num_heads == 0 + num_h_blocks = height//query_shape[0] + num_w_blocks = width//query_shape[1] + # Extract center queries, keys, and values + x_blocks = _extract_blocks(x, query_shape[0], query_shape[1]) + x_blocks = tf.reshape(x_blocks, [-1, query_shape[0]*query_shape[1], depth]) + q, k, v = compute_qkv(x_blocks, None, total_key_depth, total_value_depth) + hsplit = lambda x: split_heads(x, num_heads) + q, k, v = map(hsplit, [q, k, v]) + logits = tf.matmul(q, k, transpose_b=True) + if bias is not None: + logits += bias + weights = tf.nn.softmax(logits, name="attention_weights") + # Dropping out the attention links for each of the heads + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(weights, image_shapes) + output = tf.matmul(weights, v) + output = combine_heads(output) + # we need to get it back to shape [batch, height, width] + ret = tf.reshape(output, [-1, num_h_blocks, num_w_blocks, + query_shape[0], query_shape[1], total_value_depth]) + + ret = tf.transpose(ret, [0, 1, 3, 2, 4, 5]) + ret = tf.reshape(ret, [-1, num_h_blocks*query_shape[0], + num_w_blocks*query_shape[1], total_value_depth]) + # slice if padding was introduced + if is_padded: + ret = tf.slice(ret, [0, 0, 0, 0], [-1, orig_x_shape[1], + orig_x_shape[2], -1]) + return ret, q, k, v + + +def masked_within_block_local_attention_1d(q, k, v, block_length=64, name=None): + """Attention to the source and a neighborhood to the left within a block. + + The sequence is divided into blocks of length block_length. Attention for a + given query position can only see memory positions less than or equal to the + query position in the corresponding block. + + Args: + q: a Tensor with shape [batch, heads, length, depth_k] + k: a Tensor with shape [batch, heads, length, depth_k] + v: a Tensor with shape [batch, heads, length, depth_v] + block_length: an integer + name: an optional string + + Returns: + a Tensor of shape [batch, heads, length, depth_v] + """ + with tf.variable_scope( + name, default_name="within_local_attention_1d", values=[q, k, v]): + batch, heads, length, depth_k = common_layers.shape_list(q) + depth_v = common_layers.shape_list(v)[-1] + if isinstance(block_length, tf.Tensor): + const = contrib.util().constant_value(block_length) + if const is not None: + block_length = int(const) + + # Pad query, key, value to ensure multiple of block length. + original_length = length + padding_size = tf.mod(-length, block_length) + length += padding_size + padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] + q = tf.pad(q, padding) + k = tf.pad(k, padding) + v = tf.pad(v, padding) + + # Compute attention for all subsequent query blocks. 
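+    # Each block attends only within itself; the lower-triangular bias added
+    # below prevents a query from seeing later positions in its own block.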
+    num_blocks = tf.div(length, block_length)
+    q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k])
+    k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k])
+    v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v])
+    # [batch, heads, num_blocks, block_length, block_length]
+    attention = tf.matmul(q, k, transpose_b=True)
+    attention += tf.reshape(attention_bias_lower_triangle(block_length),
+                            [1, 1, 1, block_length, block_length])
+    attention = tf.nn.softmax(attention)
+    # [batch, heads, num_blocks, block_length, depth_v]
+    output = tf.matmul(attention, v)
+    output = tf.reshape(output, [batch, heads, -1, depth_v])
+
+    # Remove the padding if introduced.
+    output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
+    output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in
+                      (batch, heads, length, depth_v)])
+    return output
+
+
+def _relative_position_to_absolute_position_unmasked(x):
+  """Converts tensor from relative to absolute indexing for local attention.
+
+  Args:
+    x: a Tensor of shape [batch (or batch*num_blocks), heads,
+       length, 2 * length - 1]
+
+  Returns:
+    A Tensor of shape [batch (or batch*num_blocks), heads, length, length]
+  """
+  x_shape = common_layers.shape_list(x)
+  batch = x_shape[0]
+  heads = x_shape[1]
+  length = x_shape[2]
+  # Concat columns of pad to shift from relative to absolute indexing.
+  col_pad = tf.zeros((batch, heads, length, 1))
+  x = tf.concat([x, col_pad], axis=3)
+
+  # Concat extra elements so as to add up to shape (len+1, 2*len-1).
+  flat_x = tf.reshape(x, [batch, heads, length * 2 * length])
+  flat_pad = tf.zeros((batch, heads, length-1))
+  flat_x_padded = tf.concat([flat_x, flat_pad], axis=2)
+
+  # Reshape and slice out the padded elements.
+  final_x = tf.reshape(flat_x_padded, [batch, heads, length+1, 2*length-1])
+  final_x = final_x[:, :, :, length-1:]
+  final_x = final_x[:, :, :length, :]
+  return final_x
+
+
+def masked_local_attention_1d(q,
+                              k,
+                              v,
+                              block_length=128,
+                              make_image_summary=False,
+                              dropout_rate=0.,
+                              name=None):
+  """Attention to the source position and a neighborhood to the left of it.
+
+  The sequence is divided into blocks of length block_length. Attention for a
+  given query position can only see memory positions less than or equal to the
+  query position, in the corresponding block and the previous block.
+
+  Args:
+    q: a Tensor with shape [batch, heads, length, depth_k]
+    k: a Tensor with shape [batch, heads, length, depth_k]
+    v: a Tensor with shape [batch, heads, length, depth_v]
+    block_length: an integer
+    make_image_summary: a boolean, whether to make an attention image summary.
+    dropout_rate: Dropout rate for attention dropout
+    name: an optional string
+
+  Returns:
+    a Tensor of shape [batch, heads, length, depth_v]
+  """
+  with tf.variable_scope(
+      name, default_name="local_attention_1d", values=[q, k, v]):
+    batch, heads, length, depth_k = common_layers.shape_list(q)
+    depth_v = common_layers.shape_list(v)[-1]
+    if isinstance(block_length, tf.Tensor):
+      const = contrib.util().constant_value(block_length)
+      if const is not None:
+        block_length = int(const)
+    # If (length < 2 * block_length), then we use only one block.
+    if isinstance(length, int) and isinstance(block_length, int):
+      block_length = length if length < block_length * 2 else block_length
+    else:
+      block_length = tf.where(
+          tf.less(length, block_length * 2), length, block_length)
+
+    # Pad query, key, value to ensure multiple of block length.
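+    # For example, with length 300 and block_length 128 the padding_size below
+    # is 84, giving a padded length of 384 (three blocks); the padding is
+    # sliced off again before returning.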
+ original_length = length + padding_size = tf.mod(-length, block_length) + length += padding_size + padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] + q = tf.pad(q, padding) + k = tf.pad(k, padding) + v = tf.pad(v, padding) + + if isinstance(length, int) and isinstance(block_length, int): + num_blocks = length // block_length + else: + num_blocks = tf.div(length, block_length) + + # Compute attention for the first query block. + first_q = tf.slice(q, [0, 0, 0, 0], [-1, -1, block_length, -1]) + first_k = tf.slice(k, [0, 0, 0, 0], [-1, -1, block_length, -1]) + first_v = tf.slice(v, [0, 0, 0, 0], [-1, -1, block_length, -1]) + + first_output = dot_product_attention( + first_q, + first_k, + first_v, + attention_bias_lower_triangle(block_length), + dropout_rate=dropout_rate, + make_image_summary=make_image_summary, + name="first_block") + + # Compute attention for all subsequent query blocks. + q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) + k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) + v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) + + local_k = _make_local_block(k, depth_k, batch, heads, num_blocks, + block_length) + local_v = _make_local_block(v, depth_v, batch, heads, num_blocks, + block_length) + tail_q = tf.slice(q, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1]) + tail_q = tf.reshape(tail_q, + [batch, heads, num_blocks - 1, block_length, depth_k]) + local_length = common_layers.shape_list(local_k)[3] + + # make sure source_pos <= target_pos + good_part = common_layers.ones_matrix_band_part( + block_length, + local_length, + -1, + block_length, + out_shape=[1, 1, 1, block_length, local_length]) + bias = (1.0 - good_part) * -1e9 + # TODO(noam): figure out how to show a summary for the remaining blocks. + # The naive way currently causes errors due to empty tensors. + # output: [batch, heads, num_blocks-1, block_length, depth_v] + tail_output = dot_product_attention( + tail_q, + local_k, + local_v, + bias, + dropout_rate=dropout_rate, + make_image_summary=False, + name="tail_block") + tail_output = tf.reshape( + tail_output, [batch, heads, (num_blocks - 1) * block_length, depth_v]) + output = tf.concat([first_output, tail_output], axis=2) + + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) + output = tf.reshape(output, [batch, heads, original_length, depth_v]) + return output + + +def _make_local_block(x, depth, batch, heads, num_blocks, block_length): + """Helper function to create a local version of the keys or values for 1d.""" + prev_block = tf.slice(x, [0, 0, 0, 0, 0], + [-1, -1, num_blocks - 1, -1, -1]) + cur_block = tf.slice(x, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1]) + local_block = tf.concat([prev_block, cur_block], 3) + return tf.reshape(local_block, + [batch, heads, num_blocks - 1, block_length * 2, depth]) + + +def masked_relative_local_attention_1d(q, + k, + v, + block_length=128, + make_image_summary=False, + dropout_rate=0., + heads_share_relative_embedding=False, + add_relative_to_values=False, + name=None): + """Masked local 1d attention with relative positions. + + The sequence is divided into blocks of length block_size. + Attention for a given query position can only see memory positions + less than or equal to the query position, in the corresponding block + and the previous block. + + If mask_right is True, then a target position cannot see greater source + positions. 
+
+  Args:
+    q: a Tensor with shape [batch, heads, length, depth_k]
+    k: a Tensor with shape [batch, heads, length, depth_k]
+    v: a Tensor with shape [batch, heads, length, depth_v]
+    block_length: an integer
+    make_image_summary: a boolean, whether to make an attention image summary.
+    dropout_rate: Dropout rate for attention dropout
+    heads_share_relative_embedding: a boolean for sharing relative embeddings.
+    add_relative_to_values: a boolean for whether to add relative component to
+      values.
+    name: an optional string
+
+  Returns:
+    a Tensor of shape [batch, heads, length, depth_v]
+
+  Raises:
+    ValueError: when the name for the variable scope is not passed.
+  """
+  if not name:
+    raise ValueError("Name must be assigned since reuse for variable scope is "
+                     "set to tf.AUTO_REUSE, in order to reuse relative "
+                     "embeddings of keys and values.")
+
+  # Reuse flag is set to auto_reuse to reuse relative embeddings of keys and
+  # values across blocks (first and tail blocks).
+  with tf.variable_scope(
+      name, default_name="masked_relative_local_attention_1d",
+      values=[q, k, v], reuse=tf.AUTO_REUSE):
+
+    default_block_length = block_length
+    batch = common_layers.shape_list(q)[0]
+    heads = common_layers.shape_list(q)[1]
+    length = common_layers.shape_list(q)[2]
+    # If (length < 2 * block_length), then we use only one block.
+    if isinstance(length, int) and isinstance(block_length, int):
+      block_length = length if length < block_length * 2 else block_length
+    else:
+      block_length = tf.where(
+          tf.less(length, block_length * 2), length, block_length)
+    depth_k = common_layers.shape_list(k)[3]
+    depth_v = common_layers.shape_list(v)[3]
+    original_length = length
+    padding_size = tf.mod(-length, block_length)
+    length += padding_size
+    padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]]
+    q = tf.pad(q, padding)
+    k = tf.pad(k, padding)
+    v = tf.pad(v, padding)
+
+    num_blocks = length // block_length
+    # Compute attention for the first query block.
+    first_q = tf.slice(q, [0, 0, 0, 0], [-1, -1, block_length, -1])
+    first_k = tf.slice(k, [0, 0, 0, 0], [-1, -1, block_length, -1])
+    first_v = tf.slice(v, [0, 0, 0, 0], [-1, -1, block_length, -1])
+    # Relative embeddings will be used later as well.
+    # TODO(avaswani,annahuang): check why 2*bl was breaking for music
+    # Needs to be known at static shape inference time, hence cannot be
+    # 2 * block_length.
+    rel_embed_length = 4 * default_block_length
+    # We only multiply with the needed embeddings as we slice them out.
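+    # The same "relative_embeddings" variable is reused (via AUTO_REUSE) for
+    # the first block and the tail blocks; each call slices out only the rows
+    # it needs (block_length here, 2 * block_length for the tail blocks).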
+ first_rel_embeddings = get_relative_embeddings_left( + rel_embed_length, block_length, depth_k, heads, + heads_share_relative_embedding, "relative_embeddings") + first_rel_logits = matmul_with_relative_keys( + first_q, first_rel_embeddings, heads_share_relative_embedding) + first_logits = tf.matmul(first_q, first_k, transpose_b=True) + first_logits += ( + _relative_position_to_absolute_position_masked(first_rel_logits)) + # adding a mask + first_logits += ( + common_layers.cast_like(attention_bias_lower_triangle(block_length), + first_logits)) + first_att = tf.nn.softmax(first_logits, + name="first_attention_weights") + # dropping out the attention links for each of the heads + first_att = common_layers.dropout_with_broadcast_dims( + first_att, 1.0 - dropout_rate, + broadcast_dims=None) + # only call image summary for the first block + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(first_att, None) + first_output = tf.matmul(first_att, first_v) + + # compute attention for all subsequent query blocks. + q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) + k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) + v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) + local_k = _make_local_block(k, depth_k, batch, heads, num_blocks, + block_length) + local_v = _make_local_block(v, depth_v, batch, heads, num_blocks, + block_length) + tail_q = tf.slice(q, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1]) + tail_q = tf.reshape(tail_q, + [batch, heads, num_blocks - 1, block_length, depth_k]) + local_length = common_layers.shape_list(local_k)[3] + + # collapsing num blocks and batch size so that we can reuse + # functions + def _reshape_for_relative(x): + x_shape = common_layers.shape_list(x) + # [batch, num_blocks, heads, length, depth] + x = tf.transpose(x, [0, 2, 1, 3, 4]) + x = tf.reshape(x, [batch*x_shape[2], heads, x_shape[3], + x_shape[4]]) + return x + rel_tail_q = _reshape_for_relative(tail_q) + rel_k = _reshape_for_relative(local_k) + rel_v = _reshape_for_relative(local_v) + rel_embeddings = get_relative_embeddings_left( + rel_embed_length, 2 * block_length, depth_k, heads, + heads_share_relative_embedding, "relative_embeddings") + rel_logits = matmul_with_relative_keys( + rel_tail_q, rel_embeddings, heads_share_relative_embedding) + # Computing relative logits separately for the masked and unmasked parts + # because the reshaping logic is different for both + masked_rel_logits = tf.slice(rel_logits, [0, 0, 0, block_length], + [-1, -1, -1, -1]) + masked_rel_logits = _relative_position_to_absolute_position_masked( + masked_rel_logits) + unmasked_rel_logits = tf.slice(rel_logits, [0, 0, 0, 0], + [-1, -1, -1, 2*block_length-1]) + unmasked_rel_logits = _relative_position_to_absolute_position_unmasked( + unmasked_rel_logits) + all_rel_logits = tf.concat([unmasked_rel_logits, masked_rel_logits], + axis=3) + all_logits = ( + tf.matmul(rel_tail_q, rel_k, transpose_b=True) + all_rel_logits) + # make sure source_pos <= target_pos + good_part = common_layers.ones_matrix_band_part(block_length, + local_length, + -1, block_length) + mask = (1.0 - good_part) * -1e9 + mask = common_layers.cast_like(mask, all_logits) + all_logits += tf.reshape(mask, [1, 1, block_length, local_length]) + weights = tf.nn.softmax(all_logits, name="attention_weights") + # [batch (* num_blocks), heads, query_length (=block_length), + # key_length (=2*block_length)] + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - 
dropout_rate,
+        broadcast_dims=None)
+
+    output = tf.matmul(weights, rel_v)
+    if add_relative_to_values:
+      # Adds the contribution of the weighted relative embeddings to the values.
+      weights_for_unmasked, weights_for_masked = (
+          tf.split(weights, 2, axis=3))
+      rel_weights_unmasked = _absolute_position_to_relative_position_unmasked(
+          weights_for_unmasked)
+      rel_weights_masked = _absolute_position_to_relative_position_masked(
+          weights_for_masked)
+
+      value_rel_embeddings_unmasked = get_relative_embeddings_left(
+          rel_embed_length, 2 * block_length, depth_v,
+          heads, heads_share_relative_embedding,
+          "value_relative_embeddings")
+      # The unmasked relative positions start at -1 rather than 0, so the last
+      # (zero-distance) embedding is dropped.
+      if heads_share_relative_embedding:
+        value_rel_embeddings_unmasked = value_rel_embeddings_unmasked[:-1, :]
+      else:
+        value_rel_embeddings_unmasked = value_rel_embeddings_unmasked[:, :-1, :]
+      value_rel_embeddings_masked = get_relative_embeddings_left(
+          rel_embed_length, block_length, depth_v,
+          heads, heads_share_relative_embedding,
+          "value_relative_embeddings")
+
+      # [batch (*num_blocks), heads, query length, key length]
+      rel_weights = tf.concat(
+          [rel_weights_unmasked, rel_weights_masked], axis=3)
+      if heads_share_relative_embedding:
+        value_rel_embeddings_concat_axis = 0
+      else:
+        value_rel_embeddings_concat_axis = 1
+      value_rel_embeddings = tf.concat(
+          [value_rel_embeddings_unmasked, value_rel_embeddings_masked],
+          axis=value_rel_embeddings_concat_axis)
+      output_rel = matmul_with_relative_values(
+          rel_weights, value_rel_embeddings, heads_share_relative_embedding)
+      output += output_rel
+
+    # bring to [batch, heads, num_blocks-1, block_length, depth]
+    output = tf.reshape(output,
+                        [batch, num_blocks-1, heads, block_length, depth_v])
+    output = tf.transpose(output, [0, 2, 1, 3, 4])
+
+    output = tf.reshape(
+        output, [batch, heads, (num_blocks - 1) * block_length, depth_v])
+    output = tf.concat([first_output, output], axis=2)
+    output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
+    output = tf.reshape(output, [batch, heads, original_length, depth_v])
+    return output
+
+
+def matmul_with_relative_values(x, y, heads_share_relative_embedding):
+  """Multiplies x (relative attention weights) by relative value embeddings y."""
+  if heads_share_relative_embedding:
+    ret = tf.einsum("bhlm,md->bhld", x, y)
+  else:
+    ret = tf.einsum("bhlm,hmd->bhld", x, y)
+  return ret
+
+
+def matmul_with_relative_keys(x, y, heads_share_relative_embedding):
+  """Multiplies x (queries) by relative key embeddings y to get relative logits."""
+  if heads_share_relative_embedding:
+    ret = tf.einsum("bhld,md->bhlm", x, y)
+  else:
+    ret = tf.einsum("bhld,hmd->bhlm", x, y)
+  return ret
+
+
+def local_attention_1d(q, k, v, block_length=128, filter_width=100, name=None):
+  """Strided block local self-attention.
+
+  The sequence is divided into blocks of length block_length. Attention for a
+  given query position can see all memory positions in the corresponding block
+  and filter_width many positions to the left and right of the block.
+
+  Args:
+    q: a Tensor with shape [batch, heads, length, depth_k]
+    k: a Tensor with shape [batch, heads, length, depth_k]
+    v: a Tensor with shape [batch, heads, length, depth_v]
+    block_length: an integer
+    filter_width: an integer indicating how much to look left and right of the
+      block.
+    name: an optional string
+
+  Returns:
+    a Tensor of shape [batch, heads, length, depth_v]
+  """
+  with tf.variable_scope(
+      name, default_name="local_self_attention_1d", values=[q, k, v]):
+    # Check that q, k, v have the same shape except in their depth dimension.
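+    # The leading [batch, heads, length] dimensions must agree; only the depth
+    # may differ (the output depth follows v).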
+ q.get_shape()[:-1].assert_is_compatible_with(k.get_shape()[:-1]) + q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) + + batch_size, num_heads, original_length, _ = common_layers.shape_list(q) + + # Pad query, key, value to ensure multiple of corresponding lengths. + def pad_to_multiple(x, pad_length): + x_length = common_layers.shape_list(x)[2] + return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) + + def pad_l_and_r(x, pad_length): + return tf.pad(x, [[0, 0], [0, 0], [pad_length, pad_length], [0, 0]]) + + # Set up query blocks. + # [batch, heads, blocks_q, block_length, depth_k] + q = pad_to_multiple(q, block_length) + q = reshape_by_blocks(q, common_layers.shape_list(q), block_length) + total_query_blocks = common_layers.shape_list(q)[2] + + # Set up key and value blocks. + # [batch, heads, blocks_k, block_length, depth_k] + blocks_per_filter_width = filter_width // block_length + remaining_items = filter_width % block_length + k = pad_to_multiple(k, block_length) + v = pad_to_multiple(v, block_length) + k = pad_l_and_r(k, filter_width + block_length - remaining_items) + v = pad_l_and_r(v, filter_width + block_length - remaining_items) + k = reshape_by_blocks(k, common_layers.shape_list(k), block_length) + v = reshape_by_blocks(v, common_layers.shape_list(v), block_length) + + total_kv_blocks = common_layers.shape_list(k)[2] + + slices = [] + # prepare the left-most and right-most partial blocks if needed + if remaining_items: + first_partial_block_k = tf.slice( + k, [0, 0, 0, block_length - remaining_items, 0], + [-1, -1, total_query_blocks, -1, -1]) + first_partial_block_v = tf.slice( + v, [0, 0, 0, block_length - remaining_items, 0], + [-1, -1, total_query_blocks, -1, -1]) + last_partial_block_k = tf.slice( + k, [0, 0, total_kv_blocks - total_query_blocks, 0, 0], + [-1, -1, -1, remaining_items, -1]) + last_partial_block_v = tf.slice( + v, [0, 0, total_kv_blocks - total_query_blocks, 0, 0], + [-1, -1, -1, remaining_items, -1]) + slices.append((first_partial_block_k, first_partial_block_v)) + slices.append((last_partial_block_k, last_partial_block_v)) + + # Prepare the rest of the blocks + first_block_index = 1 if remaining_items else 0 + attention_blocks = 2 * blocks_per_filter_width + 1 + for i in range(first_block_index, attention_blocks + first_block_index): + block_k = tf.slice(k, [0, 0, i, 0, 0], + [-1, -1, total_query_blocks, -1, -1]) + block_v = tf.slice(v, [0, 0, i, 0, 0], + [-1, -1, total_query_blocks, -1, -1]) + slices.append((block_k, block_v)) + # [batch, heads, blocks_q, block_length + 2 * filter_width, depth_k] + k = tf.concat([s[0] for s in slices], axis=3) + v = tf.concat([s[1] for s in slices], axis=3) + + attention_bias = tf.expand_dims(embedding_to_padding(k) * -1e9, axis=-2) + depth_v = common_layers.shape_list(v)[-1] + + output = dot_product_attention( + q, + k, + v, + attention_bias, + dropout_rate=0., + name="local_1d", + make_image_summary=False) + output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) + + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) + output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in + (batch_size, num_heads, original_length, depth_v)]) + return output + + +def reshape_by_blocks(x, x_shape, memory_block_size): + """Reshapes input by splitting its length over blocks of memory_block_size. + + Args: + x: a Tensor with shape [batch, heads, length, depth] + x_shape: tf.TensorShape of x. 
+ memory_block_size: Integer which divides length. + + Returns: + Tensor with shape + [batch, heads, length // memory_block_size, memory_block_size, depth]. + """ + x = tf.reshape(x, [ + x_shape[0], x_shape[1], x_shape[2] // memory_block_size, + memory_block_size, x_shape[3] + ]) + return x + + +def dilated_self_attention_1d(q, + k, + v, + query_block_size=128, + memory_block_size=128, + gap_size=2, + num_memory_blocks=2, + name=None): + """Dilated self-attention. + + Args: + q: a Tensor with shape [batch, heads, length, depth] + k: a Tensor with shape [batch, heads, length, depth] + v: a Tensor with shape [batch, heads, length, depth] + query_block_size: an integer indicating size of query block + memory_block_size: an integer indicating the size of a memory block. + gap_size: an integer indicating the gap size + num_memory_blocks: how many memory blocks to look at to the left and right. + Each will be separated by gap_size. + name: an optional string + + Returns: + a Tensor of shape [batch, heads, length, depth] + """ + with tf.variable_scope( + name, default_name="dilated_self_attention_1d", values=[q, k, v]): + v_list_shape = v.get_shape().as_list() + assert v_list_shape == k.shape.as_list(), "K and V depths must be equal" + v_shape = common_layers.shape_list(v) + depth_v = v_shape[3] + batch_size = v_shape[0] + num_heads = v_shape[1] + original_length = common_layers.shape_list(q)[2] + + # Pad query, key, value to ensure multiple of corresponding lengths. + def pad_to_multiple(x, pad_length): + x_length = common_layers.shape_list(x)[2] + return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) + + def pad_l_and_r(x, pad_length): + return tf.pad(x, [[0, 0], [0, 0], [pad_length, pad_length], [0, 0]]) + + q = pad_to_multiple(q, query_block_size) + v = pad_to_multiple(v, query_block_size) + k = pad_to_multiple(k, query_block_size) + + # Set up query blocks. + new_q_shape = common_layers.shape_list(q) + q = reshape_by_blocks(q, new_q_shape, query_block_size) + self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size) + self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size) + + # Set up key and value windows. + k_v_padding = (gap_size + memory_block_size) * num_memory_blocks + k = pad_l_and_r(k, k_v_padding) + v = pad_l_and_r(v, k_v_padding) + + # Get gather indices. + index_length = (new_q_shape[2] - query_block_size + memory_block_size) + indices = tf.range(0, index_length, delta=1, name="index_range") + indices = tf.reshape(indices, [1, -1, 1]) # [1, length, 1] for convs + kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1) + gather_indices = tf.nn.conv1d( + tf.cast(indices, tf.float32), + kernel, + query_block_size, + padding="VALID", + name="gather_conv") + + gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0) + + # Get left and right memory blocks for each query. 
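+    # gather_indices (computed above with an identity-kernel conv1d of stride
+    # query_block_size) holds, in row i, the memory_block_size consecutive
+    # positions starting at i * query_block_size; gather_dilated_memory_blocks
+    # below reuses these rows to pick out the dilated memory blocks.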
+ # [length, batch, heads, dim] + k_t = tf.transpose(k, [2, 0, 1, 3]) + v_t = tf.transpose(v, [2, 0, 1, 3]) + left_k = gather_dilated_memory_blocks( + k_t[:-k_v_padding, :, :, :], num_memory_blocks, gap_size, + query_block_size, memory_block_size, gather_indices) + left_v = gather_dilated_memory_blocks( + v_t[:-k_v_padding, :, :, :], num_memory_blocks, gap_size, + query_block_size, memory_block_size, gather_indices) + + right_k = gather_dilated_memory_blocks( + k_t[k_v_padding:, :, :, :], + num_memory_blocks, + gap_size, + query_block_size, + memory_block_size, + gather_indices, + direction="right") + right_v = gather_dilated_memory_blocks( + v_t[k_v_padding:, :, :, :], + num_memory_blocks, + gap_size, + query_block_size, + memory_block_size, + gather_indices, + direction="right") + + k_windows = tf.concat([left_k, self_k_part, right_k], axis=3) + v_windows = tf.concat([left_v, self_v_part, right_v], axis=3) + attention_bias = tf.expand_dims( + embedding_to_padding(k_windows) * -1e9, axis=-2) + + output = dot_product_attention( + q, + k_windows, + v_windows, + attention_bias, + dropout_rate=0., + name="dilated_1d", + make_image_summary=False) + output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) + + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) + output.set_shape(v_list_shape) + return output + + +def gather_dilated_memory_blocks(x, + num_memory_blocks, + gap_size, + query_block_size, + memory_block_size, + gather_indices, + direction="left"): + """Gathers blocks with gaps in between. + + Args: + x: Tensor of shape [length, batch, heads, depth] + num_memory_blocks: how many memory blocks to look in "direction". Each will + be separated by gap_size. + gap_size: an integer indicating the gap size + query_block_size: an integer indicating size of query block + memory_block_size: an integer indicating the size of a memory block. + gather_indices: The indices to gather from. + direction: left or right + + Returns: + Tensor of shape [batch, heads, blocks, block_length, depth] + """ + gathered_blocks = [] + # gathering memory blocks + for block_id in range(num_memory_blocks): + block_end_index = -(query_block_size + gap_size * + (block_id + 1) + memory_block_size * block_id) + block_start_index = ( + (memory_block_size + gap_size) * (num_memory_blocks - (block_id + 1))) + if direction != "left": + [block_end_index, + block_start_index] = [-block_start_index, -block_end_index] + if block_end_index == 0: + x_block = x[block_start_index:] + else: + x_block = x[block_start_index:block_end_index] + + def gather_dilated_1d_blocks(x, gather_indices): + x_new = tf.gather(x, gather_indices) + # [batch, heads, blocks, block_length, dim] + return tf.transpose(x_new, [2, 3, 0, 1, 4]) + + gathered_blocks.append(gather_dilated_1d_blocks(x_block, gather_indices)) + return tf.concat(gathered_blocks, 3) + + +def masked_dilated_self_attention_1d(q, + k, + v, + query_block_size=64, + memory_block_size=64, + gap_size=2, + num_memory_blocks=2, + name=None): + """Dilated self-attention. TODO(avaswani): Try it and write a paper on it. + + Args: + q: a Tensor with shape [batch, heads, length, depth] + k: a Tensor with shape [batch, heads, length, depth] + v: a Tensor with shape [batch, heads, length, depth] + query_block_size: an integer + memory_block_size: an integer indicating how much to look left. + gap_size: an integer indicating the gap size + num_memory_blocks: how many memory blocks to look at to the left. 
Each will + be separated by gap_size. + name: an optional string + + Returns: + a Tensor of shape [batch, heads, length, depth] + """ + with tf.variable_scope( + name, default_name="masked_dilated_self_attention_1d", values=[q, k, v]): + v_list_shape = v.get_shape().as_list() + assert v_list_shape == k.shape.as_list(), "K and V depths must be equal" + v_shape = common_layers.shape_list(v) + depth_v = v_shape[3] + batch_size = v_shape[0] + num_heads = v_shape[1] + original_length = common_layers.shape_list(q)[2] + + # Pad query, key, value to ensure multiple of corresponding lengths. + def pad_to_multiple(x, pad_length): + x_length = common_layers.shape_list(x)[2] + return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) + + def pad_l(x, left_pad_length): + return tf.pad(x, [[0, 0], [0, 0], [left_pad_length, 0], [0, 0]]) + + q = pad_to_multiple(q, query_block_size) + v = pad_to_multiple(v, query_block_size) + k = pad_to_multiple(k, query_block_size) + + # Set up query blocks. + new_q_shape = common_layers.shape_list(q) + q = reshape_by_blocks(q, new_q_shape, query_block_size) + + # Set up key and value windows. + self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size) + self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size) + k_v_padding = (gap_size + memory_block_size) * num_memory_blocks + k = pad_l(k, k_v_padding) + v = pad_l(v, k_v_padding) + + # Get gather indices. + index_length = (new_q_shape[2] - query_block_size + memory_block_size) + + indices = tf.range(0, index_length, delta=1, name="index_range") + indices = tf.reshape(indices, [1, -1, 1]) # [1, length, 1] for convs + kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1) + gather_indices = tf.nn.conv1d( + tf.cast(indices, tf.float32), + kernel, + query_block_size, + padding="VALID", + name="gather_conv") + gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0) + + # Get left and right memory blocks for each query. + # [length, batch, heads, dim] + k_t = tf.transpose(k, [2, 0, 1, 3]) + v_t = tf.transpose(v, [2, 0, 1, 3]) + + k_unmasked_windows = gather_dilated_memory_blocks( + k_t, num_memory_blocks, gap_size, query_block_size, memory_block_size, + gather_indices) + v_unmasked_windows = gather_dilated_memory_blocks( + v_t, num_memory_blocks, gap_size, query_block_size, memory_block_size, + gather_indices) + + # Combine memory windows. + block_q_shape = common_layers.shape_list(q) + masked_attention_bias = tf.tile( + tf.expand_dims(attention_bias_lower_triangle(query_block_size), axis=0), + [block_q_shape[0], block_q_shape[1], block_q_shape[2], 1, 1]) + padding_attention_bias = tf.expand_dims( + embedding_to_padding(k_unmasked_windows) * -1e9, axis=-2) + padding_attention_bias = tf.tile(padding_attention_bias, + [1, 1, 1, query_block_size, 1]) + attention_bias = tf.concat( + [masked_attention_bias, padding_attention_bias], axis=-1) + # combine memory windows + k_windows = tf.concat([self_k_part, k_unmasked_windows], 3) + v_windows = tf.concat([self_v_part, v_unmasked_windows], 3) + output = dot_product_attention( + q, + k_windows, + v_windows, + attention_bias, + dropout_rate=0., + name="dilated_1d", + make_image_summary=False) + output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) + + # Remove the padding if introduced. 
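+    # (q, k and v were padded to a multiple of query_block_size above, so the
+    # result is sliced back down to the original sequence length.)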
+ output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) + output.set_shape(v_list_shape) + return output + + +def local_attention_2d(q, + k, + v, + query_shape=(8, 16), + memory_flange=(8, 16), + name=None): + """Strided block local self-attention. + + The 2-D sequence is divided into 2-D blocks of shape query_shape. Attention + for a given query position can only see memory positions less than or equal to + the query position. The memory positions are the corresponding block with + memory_flange many positions to add to the height and width of the block + (namely, left, top, and right). + + Args: + q: a Tensor with shape [batch, heads, h, w, depth_k] + k: a Tensor with shape [batch, heads, h, w, depth_k] + v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current + implementation, depth_v must be equal to depth_k. + query_shape: an tuple indicating the height and width of each query block. + memory_flange: an integer indicating how much to look in height and width + from each query block. + name: an optional string + + Returns: + a Tensor of shape [batch, heads, h, w, depth_v] + """ + with tf.variable_scope( + name, default_name="local_self_attention_2d", values=[q, k, v]): + v_shape = common_layers.shape_list(v) + + # Pad query, key, value to ensure multiple of corresponding lengths. + q = pad_to_multiple_2d(q, query_shape) + k = pad_to_multiple_2d(k, query_shape) + v = pad_to_multiple_2d(v, query_shape) + paddings = [[0, 0], [0, 0], [memory_flange[0], memory_flange[1]], + [memory_flange[0], memory_flange[1]], [0, 0]] + k = tf.pad(k, paddings) + v = tf.pad(v, paddings) + + # Set up query blocks. + q_indices = gather_indices_2d(q, query_shape, query_shape) + q_new = gather_blocks_2d(q, q_indices) + + # Set up key and value blocks. + memory_shape = (query_shape[0] + 2 * memory_flange[0], + query_shape[1] + 2 * memory_flange[1]) + k_and_v_indices = gather_indices_2d(k, memory_shape, query_shape) + k_new = gather_blocks_2d(k, k_and_v_indices) + v_new = gather_blocks_2d(v, k_and_v_indices) + + attention_bias = tf.expand_dims( + to_float(embedding_to_padding(k_new)) * -1e9, axis=-2) + output = dot_product_attention( + q_new, + k_new, + v_new, + attention_bias, + dropout_rate=0., + name="local_2d", + make_image_summary=False) + # Put representations back into original shapes. + padded_q_shape = common_layers.shape_list(q) + output = scatter_blocks_2d(output, q_indices, padded_q_shape) + + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0, 0], + [-1, -1, v_shape[2], v_shape[3], -1]) + return output + + +def pad_to_multiple_2d(x, block_shape): + """Making sure x is a multiple of shape. 
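+
+  For example, a [2, 5, 7, 8] tensor padded with block_shape=(4, 4) becomes
+  [2, 8, 8, 8]; height and width are only padded at the bottom/right.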
+ + Args: + x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor + block_shape: a 2-d list of integer shapes + + Returns: + padded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor + """ + old_shape = x.get_shape().dims + last = old_shape[-1] + if len(old_shape) == 4: + height_padding = -common_layers.shape_list(x)[1] % block_shape[0] + width_padding = -common_layers.shape_list(x)[2] % block_shape[1] + paddings = [[0, 0], [0, height_padding], [0, width_padding], [0, 0]] + elif len(old_shape) == 5: + height_padding = -common_layers.shape_list(x)[2] % block_shape[0] + width_padding = -common_layers.shape_list(x)[3] % block_shape[1] + paddings = [[0, 0], [0, 0], [0, height_padding], [0, width_padding], [0, 0]] + + padded_x = tf.pad(x, paddings) + padded_shape = padded_x.get_shape().as_list() + padded_shape = padded_shape[:-1] + [last] + padded_x.set_shape(padded_shape) + return padded_x + + +def reshape_range(tensor, i, j, shape): + """Reshapes a tensor between dimensions i and j.""" + t_shape = common_layers.shape_list(tensor) + target_shape = t_shape[:i] + shape + t_shape[j:] + return tf.reshape(tensor, target_shape) + + +def gather_blocks_2d(x, indices): + """Gathers flattened blocks from x.""" + x_shape = common_layers.shape_list(x) + x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])]) + # [length, batch, heads, dim] + x_t = tf.transpose(x, [2, 0, 1, 3]) + x_new = tf.gather(x_t, indices) + # returns [batch, heads, num_blocks, block_length ** 2, dim] + return tf.transpose(x_new, [2, 3, 0, 1, 4]) + + +def scatter_blocks_2d(x, indices, shape): + """scatters blocks from x into shape with indices.""" + x_shape = common_layers.shape_list(x) + # [length, batch, heads, dim] + x_t = tf.transpose( + tf.reshape(x, [x_shape[0], x_shape[1], -1, x_shape[-1]]), [2, 0, 1, 3]) + x_t_shape = common_layers.shape_list(x_t) + indices = tf.reshape(indices, [-1, 1]) + scattered_x = tf.scatter_nd(indices, x_t, x_t_shape) + scattered_x = tf.transpose(scattered_x, [1, 2, 0, 3]) + return tf.reshape(scattered_x, shape) + + +def gather_indices_2d(x, block_shape, block_stride): + """Getting gather indices.""" + # making an identity matrix kernel + kernel = tf.eye(block_shape[0] * block_shape[1]) + kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1]) + # making indices [1, h, w, 1] to appy convs + x_shape = common_layers.shape_list(x) + indices = tf.range(x_shape[2] * x_shape[3]) + indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1]) + indices = tf.nn.conv2d( + tf.cast(indices, tf.float32), + kernel, + strides=[1, block_stride[0], block_stride[1], 1], + padding="VALID") + # making indices [num_blocks, dim] to gather + dims = common_layers.shape_list(indices)[:3] + if all([isinstance(dim, int) for dim in dims]): + num_blocks = functools.reduce(operator.mul, dims, 1) + else: + num_blocks = tf.reduce_prod(dims) + indices = tf.reshape(indices, [num_blocks, -1]) + return tf.cast(indices, tf.int32) + + +def make_2d_block_raster_mask(query_shape, memory_flange): + """Creates a mask for 2d block raster scan. + + The query mask can look to the left, top left, top, and top right, but + not to the right. Inside the query, we have the standard raster scan + masking. 
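+
+  For example, with query_shape=(2, 2) and memory_flange=(2, 2) the returned
+  mask has shape [4, 24]: 4 query positions against
+  (2 + 2 * 2) * (2 + 2) = 24 memory positions.
+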
+ Args: + query_shape: A tuple of ints (query_height, query_width) + memory_flange: A tuple of ints + (memory_flange_height, memory_flange_width) + + Returns: + A tensor of shape query_size, memory_size + """ + # mask inside the query block + query_triangle = common_layers.ones_matrix_band_part( + np.prod(query_shape), np.prod(query_shape), -1, 0) + split_query_masks = tf.split(query_triangle, query_shape[0], axis=1) + # adding mask for left and right + mask_pieces = [ + tf.concat( # pylint: disable=g-complex-comprehension + [tf.ones([np.prod(query_shape), memory_flange[1]]), + split_query_masks[i], + tf.zeros([np.prod(query_shape), memory_flange[1]])], + axis=1) for i in range(query_shape[0]) + ] + # adding mask for top + final_mask = tf.concat( + [ + tf.ones([ + np.prod(query_shape), + (query_shape[1] + 2 * memory_flange[1]) * memory_flange[0] + ]), + tf.concat(mask_pieces, axis=1) + ], + axis=1) + # 0.0 is visible location, 1.0 is masked. + return 1. - final_mask + + +def get_memory_region(x, query_block_shape, memory_flange, q_indices): + """Get the memory regions that surround a 2d query. + + The memory regions will be the left and top right. + + Args: + x: A tensor with shape [batch, heads, height, width, depth] + query_block_shape: a 2-d tuple of integers + memory_flange: a 2-d tuple of integers + q_indices: a tensor of indices for each of the center blocks. + [num_blocks, block_length] + Returns: + x_flange: A tensor of shape [batch, heads, #blocks, block_length, depth] + """ + # Padding x to be multiple of query_shape and then + # extracting the memory blocks from the same regions as the query blocks + x_query_padded = pad_to_multiple_2d(x, query_block_shape) + x_center = gather_blocks_2d(x_query_padded, q_indices) + # Then padding the flange region + paddings = [[0, 0], [0, 0], [memory_flange[0], 0], + [memory_flange[1], memory_flange[1]], [0, 0]] + x_memory_padded = tf.pad(x_query_padded, paddings) + left_x = None + top_x = None + # Extracting the memory regions around the query block. left_x_region extends + # to the left and the top_x_region is the combination of top left, top, and + # top right of the query block + # if no left region + if memory_flange[1] > 0: + left_x_region = x_memory_padded[:, :, memory_flange[ + 0]:, :-(query_block_shape[1] + memory_flange[1]), :] + left_memory_shape = (query_block_shape[0], memory_flange[1]) + left_indices = gather_indices_2d(left_x_region, left_memory_shape, + query_block_shape) + left_x = gather_blocks_2d(left_x_region, left_indices) + # if no top region + if memory_flange[0] > 0: + top_x_region = x_memory_padded[:, :, :-query_block_shape[0], :, :] + + top_memory_shape = (memory_flange[0], + query_block_shape[1] + 2 * memory_flange[1]) + + top_indices = gather_indices_2d(top_x_region, top_memory_shape, + query_block_shape) + + top_x = gather_blocks_2d(top_x_region, top_indices) + x_flange = None + if top_x is not None and left_x is not None: + x_flange = tf.concat([top_x, left_x], axis=3) + else: + x_flange = top_x if top_x is not None else left_x + return x_flange, x_center + + +def get_shifted_center_blocks(x, indices): + """Get right shifted blocks for masked local attention 2d. + + Args: + x: A tensor with shape [batch, heads, height, width, depth] + indices: The indices to gather blocks + + Returns: + x_shifted: a tensor of extracted blocks, each block right shifted along + length. 
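+      The shape is [batch, heads, num_blocks, positions_per_block, depth],
+      unchanged from the gather_blocks_2d output.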
+ """ + center_x = gather_blocks_2d(x, indices) + + # Shift right along the length dimension + def shift_right_2d_blocks(x): + """Shift the second to last dimension of x right by one.""" + shifted_targets = ( + tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :, :-1, :]) + return shifted_targets + + x_shifted = shift_right_2d_blocks(center_x) + return x_shifted + + +def right_shift_blockwise(x, query_shape, name=None): + """Right shifts once in every block. + + Args: + x: a tensor of shape [batch, height, width, depth] + query_shape: A 2d tuple of ints + name: a string + + Returns: + output: a tensor of the same shape as x + """ + with tf.variable_scope( + name, default_name="right_shift_blockwise", values=[x]): + x_list_shape = x.get_shape().as_list() + x_shape = common_layers.shape_list(x) + # Add a dummy dimension for heads. + x = tf.expand_dims(x, axis=1) + x = pad_to_multiple_2d(x, query_shape) + padded_x_shape = common_layers.shape_list(x) + # Set up q blocks. + x_indices = gather_indices_2d(x, query_shape, query_shape) + x_new = get_shifted_center_blocks(x, x_indices) + + # Put representations back into original shapes. + output = scatter_blocks_2d(x_new, x_indices, padded_x_shape) + # Remove the dummy head dimension. + output = tf.squeeze(output, axis=1) + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0], [-1, x_shape[1], x_shape[2], -1]) + output.set_shape(x_list_shape) + return output + + +def right_shift_blockwise_nd(x, block_shape): + """Right shift once in every block. + + Args: + x: a [batch, d1, d2, ..., dn, depth] tensor + block_shape: a tuple (q1, q2, ..., qn) representing the block shape + + Returns: + a [batch, d1, d2, ..., dn, depth] tensor, right shifted. + """ + blocked_x = break_into_blocks_nd(x, block_shape) + blocked_x_shape = common_layers.shape_list(blocked_x) + blocked_x = tf.reshape(blocked_x, + [blocked_x_shape[0], -1, blocked_x_shape[-1]]) + padded_x = tf.pad(blocked_x, [[0, 0], [1, 0], [0, 0]]) + x = tf.slice(padded_x, [0, 0, 0], + [-1, np.prod(blocked_x_shape[1:-1], dtype=np.int32), -1]) + x = tf.reshape(x, blocked_x_shape) + return put_back_blocks_nd(x, block_shape) + + +def masked_local_attention_2d(q, + k, + v, + query_shape=(8, 16), + memory_flange=(8, 16), + name=None): + """Strided block local self-attention. + + Each position in a query block can attend to all the generated queries in + the query block, which are generated in raster scan, and positions that are + generated to the left and top. The shapes are specified by query shape and + memory flange. Note that if you're using this function, you do not need to + right shift. Right shifting happens inside this function separately for each + block. + + Args: + q: a Tensor with shape [batch, heads, h, w, depth_k] + k: a Tensor with shape [batch, heads, h, w, depth_k] + v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current + implementation, depth_v must be equal to depth_k. + query_shape: an tuple indicating the height and width of each query block. + query_shape = block_shape + memory_flange: an integer indicating how much to look in height and width + from each query block. + memory shape = query_shape + (block_flange[0], 2*block_flange[1]) + name: an optional string + + Returns: + a Tensor of shape [batch, heads, h, w, depth_v] + """ + with tf.variable_scope( + name, default_name="local_masked_self_attention_2d", values=[q, k, v]): + v_shape = common_layers.shape_list(v) + + # Pad query to ensure multiple of corresponding lengths. 
+ q = pad_to_multiple_2d(q, query_shape) + + # Set up query blocks. + q_indices = gather_indices_2d(q, query_shape, query_shape) + q_new = gather_blocks_2d(q, q_indices) + + # Set up key and value blocks. + k_flange, k_center = get_memory_region(k, query_shape, memory_flange, + q_indices) + v_flange, v_center = get_memory_region(v, query_shape, memory_flange, + q_indices) + if k_flange is not None: + k_new = tf.concat([k_flange, k_center], axis=3) + v_new = tf.concat([v_flange, v_center], axis=3) + else: + k_new = k_center + v_new = v_center + + # Set up the masks. + query_elements = np.prod(query_shape) + padding_mask = None + if k_flange is not None: + padding_mask = tf.expand_dims( + embedding_to_padding(k_flange) * -1e9, axis=-2) + padding_mask = tf.tile(padding_mask, [1, 1, 1, query_elements, 1]) + + center_attention_bias = attention_bias_lower_triangle( + np.prod(query_elements)) + center_attention_bias = tf.reshape( + center_attention_bias, [1, 1, 1, query_elements, query_elements]) + v_center_shape = common_layers.shape_list(v_center) + center_attention_bias = tf.tile( + center_attention_bias, + [v_center_shape[0], v_center_shape[1], v_center_shape[2], 1, 1]) + if padding_mask is not None: + # Combine the mask for padding and visible region. + attention_bias = tf.concat([padding_mask, center_attention_bias], axis=4) + else: + attention_bias = center_attention_bias + + output = dot_product_attention( + q_new, + k_new, + v_new, + attention_bias, + dropout_rate=0., + name="masked_local_2d", + make_image_summary=False) + # Put representations back into original shapes. + padded_q_shape = common_layers.shape_list(q) + output = scatter_blocks_2d(output, q_indices, padded_q_shape) + + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0, 0], + [-1, -1, v_shape[2], v_shape[3], -1]) + return output + + +def masked_local_attention_nd(q, + k, + v, + query_shape, + memory_flange, + decode_step=None, + name=None): + """Masked local attention nd. + + Each position in q can attend to positions in memory that are positioned less + than or equal to query position according to raster scan ordering and are in + the same memory block. A memory block is n-dimensional and each dimension 'i' + is of size q[i] + 2 * m[i] except for the first dimension which is of size + q[0] + m[0]. NOTE: This computation assumes memory_flange is divisible by + query_shape in every dimension. + + Args: + q: a [batch, heads, d1, d2, ..., dn, depth_k] tensor or a [batch, heads, 1, + 1, ..., 1, depth_k] tensor in decoding mode. + k: a [batch, heads, d1, d2, ..., dn, depth_k] tensor + v: a [batch, heads, d1, d2, ..., dn, depth_v] tensor + query_shape: a tuple (q1, q2, ..., qn) indicating the shape of query blocks. + memory_flange: a tuple (m1, m2, ..., mn) indicating the number of extra + positions in the attention memory. memory_shape=[q1 + m1, d2 + 2 * m2, + ..., dn + 2 * mn] + decode_step: an integer in fast decoding mode. + name: an optional string + + Returns: + a [batch, head, d1, d2, ..., dn, depth_v] tensor or + [batch, head, 1, 1, ..., 1, depth_v] if decode_step is not None. + """ + assert all([m % b == 0 for m, b in zip(memory_flange, query_shape)]) + with tf.variable_scope( + name, default_name="masked_local_attention_nd", values=[q, k, v]): + # This computation only applies to self attention, so assert q, k and v have + # the same dimensions. 
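+    # (Outside fast decoding q is checked against both k and v; in decode mode
+    # q holds a single position, so only k and v are compared.)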
+ if decode_step is None: + q.get_shape().assert_is_compatible_with(k.get_shape()) + q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) + else: + k.get_shape().assert_is_compatible_with(v.get_shape()) + + # move heads to batch dimension. This is needed to reduce number of + # dimensions as much as possible, since most ops support only up to 7 + # dimensions. + q_shape = common_layers.shape_list(q) + k_shape = common_layers.shape_list(k) + v_shape = common_layers.shape_list(v) + q = tf.reshape(q, [-1] + q_shape[2:]) + k = tf.reshape(k, [-1] + k_shape[2:]) + v = tf.reshape(v, [-1] + v_shape[2:]) + + # Pad query, key, value to ensure multiple of corresponding lengths. + if decode_step is None: + # don't pad query in fast decoding mode. We only need to calculate self + # attention for one position. + q = pad_to_multiple_nd(q, query_shape) + k = pad_to_multiple_nd(k, query_shape) + v = pad_to_multiple_nd(v, query_shape) + + # extract query and memory blocks + if decode_step is None: + q = break_into_blocks_nd(q, query_shape) + else: + # in fast decoding, q has 1 block with 1 item in it + # q shape will be [batch] + [1] * n + [1, depth] which is equivalent of + # [batch, b1, b2, ..., bn, items_in_block, depth] where there is 1 block + # and 1 item in that block + q = tf.reshape(q, [-1] + [1] * (len(q_shape) - 3) + [q_shape[-1]]) + k = break_into_memory_blocks_nd(k, query_shape, memory_flange, masked=True) + v = break_into_memory_blocks_nd(v, query_shape, memory_flange, masked=True) + + # extract just one block of k and v in fast decoding mode. + if decode_step is not None: + k = select_block_for_decode_step(k, decode_step, query_shape) + v = select_block_for_decode_step(v, decode_step, query_shape) + + # flatten q, k and v to [batch, num_blocks, items_in_block, depth] + q, blocks_per_dim = flatten_blocks_nd(q) + k, _ = flatten_blocks_nd(k) + v, _ = flatten_blocks_nd(v) + + # make attention bias for causal attention. + causal_attn_bias = causal_attention_bias_nd( + query_shape, memory_flange, decode_step=decode_step) + padding_attn_bias = tf.expand_dims( + embedding_to_padding(v[:1, :, :, :]) * -1e9, axis=-2) + + if decode_step is None: + num_blocks = common_layers.shape_list(v)[1] + causal_attn_bias = tf.tile(causal_attn_bias, [1, num_blocks, 1, 1]) + padding_attn_bias = tf.tile( + padding_attn_bias, + [1, 1, np.prod(query_shape, dtype=np.int32), 1]) + attn_bias = tf.minimum(causal_attn_bias, padding_attn_bias) + + # Calculate dot product attention + output = dot_product_attention( + q, + k, + v, + attn_bias, + dropout_rate=0., + name=name or "masked_local_nd", + make_image_summary=False) + + # restructure the output from blocks ordering to the original ordering + output = unflatten_blocks_nd(output, blocks_per_dim) + if decode_step is None: + # In fast decoding, output only contains one element, this is not needed. + output = put_back_blocks_nd(output, query_shape) + + # bring back the heads dimension + output_shape = common_layers.shape_list(output) + output = tf.reshape(output, q_shape[:2] + output_shape[1:]) + if decode_step is None: + # No padding is introduced in fast decoding, no need to do this. + output_shape = common_layers.shape_list(output) + output = tf.slice(output, [0] * len(output_shape), + [-1, -1] + q_shape[2:-1] + [-1]) + return output + + +def select_block_for_decode_step(blocked_x, decode_step, query_shape): + """Selects one block from `x` that contains position `decode_step`. + + NOTE: This method only works for blocked inputs. 
It selects one block around + `decode_step` position in blocked raster scan order. + + Args: + blocked_x: a [batch, blocks_per_d1, ..., blocks_per_dn, b1 * ...* bn, depth] + tensor + decode_step: an integer + query_shape: a tuple (q1, q2, ..., qn) representing query shape + + Returns: + a [batch, [1] * n, b1 * ... * bn, depth] tensor + """ + blocked_x_shape = common_layers.shape_list(blocked_x) + # calculate the shape of the normal x + x_shape = [b * q for b, q in zip(blocked_x_shape[1:-2], query_shape)] + # Get the position of `decode_step` element in the unblocked x. + index = decode_step_to_index(decode_step, query_shape, x_shape) + # Convert it to the blocked positions. + blocked_index = [i // q for i, q in zip(index, query_shape)] + # TPU needs size to be non negative for the case when begin is not + # compile-time constants. + return tf.slice(blocked_x, [0] + blocked_index + [0, 0], + [blocked_x_shape[0]] + [1] * len(blocked_index) + + blocked_x_shape[-2:]) + + +def flatten_blocks_nd(x): + """Flattens blocks of the input tensor. + + Args: + x: a [batch, b1, ..., bn, items_in_block, depth] tensor + + Returns: + a flattened tensor of shape [batch, b1 * ...* bm, items_in_block, depth] + a list of [b1, ..., bn] which is used for unflattening. + """ + x_shape = common_layers.shape_list(x) + num_blocks = np.prod(x_shape[1:-2], dtype=np.int32) + return tf.reshape(x, [-1, num_blocks] + x_shape[-2:]), x_shape[1:-2] + + +def unflatten_blocks_nd(x, blocks_per_dimension): + """Converts a flattened tensor into a normal blocked tensor. + + Args: + x: a [batch, d1 * ... dn, items_in_block, depth] tensor + blocks_per_dimension: a n-d list of integers for number of blocks in each + dimension. + + Returns: + a [batch, d1, d2, ..., dn, items_in_block, depth] tensor + """ + x_shape = common_layers.shape_list(x) + assert x_shape[1] == np.prod(blocks_per_dimension, dtype=np.int32) + return tf.reshape(x, [-1] + list(blocks_per_dimension) + x_shape[-2:]) + + +def break_into_memory_blocks_nd(x, query_shape, memory_flange, masked=False): + """Break a tensor into memory blocks around query blocks. + + This requires memory_flange to be divisible by query_shape in every dimension. + + Args: + x: a [batch, d1, d2, ..., dn, depth] tensor + query_shape: a n-d list of integers representing query shape + memory_flange: an n-d list of integers representing memory flange. + masked: a boolean for masked vs unmasked attention. + + Returns: + a [batch, blocks_per_d1, ..., blocks_per_dn, b1 * ...* bn, depth] where bi + is the memory block size in dimension i which is equal to q[i] + 2m[i] or + q[i] + m[i] if masked attention and i = 1. + """ + assert all([m % b == 0 for b, m in zip(query_shape, memory_flange)]) + + original_x_shape = common_layers.shape_list(x) + # calculate the total number of query blocks in each dimension + blocks_in_memory_flange = [m // b for b, m in zip(query_shape, memory_flange)] + num_query_blocks = [ + l // q for l, q in zip(original_x_shape[1:-1], query_shape) + ] + # pad x to have enough items on the corners to form the memory blocks. + if masked: + # Only pad the beginning of first dimension in masked mode. + x = tf.pad(x, [[0, 0], [memory_flange[0], 0]] + + [[p, p] for p in memory_flange[1:]] + [[0, 0]]) + else: + x = tf.pad(x, [[0, 0]] + [[p, p] for p in memory_flange] + [[0, 0]]) + + query_blocks = break_into_blocks_nd(x, query_shape) + # stitch query blocks together to form memory blocks of the desired size. 
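+  # Example (1-D): with query_shape=(4,) and memory_flange=(4,), each memory
+  # block is stitched from 3 consecutive query blocks (12 positions) in
+  # unmasked mode, or from 2 blocks (8 positions: the block itself plus the
+  # one to its left) in masked mode.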
+ start_indices_per_dimension = [] + for dimension, blocks in enumerate(blocks_in_memory_flange): + if masked and dimension == 0: + # num blocks for first dimension in masked mode is blocks + 1 + size = blocks + 1 + else: + size = 2 * blocks + 1 + start_indices_per_dimension.append(range(size)) + + slices = [] + for start_indices in itertools.product(*start_indices_per_dimension): + start = [0] + list(start_indices) + [0, 0] + size = [-1] + num_query_blocks + [-1, -1] + s = tf.slice(query_blocks, start, size) + slices.append(s) + # concat slices in their query block dimension to form the full memory blocks + return tf.concat(slices, axis=-2) + + +def break_into_blocks_nd(x, block_shape): + """Break input tensor into blocks of `block_shape`. + + Args: + x: a [batch, d1, d2, ..., dn, depth] tensor + block_shape: a n-d list of integers representing block shape + + Returns: + a [batch, d1//block1, ..., dn//blockn, block1 *... * blockn, depth] tensor + """ + x_shape = common_layers.shape_list(x) + assert all([l % b == 0 for l, b in zip(x_shape[1:], block_shape)]) + blocks_per_dimension = [l // b for l, b in zip(x_shape[1:], block_shape)] + # reshape to [-1, d1 // block1, block1, ..., dn // blockn, blockn, depth] + reshape_to = list( + itertools.chain.from_iterable(zip(blocks_per_dimension, block_shape))) + x = tf.reshape(x, [-1] + reshape_to + x_shape[-1:]) + # transpose dimensions to bring the n-d blocks in consecutive dimensions. + block_dimensions_index = [2 * (i + 1) for i in range(len(block_shape))] + x = tf.transpose(x, [0] + [i - 1 for i in block_dimensions_index] + + block_dimensions_index + [2 * len(block_shape) + 1]) + return tf.reshape(x, [-1] + blocks_per_dimension + + [np.prod(block_shape, dtype=np.int32)] + x_shape[-1:]) + + +def put_back_blocks_nd(x, block_shape): + """Restructure input tensor from blocks to normal ordering. + + Args: + x: a [batch, b1, ..., bn, items_in_block, depth] tensor + block_shape: a n-d list of integers representing block shape. + + Returns: + a [batch, d1, ..., dn, depth] where blocks are put back to form the + original tensor. + """ + x_shape = common_layers.shape_list(x) + assert x_shape[-2] == np.prod(block_shape) + x = tf.reshape(x, x_shape[:-2] + list(block_shape) + x_shape[-1:]) + block_dimension_index = [i + 1 for i in range(len(block_shape))] + block_shape_index = [b + len(block_shape) for b in block_dimension_index] + interleaved_dimensions = list( + itertools.chain.from_iterable( + zip(block_dimension_index, block_shape_index))) + x = tf.transpose(x, [0] + interleaved_dimensions + [2 * len(block_shape) + 1]) + x_shape = common_layers.shape_list(x) + x = tf.reshape(x, [-1] + [ + x_shape[2 * i + 1] * x_shape[2 * i + 2] for i in range(len(block_shape)) + ] + x_shape[-1:]) + return x + + +def pad_to_multiple_nd(x, block_shape): + """Making sure x is a multiple of shape. + + Args: + x: a [batch, d1, d2, ..., dn, depth] tensor + block_shape: a n-d list of integers representing block shape + + Returns: + padded x where each dimension is a multiple of corresponding block length. + """ + shape = common_layers.shape_list(x) + paddings = [-l % b for l, b in zip(shape[1:-1], block_shape)] + return tf.pad(x, [[0, 0]] + [[0, p] for p in paddings] + [[0, 0]]) + + +def causal_attention_bias_nd(query_shape, memory_flange, decode_step=None): + """Creates causal attention bias for local nd attention. + + This assumes memory_flange is divisible by query_shape in every dimension. 
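+
+  For example, in 1-D with query_shape=(4,) and memory_flange=(4,), each
+  memory window holds the previous block and the center block, so the bias has
+  shape [1, 1, 4, 8]: zeros over the 4 previous positions followed by a 4 x 4
+  lower-triangular causal bias over the center block.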
+ + Args: + query_shape: a n-d list of integers representing query shape + memory_flange: a n-d list of integers representing memory flange + decode_step: an integer + + Returns: + a [1, 1, query_items, memory_items] tensor for masked attention bias or + a [1, 1, 1, memory_items] tensor if decode_step is not None. + """ + assert all([m % q == 0 for q, m in zip(query_shape, memory_flange)]) + blocks_per_memory_flange = [ + m // q for q, m in zip(query_shape, memory_flange) + ] + # previous blocks will be half the number of all blocks if we select blocks + # to the left and right of center block in every dimension. + prev_blocks = np.prod([2 * b + 1 for b in blocks_per_memory_flange], + dtype=np.int32) // 2 + all_blocks = np.prod( + [blocks_per_memory_flange[0] + 1] + + [2 * b + 1 for b in blocks_per_memory_flange[1:]], + dtype=np.int32) + future_blocks = all_blocks - prev_blocks - 1 + # add unmasked biases for all prev blocks and a lower triangle for the center + # block and all masked for future blocks. + items_in_block = np.prod(query_shape, dtype=np.int32) + items_in_query = items_in_block if decode_step is None else 1 + prev_blocks_attn = tf.zeros( + [1, 1, items_in_query, prev_blocks * items_in_block]) + + # add mask for the center block + if decode_step is None: + center_block_attn = attention_bias_lower_triangle(items_in_block) + else: + step_in_block = decode_step % items_in_block + cond = tf.reshape( + tf.less_equal(tf.range(items_in_block, dtype=tf.int32), step_in_block), + [1, 1, items_in_query, items_in_block]) + center_block_attn = tf.where( + cond, tf.zeros([1, 1, items_in_query, items_in_block]), + -1e9 * tf.ones([1, 1, items_in_query, items_in_block])) + + # add mask for all future blocks + future_blocks_attn = -1e9 * tf.ones( + [1, 1, items_in_query, future_blocks * items_in_block]) + return tf.concat([prev_blocks_attn, center_block_attn, future_blocks_attn], + axis=3) + + +def compute_attention_component(antecedent, + total_depth, + filter_width=1, + padding="VALID", + name="c", + vars_3d_num_heads=0, + layer_collection=None): + """Computes attention component (query, key or value). + + Args: + antecedent: a Tensor with shape [batch, length, channels] + total_depth: an integer + filter_width: An integer specifying how wide you want the attention + component to be. + padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. + name: a string specifying scope name. + vars_3d_num_heads: an optional integer (if we want to use 3d variables) + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the + KFAC optimizer. Default is None. 
+ + Returns: + c : [batch, length, depth] tensor + """ + if layer_collection is not None: + if filter_width != 1 or vars_3d_num_heads != 0: + raise ValueError( + "KFAC implementation only supports filter_width=1 (actual: {}) and " + "vars_3d_num_heads=0 (actual: {}).".format( + filter_width, vars_3d_num_heads)) + if vars_3d_num_heads is not None and vars_3d_num_heads > 0: + assert filter_width == 1 + input_depth = antecedent.get_shape().as_list()[-1] + depth_per_head = total_depth // vars_3d_num_heads + initializer_stddev = input_depth ** -0.5 + if "q" in name: + initializer_stddev *= depth_per_head ** -0.5 + var = tf.get_variable( + name, [input_depth, + vars_3d_num_heads, + total_depth // vars_3d_num_heads], + initializer=tf.random_normal_initializer(stddev=initializer_stddev)) + var = tf.cast(var, antecedent.dtype) + var = tf.reshape(var, [input_depth, total_depth]) + return tf.tensordot(antecedent, var, axes=1) + if filter_width == 1: + return common_layers.dense( + antecedent, total_depth, use_bias=False, name=name, + layer_collection=layer_collection) + else: + return common_layers.conv1d( + antecedent, total_depth, filter_width, padding=padding, name=name) + + +def compute_qkv(query_antecedent, + memory_antecedent, + total_key_depth, + total_value_depth, + q_filter_width=1, + kv_filter_width=1, + q_padding="VALID", + kv_padding="VALID", + vars_3d_num_heads=0, + layer_collection=None): + """Computes query, key and value. + + Args: + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: a Tensor with shape [batch, length_m, channels] + total_key_depth: an integer + total_value_depth: an integer + q_filter_width: An integer specifying how wide you want the query to be. + kv_filter_width: An integer specifying how wide you want the keys and values + to be. + q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. + kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. + vars_3d_num_heads: an optional (if we want to use 3d variables) + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the + KFAC optimizer. Default is None. 
+ + Returns: + q, k, v : [batch, length, depth] tensors + """ + if memory_antecedent is None: + memory_antecedent = query_antecedent + q = compute_attention_component( + query_antecedent, + total_key_depth, + q_filter_width, + q_padding, + "q", + vars_3d_num_heads=vars_3d_num_heads, + layer_collection=layer_collection) + k = compute_attention_component( + memory_antecedent, + total_key_depth, + kv_filter_width, + kv_padding, + "k", + vars_3d_num_heads=vars_3d_num_heads, + layer_collection=layer_collection) + v = compute_attention_component( + memory_antecedent, + total_value_depth, + kv_filter_width, + kv_padding, + "v", + vars_3d_num_heads=vars_3d_num_heads, + layer_collection=layer_collection) + return q, k, v + + +def multihead_attention(query_antecedent, + memory_antecedent, + bias, + total_key_depth, + total_value_depth, + output_depth, + num_heads, + dropout_rate, + attention_type="dot_product", + max_relative_position=None, + heads_share_relative_embedding=False, + add_relative_to_values=False, + image_shapes=None, + block_length=128, + block_width=128, + q_filter_width=1, + kv_filter_width=1, + q_padding="VALID", + kv_padding="VALID", + cache=None, + gap_size=0, + num_memory_blocks=2, + name="multihead_attention", + save_weights_to=None, + make_image_summary=True, + dropout_broadcast_dims=None, + vars_3d=False, + layer_collection=None, + recurrent_memory=None, + chunk_number=None, + hard_attention_k=0, + gumbel_noise_weight=0.0, + max_area_width=1, + max_area_height=1, + memory_height=1, + area_key_mode="mean", + area_value_mode="sum", + training=True, + **kwargs): + """Multihead scaled-dot-product attention with input/output transformations. + + Args: + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: a Tensor with shape [batch, length_m, channels] or None + bias: bias Tensor (see attention_bias()) + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + dropout_rate: a floating point number + attention_type: a string, either "dot_product", "dot_product_relative", + "local_mask_right", "local_unmasked", "masked_dilated_1d", + "unmasked_dilated_1d", graph, or any attention function + with the signature (query, key, value, **kwargs) + max_relative_position: Maximum distance between inputs to generate + unique relation embeddings for. Only relevant + when using "dot_product_relative" attention. + heads_share_relative_embedding: boolean to share relative embeddings + add_relative_to_values: a boolean for whether to add relative component to + values. + image_shapes: optional tuple of integer scalars. + see comments for attention_image_summary() + block_length: an integer - relevant for "local_mask_right" + block_width: an integer - relevant for "local_unmasked" + q_filter_width: An integer specifying how wide you want the query to be. + kv_filter_width: An integer specifying how wide you want the keys and values + to be. + q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. + kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID": + no padding. + cache: dict containing Tensors which are the results of previous + attentions, used for fast decoding. Expects the dict to contrain two + keys ('k' and 'v'), for the initial call the values for these keys + should be empty Tensors of the appropriate shape. 
+ 'k' [batch_size, 0, key_channels] + 'v' [batch_size, 0, value_channels] + gap_size: Integer option for dilated attention to indicate spacing between + memory blocks. + num_memory_blocks: Integer option to indicate how many memory blocks to look + at. + name: an optional string. + save_weights_to: an optional dictionary to capture attention weights + for vizualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + vars_3d: use 3-dimensional variables for input/output transformations + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the + KFAC optimizer. Default is None. + recurrent_memory: An optional transformer_memory.RecurrentMemory, which + retains state across chunks. Default is None. + chunk_number: an optional integer Tensor with shape [batch] used to operate + the recurrent_memory. + hard_attention_k: integer, if > 0 triggers hard attention (picking top-k). + gumbel_noise_weight: if > 0, apply Gumbel noise with weight + `gumbel_noise_weight` before picking top-k. This is a no op if + hard_attention_k <= 0. + max_area_width: the max width allowed for an area. + max_area_height: the max height allowed for an area. + memory_height: the height of the memory. + area_key_mode: the mode for computing area keys, which can be "mean", + "concat", "sum", "sample_concat", and "sample_sum". + area_value_mode: the mode for computing area values, which can be either + "mean", or "sum". + training: indicating if it is in the training mode. + **kwargs (dict): Parameters for the attention function. + + Caching: + WARNING: For decoder self-attention, i.e. when memory_antecedent == None, + the caching assumes that the bias contains future masking. + + The caching works by saving all the previous key and value values so that + you are able to send just the last query location to this attention + function. I.e. if the cache dict is provided it assumes the query is of the + shape [batch_size, 1, hidden_dim] rather than the full memory. + + Returns: + The result of the attention transformation. The output shape is + [batch_size, length_q, hidden_dim] + unless the cache dict is provided in which case only the last memory + position is calculated and the output shape is [batch_size, 1, hidden_dim] + Optionally returns an additional loss parameters (ex: load balance loss for + the experts) returned by the attention_type function. + + Raises: + ValueError: if the key depth or value depth are not divisible by the + number of attention heads. + """ + if total_key_depth % num_heads != 0: + raise ValueError("Key depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_key_depth, num_heads)) + if total_value_depth % num_heads != 0: + raise ValueError("Value depth (%d) must be divisible by the number of " + "attention heads (%d)." 
% (total_value_depth, num_heads)) + vars_3d_num_heads = num_heads if vars_3d else 0 + + if layer_collection is not None: + if cache is not None: + raise ValueError("KFAC implementation only supports cache is None.") + if vars_3d: + raise ValueError("KFAC implementation does not support 3d vars.") + + if recurrent_memory is not None: + if memory_antecedent is not None: + raise ValueError("Recurrent memory requires memory_antecedent is None.") + if cache is not None: + raise ValueError("Cache is not supported when using recurrent memory.") + if vars_3d: + raise ValueError("3d vars are not supported when using recurrent memory.") + if layer_collection is not None: + raise ValueError("KFAC is not supported when using recurrent memory.") + if chunk_number is None: + raise ValueError("chunk_number is required when using recurrent memory.") + + with tf.variable_scope(name, default_name="multihead_attention", + values=[query_antecedent, memory_antecedent]): + + if recurrent_memory is not None: + ( + recurrent_memory_transaction, + query_antecedent, memory_antecedent, bias, + ) = recurrent_memory.pre_attention( + chunk_number, + query_antecedent, memory_antecedent, bias, + ) + + if cache is None or memory_antecedent is None: + q, k, v = compute_qkv(query_antecedent, memory_antecedent, + total_key_depth, total_value_depth, q_filter_width, + kv_filter_width, q_padding, kv_padding, + vars_3d_num_heads=vars_3d_num_heads, + layer_collection=layer_collection) + if cache is not None: + if attention_type not in ["dot_product", "dot_product_relative"]: + # TODO(petershaw): Support caching when using relative position + # representations, i.e. "dot_product_relative" attention. + raise NotImplementedError( + "Caching is not guaranteed to work with attention types other than" + " dot_product.") + if bias is None: + raise ValueError("Bias required for caching. 
See function docstring " + "for details.") + + if memory_antecedent is not None: + # Encoder-Decoder Attention Cache + q = compute_attention_component(query_antecedent, total_key_depth, + q_filter_width, q_padding, "q", + vars_3d_num_heads=vars_3d_num_heads) + k = cache["k_encdec"] + v = cache["v_encdec"] + else: + k = split_heads(k, num_heads) + v = split_heads(v, num_heads) + decode_loop_step = kwargs.get("decode_loop_step") + if decode_loop_step is None: + k = cache["k"] = tf.concat([cache["k"], k], axis=2) + v = cache["v"] = tf.concat([cache["v"], v], axis=2) + else: + tmp_k = tf.transpose(cache["k"], perm=[2, 0, 1, 3]) + tmp_k = tf.add(tmp_k, tf.scatter_nd([[decode_loop_step]], tf.expand_dims(tf.squeeze(k, axis=2), 0), tmp_k.shape)) + k = cache["k"] = tf.transpose(tmp_k, perm=[1, 2, 0, 3]) + + tmp_v = tf.transpose(cache["v"], perm=[2, 0, 1, 3]) + tmp_v = tf.add(tmp_v, tf.scatter_nd([[decode_loop_step]], tf.expand_dims(tf.squeeze(v, axis=2), 0), tmp_v.shape)) + v = cache["v"] = tf.transpose(tmp_v, perm=[1, 2, 0, 3]) + + q = split_heads(q, num_heads) + if cache is None: + k = split_heads(k, num_heads) + v = split_heads(v, num_heads) + + key_depth_per_head = total_key_depth // num_heads + if not vars_3d: + q *= key_depth_per_head**-0.5 + + additional_returned_value = None + if callable(attention_type): # Generic way to extend multihead_attention + x = attention_type(q, k, v, **kwargs) + if isinstance(x, tuple): + x, additional_returned_value = x # Unpack + elif attention_type == "dot_product": + if max_area_width > 1 or max_area_height > 1: + x = area_attention.dot_product_area_attention( + q, k, v, bias, dropout_rate, image_shapes, + save_weights_to=save_weights_to, + dropout_broadcast_dims=dropout_broadcast_dims, + max_area_width=max_area_width, + max_area_height=max_area_height, + memory_height=memory_height, + area_key_mode=area_key_mode, + area_value_mode=area_value_mode, + training=training) + else: + x = dot_product_attention( + q, k, v, bias, dropout_rate, image_shapes, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=dropout_broadcast_dims, + activation_dtype=kwargs.get("activation_dtype"), + hard_attention_k=hard_attention_k, + gumbel_noise_weight=gumbel_noise_weight) + elif attention_type == "dot_product_relative": + x = dot_product_attention_relative( + q, + k, + v, + bias, + max_relative_position, + dropout_rate, + image_shapes, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + cache=cache is not None, + allow_memory=recurrent_memory is not None, + hard_attention_k=hard_attention_k, + gumbel_noise_weight=gumbel_noise_weight) + elif attention_type == "dot_product_unmasked_relative_v2": + x = dot_product_unmasked_self_attention_relative_v2( + q, + k, + v, + bias, + max_relative_position, + dropout_rate, + image_shapes, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=dropout_broadcast_dims, + heads_share_relative_embedding=heads_share_relative_embedding, + add_relative_to_values=add_relative_to_values) + elif attention_type == "dot_product_relative_v2": + x = dot_product_self_attention_relative_v2( + q, + k, + v, + bias, + max_relative_position, + dropout_rate, + image_shapes, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=dropout_broadcast_dims, + heads_share_relative_embedding=heads_share_relative_embedding, + add_relative_to_values=add_relative_to_values) + elif attention_type == 
"local_within_block_mask_right": + x = masked_within_block_local_attention_1d( + q, k, v, block_length=block_length) + elif attention_type == "local_relative_mask_right": + x = masked_relative_local_attention_1d( + q, + k, + v, + block_length=block_length, + make_image_summary=make_image_summary, + dropout_rate=dropout_rate, + heads_share_relative_embedding=heads_share_relative_embedding, + add_relative_to_values=add_relative_to_values, + name="masked_relative_local_attention_1d") + elif attention_type == "local_mask_right": + x = masked_local_attention_1d( + q, + k, + v, + block_length=block_length, + make_image_summary=make_image_summary) + elif attention_type == "local_unmasked": + x = local_attention_1d( + q, k, v, block_length=block_length, filter_width=block_width) + elif attention_type == "masked_dilated_1d": + x = masked_dilated_self_attention_1d(q, k, v, block_length, block_width, + gap_size, num_memory_blocks) + else: + assert attention_type == "unmasked_dilated_1d" + x = dilated_self_attention_1d(q, k, v, block_length, block_width, + gap_size, num_memory_blocks) + x = combine_heads(x) + + # Set last dim specifically. + x.set_shape(x.shape.as_list()[:-1] + [total_value_depth]) + + if vars_3d: + o_var = tf.get_variable( + "o", [num_heads, total_value_depth // num_heads, output_depth]) + o_var = tf.cast(o_var, x.dtype) + o_var = tf.reshape(o_var, [total_value_depth, output_depth]) + x = tf.tensordot(x, o_var, axes=1) + else: + x = common_layers.dense( + x, output_depth, use_bias=False, name="output_transform", + layer_collection=layer_collection) + + if recurrent_memory is not None: + x = recurrent_memory.post_attention(recurrent_memory_transaction, x) + if additional_returned_value is not None: + return x, additional_returned_value + return x + + +def multihead_attention_2d(query_antecedent, + memory_antecedent, + total_key_depth, + total_value_depth, + output_depth, + num_heads, + attention_type="local_attention_2d", + query_shape=(8, 16), + memory_flange=(8, 16), + name=None): + """2d Multihead scaled-dot-product attention with inp/output transformations. + + Args: + query_antecedent: a Tensor with shape [batch, h, w, depth_k] + memory_antecedent: a Tensor with shape [batch, h, w, depth_k] + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + attention_type: String, type of attention function to use. + query_shape: an tuple indicating the height and width of each query block. + memory_flange: an integer indicating how much to look in height and width + name: an optional string + + Returns: + A Tensor of shape [batch, h, w, output_depth] + + Raises: + ValueError: if the key depth or value depth are not divisible by the + number of attention heads. + """ + if total_key_depth % num_heads != 0: + raise ValueError("Key depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_key_depth, num_heads)) + if total_value_depth % num_heads != 0: + raise ValueError("Value depth (%d) must be divisible by the number of " + "attention heads (%d)." 
% (total_value_depth, num_heads)) + with tf.variable_scope( + name, + default_name="multihead_attention_2d", + values=[query_antecedent, memory_antecedent]): + q, k, v = compute_qkv(query_antecedent, memory_antecedent, total_key_depth, + total_value_depth) + # after splitting, shape is [batch, heads, h, w, depth] + q = split_heads_2d(q, num_heads) + k = split_heads_2d(k, num_heads) + v = split_heads_2d(v, num_heads) + key_depth_per_head = total_key_depth // num_heads + q *= key_depth_per_head**-0.5 + if attention_type == "local_attention_2d": + x = local_attention_2d( + q, k, v, query_shape=query_shape, memory_flange=memory_flange) + elif attention_type == "masked_local_attention_2d": + assert attention_type == "masked_local_attention_2d" + x = masked_local_attention_2d( + q, k, v, query_shape=query_shape, memory_flange=memory_flange) + else: + assert attention_type == "unmasked_local_attention_2d_tpu" + x = dot_product_unmasked_attention_local_2d_tpu( + q, k, v, None, max_relative_position=None, query_shape=query_shape) + x = combine_heads_2d(x) + x = common_layers.dense( + x, output_depth, use_bias=False, name="output_transform") + return x + + +def multihead_attention_nd(query_antecedent, + memory_antecedent, + total_key_depth, + total_value_depth, + output_depth, + num_heads, + query_shape, + memory_flange, + masked=False, + cache=None, + decode_step=None, + name=None): + """n-d Multihead scaled-dot-product attention with in/output transformations. + + Args: + query_antecedent: a Tensor with shape [batch, d1, ..., dn, depth_q] or + [batch, 1, ..., 1, depth_q] if in fast decoding mode. + memory_antecedent: a Tensor with shape [batch, d1, ..., dn, depth_m] or None + for self attention. + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + query_shape: an tuple indicating the dimensions of each query block. + memory_flange: an integer indicating how much to look around a query block + in each dimension + masked: a boolean to specify whether to do masked or unmasked attention. + cache: a dict like: { + 'k': [batch, num_heads, d1, ..., dn, depth_k // num_heads], + 'v': [batch, num_heads, d1, ..., dn, depth_v // num_heads]} Caller should + initially pass zero tensors for `decode_step` == 0. This method will + update cache and caller should pass the same cache in consecutive calls. + This works for both GPU and TPU inference. Caller should pass the latest + query via `query_antecedent`. `memory_antecedent` should be None in this + case, since auto-regressive decoding only applies to self attention. + decode_step: integer to pass in decoding mode. `cache` and `decode_step` + should both be set in decoding mode. Caller can also pass an empty `cache` + without `decode_step`, for this method to initialize the cache for future + calls with `decode_step` > 0. + name: an optional string + + Returns: + A Tensor of shape [batch, d1, ..., dn, output_depth] or + [batch, 1, ..., 1, output_depth] if decode_step is set. + + Raises: + ValueError: if the key depth or value depth are not divisible by the + number of attention heads. + """ + if total_key_depth % num_heads != 0: + raise ValueError("Key depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_key_depth, num_heads)) + if total_value_depth % num_heads != 0: + raise ValueError("Value depth (%d) must be divisible by the number of " + "attention heads (%d)." 
% (total_value_depth, num_heads)) + # Validate decoding input params are sensible. + if decode_step is not None: + assert "k" in cache and "v" in cache + if cache is not None: + assert memory_antecedent is None + + with tf.variable_scope( + name, + default_name="multihead_attention_nd", + values=[query_antecedent, memory_antecedent]): + if decode_step is not None: + latest_antecedent = query_antecedent + q, latest_k, latest_v = compute_qkv(latest_antecedent, None, + total_key_depth, total_value_depth) + latest_k = split_heads_nd(latest_k, num_heads) + latest_v = split_heads_nd(latest_v, num_heads) + # put latest k and v into their correct position in cache. + k = cache["k"] + v = cache["v"] + k = put_item_in_decode_step(k, latest_k, decode_step, query_shape) + v = put_item_in_decode_step(v, latest_v, decode_step, query_shape) + cache["k"] = k + cache["v"] = v + + else: + q, k, v = compute_qkv(query_antecedent, memory_antecedent, + total_key_depth, total_value_depth) + k = split_heads_nd(k, num_heads) + v = split_heads_nd(v, num_heads) + if cache is not None: + cache["k"] = k + cache["v"] = v + # after splitting, shape is [batch, heads, d1, ..., dn, depth] + q = split_heads_nd(q, num_heads) + key_depth_per_head = total_key_depth // num_heads + q *= key_depth_per_head**-0.5 + if masked: + x = masked_local_attention_nd( + q, + k, + v, + query_shape=query_shape, + memory_flange=memory_flange, + decode_step=decode_step) + else: + raise NotImplementedError( + "Unmaked multihead attention nd is not implemented") + x = combine_heads_nd(x) + x = common_layers.dense( + x, output_depth, use_bias=False, name="output_transform") + return x + + +def decode_step_to_index(decode_step, query_shape, tensor_shape): + """Maps decode step to n-d index according to blocked raster scan order. + + Args: + decode_step: an integer + query_shape: a tuple (q1, q2, ..., qn) representing the query shape + tensor_shape: a tuple (d1, d2, ..., dn) representing the tensor shape, minus + the batch and depth dimensions. + + Returns: + a tuple (i1, i2, ..., in) representing the index of the element at + `decode_step` w.r.t. blocked raster scan order. + """ + assert len(query_shape) == len(tensor_shape) + blocks_per_dimension = [t // q for t, q in zip(tensor_shape, query_shape)] + items_in_block = np.prod(query_shape, dtype=np.int32) + step_block = decode_step // items_in_block + step_within_block = decode_step % items_in_block + + block_index = [] + for q in blocks_per_dimension[::-1]: + block_index.insert(0, step_block % q) + step_block //= q + + within_block_index = [] + for q in query_shape[::-1]: + within_block_index.insert(0, step_within_block % q) + step_within_block //= q + + final_index = [ + w + b * q for w, b, q in zip(within_block_index, block_index, query_shape) + ] + return tuple(final_index) + + +def get_item_at_decode_step(x, decode_step, query_shape): + """Extracts a single item from an n-d tensor at `decode_step` position. + + Args: + x: a [batch, d1, d2, ..., dn, depth] tensor + decode_step: an integer + query_shape: a tuple (q1, q2, ..., qn) representing the query shape + + Returns: + a [batch, 1, 1, ..., 1, depth] tensor that is a single element from `x` at + `decode_step` w.r.t. blocked raster scan order. + """ + x_shape = common_layers.shape_list(x) + index = decode_step_to_index(decode_step, query_shape, x_shape[1:-1]) + # TPU needs size to be non negative for the case when begins are not + # compile-time constants. 
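+ # Added illustration (not in the original source): with query_shape=(2, 2)
+ # and a spatial shape of (4, 4), blocked raster scan order visits the four
+ # cells of block (0, 0) as steps 0-3 and block (0, 1) as steps 4-7, so
+ # decode_step=5 maps to index (0, 3). The slice below therefore starts at
+ # [0, i1, ..., in, 0] and takes a size of [batch, 1, ..., 1, depth].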
+ return tf.slice(x, [0] + list(index) + [0], + [x_shape[0]] + [1] * len(index) + [x_shape[-1]]) + + +def put_item_in_decode_step(x, item, decode_step, query_shape): + """Puts a single item into an n-d tensor at `decode_step` position. + + Args: + x: a [batch, heads, d1, d2, ..., dn, depth] tensor + item: a [batch, heads, 1, 1, ..., 1, depth] tensor + decode_step: an integer + query_shape: a tuple (q1, q2, ..., qn) representing the query shape + + Returns: + a [batch, heads, d1, d2, ..., dn, depth] tensor with value at `decode_step` + w.r.t. blocked raster scan order is updated to be `item`. + """ + x_shape = common_layers.shape_list(x) + index = decode_step_to_index(decode_step, query_shape, x_shape[2:-1]) + # inplace_update only works on the first dimension, we need to flatten and + # move batch to be the second dimension. + flattened_x = tf.reshape( + x, [-1, x_shape[1], np.prod(x_shape[2:-1]), x_shape[-1]]) + # transpose to [positions, batch, heads, depth] + flattened_x = tf.transpose(flattened_x, [2, 0, 1, 3]) + + flattened_index = 0 + factor = 1 + for d, idx in zip(x_shape[-2:1:-1], index[::-1]): + flattened_index += idx * factor + factor *= d + + item_shape = common_layers.shape_list(item) + item = tf.reshape(item, item_shape[:2] + item_shape[-1:]) + updated_x = inplace_ops.alias_inplace_update(flattened_x, flattened_index, + item) + # unflatten the results + updated_x = tf.transpose(updated_x, [1, 2, 0, 3]) + return tf.reshape(updated_x, [-1, x_shape[1]] + x_shape[2:]) + + +def ffn_self_attention_layer(x, + filter_depth, + output_depth, + num_parts, + dropout_rate, + share_kv=False, + name=None): + """Self-attention feedforward layer. + + We use self-attention to do feedforward computations. We apply this function + positionwise where for each position, we linearly transform the output to have + depth filter_depth, and break up the result depth-wise into num_parts + contiguous parts. The parts self-attend, we concatenate the results + depth-wise, and we linearly transform to a depth of output_depth. The goal is + to get multiplicative interactions between components of a representation. + + Args: + x: a Tensor with shape [batch, length, channels] + filter_depth: an integer + output_depth: an integer + num_parts: an integer dividing filter depth + dropout_rate: a floating point number + share_kv: Share the key value transform + name: an optional string + + Returns: + A Tensor with shape [batch, length, output_depth]. 
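+
+ Illustrative usage (a sketch added for clarity, shapes only; assumes a
+ TF1 graph context and that filter_depth is divisible by num_parts):
+
+ x = tf.random_normal([2, 10, 64])
+ y = ffn_self_attention_layer(
+ x, filter_depth=256, output_depth=64, num_parts=4, dropout_rate=0.0)
+ # y has shape [2, 10, 64]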
+ """ + with tf.variable_scope( + name, default_name="feedforward_self_attention", values=[x]): + x_shape = common_layers.shape_list(x) + part_depth = filter_depth // num_parts + if not share_kv: + combined = common_layers.dense( + x, filter_depth * 3, use_bias=False, name="qkv_transform") + combined = tf.expand_dims(combined, axis=2) + q, k, v = tf.split(combined, 3, axis=3) + else: + q = tf.expand_dims( + common_layers.dense( + x, filter_depth, use_bias=False, name="q_transform"), + axis=2) + kv_combined = tf.expand_dims( + common_layers.dense( + tf.concat([x, x], axis=1), + filter_depth, + use_bias=False, + name="kv_transform"), + axis=2) + k, v = tf.split(kv_combined, [x_shape[1], x_shape[1]], axis=1) + + batch_q = tf.reshape(q, [-1, 1, num_parts, part_depth]) + batch_k = tf.reshape(k, [-1, 1, num_parts, part_depth]) + batch_v = tf.reshape(v, [-1, 1, num_parts, part_depth]) + + batch_q *= part_depth**-0.5 + # non-masked bias + bias = None + x = dot_product_attention(batch_q, batch_k, batch_v, bias, dropout_rate) + x = tf.reshape(x, [x_shape[0], x_shape[1], filter_depth]) + x = common_layers.dense( + x, output_depth, use_bias=False, name="output_transform") + return x + + +def parameter_attention(x, + total_key_depth, + total_value_depth, + output_depth, + memory_rows, + num_heads, + dropout_rate, + name=None): + """Attention over parameters. + + We use the same multi-headed attention as in the other layers, but the memory + keys and values are model parameters. There are no linear transformation on + the keys or values. + + We are also a bit more careful about memory usage, since the number of + memory positions may be very large. + + Args: + x: a Tensor with shape [batch, length_q, channels] + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + memory_rows: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + dropout_rate: a floating point number + name: an optional string + + Returns: + A Tensor with shape [batch, length_q, output_depth]. + """ + with tf.variable_scope(name, default_name="parameter_attention", values=[x]): + head_size_k = total_key_depth // num_heads + head_size_v = total_value_depth // num_heads + var_shape_k = [num_heads, memory_rows, head_size_k] + var_shape_v = [num_heads, memory_rows, head_size_v] + k = tf.get_variable( + "k", + var_shape_k, + initializer=tf.random_normal_initializer( + 0, output_depth**-0.5 * (num_heads**0.5))) + v = tf.get_variable( + "v", + var_shape_v, + initializer=tf.random_normal_initializer( + 0, output_depth**-0.5 * (output_depth**0.5))) + batch_size = common_layers.shape_list(x)[0] + length = common_layers.shape_list(x)[1] + q = common_layers.dense( + x, total_key_depth, use_bias=False, name="q_transform") + if dropout_rate: + # This is a cheaper form of attention dropout where we use to use + # the same dropout decisions across batch elements and query positions, + # but different decisions across heads and memory positions. 
+ v = tf.nn.dropout( + v, 1.0 - dropout_rate, noise_shape=[num_heads, memory_rows, 1]) + # query is [batch, length, hidden_size] + # reshape and transpose it to [heads, batch * length, head_size] + q = tf.reshape(q, [batch_size, length, num_heads, head_size_k]) + q = tf.transpose(q, [2, 0, 1, 3]) + q = tf.reshape(q, [num_heads, batch_size * length, head_size_k]) + weights = tf.matmul(q, k, transpose_b=True) + weights = tf.nn.softmax(weights) + y = tf.matmul(weights, v) + y = tf.reshape(y, [num_heads, batch_size, length, head_size_v]) + y = tf.transpose(y, [1, 2, 0, 3]) + y = tf.reshape(y, [batch_size, length, total_value_depth]) + y.set_shape([None, None, total_value_depth]) + y = common_layers.dense( + y, output_depth, use_bias=False, name="output_transform") + return y + + +@expert_utils.add_name_scope() +def coordinate_tensor(shape, axis): + """Return a tensor with given shape containing coordinate along given axis. + + Args: + shape: a Tensor representing the shape of the output Tensor + axis: an integer + + Returns: + A tensor with shape shape and type tf.int32, where each elements its + coordinate along the given axis. + """ + if axis < 0: + axis = tf.size(shape) + axis # Convert to positive for the one_hot indice + + r = tf.range(shape[axis]) + r_shape = tf.one_hot( + axis, tf.size(shape), on_value=-1, off_value=1, dtype=tf.int32) + return tf.zeros(shape, dtype=tf.int32) + tf.reshape(r, r_shape) + + +def self_attention_expert(x, + batch_coordinate, + mask_right=True, + split_batch=False, + attention_num_head=1, + attention_kq_size=None, + attention_v_size=None): + """Implementing attention that runs inside each expert. + + Args: + x: A tensor of shape[batch, depth]. Contains representations from + different positions, which are lexicographically ordered. + batch_coordinate: A tensor of shape [batch, 1] containing the batch + coordinate of each element in x. This is needed to make sure that + positions from different sequences don't attend to each other. + mask_right: A bool. If true, we will not attend to positions on the right, + just as decoder self attention. + split_batch (bool): If True, each sequence of the batch is processed + individually on a loop. If False, the sequences are processed all at + once and a mask is applied to isolate the sequences from each others + attention_num_head (int): number of attention heads + attention_kq_size (int): dimension used for the attention key, and query + attention_v_size (int): dimension used for the attention value + + Returns: + out: A tensor of shape [batch, depth]. + example use: + expert_utils.local_moe( + ... 
+ expert_fn=functools.partial(self_attention_expert, mask_right=) + ) + """ + + depth = x.get_shape().as_list()[-1] + length = common_layers.shape_list(batch_coordinate)[0] + + # Print a warning message if one of the expert isn't used (useful at + # inference where summaries aren't used and the gating function don't add + # noise) + global _expert_count # Hack to make each expert have a unique id + _expert_count += 1 + length = tf.cond( + tf.equal(length, 0), + lambda: tf.Print( # pylint: disable=g-long-lambda + length, [length], "Expert {} empty: ".format(_expert_count)), + lambda: length, + ) + + tf.summary.scalar("batch_size", length, family="experts_stats_batch_size") + + attention_kq_size = attention_kq_size or depth + attention_v_size = attention_v_size or depth + + def length_not_null(x, batch_coordinate): + """Branch of the graph only evaluated when length isn't null.""" + + # Mask between the sequences (not used if map_ids is used) + bias_batch = attention_bias_coordinates(batch_coordinate) + + def add_or_set_if(prev_bias, new_bias, condition): + """Add the bias together while considering the None case.""" + if not condition: + return prev_bias + if prev_bias is None: + return new_bias + return prev_bias + new_bias + + def mask_and_call_attention(x): + """Function applied once for each sequence of the batch.""" + + # Mask to prevent sequences of attending to the future + length = common_layers.shape_list(x)[1] # x has shape [1, length,...] + bias_past = tf.reshape( + attention_bias_lower_triangle(length), [length, length]) + # bias has shape [length, length] + + bias = None + bias = add_or_set_if(bias, bias_past, mask_right) + bias = add_or_set_if(bias, bias_batch, not split_batch) + bias = tf.reshape(bias, [1, 1, length, length]) + + return multihead_attention( + x, + None, + bias, + total_key_depth=attention_kq_size, + total_value_depth=attention_v_size, + output_depth=depth, + num_heads=attention_num_head, + dropout_rate=0.0) + + if split_batch: + out = expert_utils.map_ids(x, batch_coordinate, mask_and_call_attention) + else: + x = tf.reshape(x, [1, length, depth]) + out = mask_and_call_attention(x) + out = tf.squeeze(out, 0) + return out + + # If the length is empty, just forward an empty tensor (avoid having to + # evaluate multihead_attention with tensor having dim equal to zeros) + out = tf.cond( + tf.equal(length, 0), + lambda: tf.zeros(shape=[0, depth], dtype=tf.float32, name="empty_out"), + lambda: length_not_null(x, batch_coordinate), + ) + return out + + +def local_expert_attention(x, + k, + loss_coef, + attention_num_experts, + train=True, + batch_coordinate=None, + **kwargs): + """Attention using a mixture of experts. + + Positions sent to the same expert can attend to each other. + The mixture of experts is "local" in that it is replicated on each + datashard. + + local_moe flatten all batches so to avoid problems with padding (ex: all + padding going to the same expert, self attention attending to non null + padding tokens,...), the padding should be removed before. + + Args: + x: a Tensor with shape [batch, length, depth] or [1, batch*length, depth] + k: The number of experts to dispatch each example to + loss_coef: a scalar. A multiplier for the expert loss + attention_num_experts: The number of experts to use + train: a boolean for the current mode + batch_coordinate (tf.Tensor): int32 tensor of shape [1, batch*length, 1] + containing the batch ids. If None, deduced from first dim of x. 
+ **kwargs: Arguments to forward to self_attention_expert + + Returns: + y: a Tensor with shape [batch, length, depth] + loss: a Scalar + """ + if batch_coordinate is None: + batch_coordinate = tf.expand_dims( + coordinate_tensor(common_layers.shape_list(x)[:-1], axis=0), axis=-1) + with tf.variable_scope("local_expert_attention"): + additional_dispatch_params = {"batch_coordinate": batch_coordinate} + return expert_utils.local_moe( + x, + train, + functools.partial(self_attention_expert, **kwargs), + attention_num_experts, + k=k, + loss_coef=loss_coef, + pass_x=True, + pass_gates=False, + additional_dispatch_params=additional_dispatch_params, + ) + + +@expert_utils.add_name_scope() +def expert_dot_product(q, k, v, info_q, info_k): + """Perform dot product on a subset of the sequence. + + Can add a mask to the attention to prevent sequences to attend to each other + and to prevent attention to the future. + + Args: + q (tf.Tensor): Queries of shape [length_expert_q, depth_k] + k (tf.Tensor): Keys of shape [length_expert_k, depth_k] + v (tf.Tensor): Values of shape [length_expert_k, depth_v] + info_q (BatchInfo): Batch info for queries. If None, no mask is added + info_k (BatchInfo): Batch info for keys + + Returns: + tf.Tensor: dot product attention output ([length_expert_q, depth_v]) + """ + + length_q = common_layers.shape_list(q)[0] + length_k = common_layers.shape_list(k)[0] + depth_v = v.get_shape().as_list()[-1] + + # Create the mask + bias = attention_bias_coordinates(info_q.coordinates, info_k.coordinates) + if info_k.order is not None: + bias += attention_bias_future(info_q.order, info_k.order) + + # Restore batch and head dimension + q, k, v = [tf.expand_dims(tf.expand_dims(t, 0), 0) for t in (q, k, v)] + + def is_zero(): + zeros = tf.zeros(shape=[1, 1, length_q, depth_v], dtype=tf.float32) + zeros = tf.Print(zeros, [length_k, length_q], "length_k/length_q: ") + return zeros + + def is_not_zero(): + return dot_product_attention( + q, + k, + v, + bias=bias, + # No image summary to avoid "Retval[0] does not have value" (because + # inside a condition) + make_image_summary=False, + ) + + # TODO(epot): Should make sure a query gets at least one key. Because the + # different sequences of a batch are merged, it's possible that a + # query from a sequence only receive memory from another sequence, so + # with the mask, the query will perform a softmax on -infinity values. + # A hack could be to add at least one sequence of each batch on each group so + # the query can attend to at least one element. + # Softmax(Q.K)*V + v_out = tf.cond( + tf.logical_or(tf.equal(length_q, 0), tf.equal(length_k, 0)), + is_zero, + is_not_zero, + ) + + # Remove batch and head dimension + v_out = tf.squeeze(v_out, axis=0) + v_out = tf.squeeze(v_out, axis=0) + return v_out + + +@expert_utils.add_name_scope() +def dot_product_single_head(q, k, v, gates_q, gates_k, bi): + """Perform a dot product attention on a single sequence on a single head. + + This function dispatch the q, k, v and loop over the buckets to compute the + attention dot product on each subsequences. 
+ + Args: + q (tf.Tensor): [length_q, depth_q] + k (tf.Tensor): [length_k, depth_q] + v (tf.Tensor): [length_k, depth_v] + gates_q (tf.Tensor): One-hot vector of shape [length_q, nb_buckets] + gates_k (tf.Tensor): One-hot vector of shape [length_k, nb_buckets] + bi (BatchInfo): Contains the batch coordinates and sequence order + + Returns: + tf.Tensor: [length_q, depth_v] + """ + + nb_buckets = gates_q.get_shape().as_list()[-1] + + q_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_q) + k_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_k) + + def eventually_dispatch(dispatcher, value): + if value is not None: + return dispatcher.dispatch(value) + return [None] * nb_buckets + + # Iterate over every dispatched group + list_v_out = [] + for ( + q_i, + k_i, + v_i, + qbc, + qbo, + kbc, + kbo, + ) in zip( + # Dispatch queries, keys and values + q_dispatcher.dispatch(q), + k_dispatcher.dispatch(k), + k_dispatcher.dispatch(v), + # Also dispatch the sequence positions and batch coordinates + eventually_dispatch(q_dispatcher, bi.coordinates), + eventually_dispatch(q_dispatcher, bi.order), + eventually_dispatch(k_dispatcher, bi.coordinates), + eventually_dispatch(k_dispatcher, bi.order), + ): + list_v_out.append( + expert_dot_product( + q_i, + k_i, + v_i, + info_q=BatchInfo(coordinates=qbc, order=qbo), + info_k=BatchInfo(coordinates=kbc, order=kbo))) + + # Combine all buckets together to restore the original length + return q_dispatcher.combine(list_v_out) + + +def map_fn_switch(fn, elems, use_map_fn=True, **kwargs): + """Construct the graph with either tf.map_fn or a python for loop. + + This function is mainly for for benchmarking purpose. + + tf.map_fn is dynamic but is much slower than creating a static graph with + for loop. However, having a for loop make the graph much longer to build + and can consume too much RAM on distributed setting. + + Args: + fn (fct): same that tf.map_fn but for now can only return a single tensor + value (instead of a tuple of tensor for the general case) + elems (tuple): same that tf.map_fn + use_map_fn (bool): If True, tf.map_fn is used, if False, for _ in _: is used + instead + **kwargs: Additional tf.map_fn arguments (ignored if use_map_fn is False) + + Returns: + tf.Tensor: the output of tf.map_fn + """ + if use_map_fn: + return tf.map_fn(fn, elems, **kwargs) + elems_unpacked = (tf.unstack(e) for e in elems) + out_unpacked = [fn(e) for e in zip(*elems_unpacked)] + out = tf.stack(out_unpacked) + return out + + +@expert_utils.add_name_scope() +def sparse_dot_product_attention(q, k, v, bi, use_map_fn, experts_params): + """Sparse multihead self attention. + + Perform an approximation of the full multihead attention by dispatching + the tokens using their keys/values. Thus the attention matrix are only + computed each times on a subset of the tokens. + + Notes: + * The function don't perform scaling here (multihead_attention does + the /sqrt(depth)). + * The padding should have been removed (so batch size should be 1 but length + contains the elements from all different batches) + * Right now, only self attention is supported so length_q and length_kv + should be identical and the function will add triangular mask. + * If bi.order is not None, The bias is added inside this function to + prevent attention to the future. 
+ + Args: + q (tf.Tensor): Queries of shape [batch, heads, length_q, depth_k] + k (tf.Tensor): Keys of shape [batch, heads, length_q, depth_k] + v (tf.Tensor): Values of shape [batch, heads, length_kv, depth_v] + bi (BatchInfo): Contains the batch coordinates and sequence order + use_map_fn (bool): Use either tf.map_fn of python for loop to compute the + heads separately + experts_params (dict): Additional params for the local expert + + Returns: + tf.Tensor: Approximation of Softmax(Q.K) * V, of shape + [batch, heads, length_q, depth_v] + """ + batch_size, nb_heads, _, depth = common_layers.shape_list(q) + + @expert_utils.add_name_scope() + def flatten_first_dims(x): + """Reshape such that x is [num_heads, -1, depth].""" + # Case 1: Either constant batch size of size 1 or batch already flattened + if x.get_shape().as_list()[0] == 1: + return tf.squeeze(x, axis=0) + + # Case 2: Flatten batch dimension + x = tf.transpose(x, perm=[1, 0, 2, 3]) + x = tf.reshape(x, [nb_heads, -1, depth]) + return x + + def flatten_batch(x): + if x is None: + return x + return expert_utils.flatten_all_but_last(x) + + q = flatten_first_dims(q) + k = flatten_first_dims(k) + v = flatten_first_dims(v) + bi = BatchInfo( + coordinates=flatten_batch(bi.coordinates), + order=flatten_batch(bi.order), + ) + + # Unstack heads + list_q = tf.unstack(q) # list[tf.Tensor(shape=[batch * length, depth])] + list_k = tf.unstack(k) + list_v = tf.unstack(v) + + list_gates_q = [] + list_gates_k = [] + + total_loss = 0.0 + # There might be a more optimized way to compute all heads at once + for single_q, single_k, _ in zip(list_q, list_k, list_v): + # Each head get its own dispatcher + lhs_gating = LshGating( + depth=single_q.get_shape().as_list()[-1], **experts_params) + + list_gates_q.append(lhs_gating.get_gates(single_q)) + list_gates_k.append(lhs_gating.get_gates(single_k)) + + gates_q = tf.stack(list_gates_q) + gates_k = tf.stack(list_gates_k) + + # Process each head separately. + v_out = map_fn_switch( + lambda args: dot_product_single_head(bi=bi, *args), + elems=(q, k, v, gates_q, gates_k), + dtype=(tf.float32), + parallel_iterations=2, + use_map_fn=use_map_fn, + ) + + # Restore original shape as expected by multihead_attention + if isinstance(batch_size, int) and batch_size == 1: + v_out = tf.expand_dims(v_out, axis=0) # Restore batch_size = 1 + else: + v_out = tf.reshape(v_out, [nb_heads, batch_size, -1, depth]) + v_out = tf.transpose(v_out, [1, 0, 2, 3]) + return v_out, total_loss / nb_heads + + +@expert_utils.add_name_scope() +def dot_product_batched_head(q, k, v, gates_q, gates_k, mask_right=False): + """Perform a dot product attention on a single sequence on a single head. + + This function dispatch the q, k, v and loop over the buckets to compute the + attention dot product on each subsequences. 
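+
+ (Clarifying note, not in the original docstring: unlike
+ dot_product_single_head above, this variant processes all batch*heads rows
+ at once and dispatches tokens with a TruncatingDispatcher that uses a fixed
+ per-bucket capacity.)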
+ + Args: + q (tf.Tensor): [batch*heads, length_q, depth_q] + k (tf.Tensor): [batch*heads, length_k, depth_q] + v (tf.Tensor): [batch*heads, length_k, depth_v] + gates_q (tf.Tensor): One-hot of shape [batch*heads, length_q, nb_buckets] + gates_k (tf.Tensor): One-hot of shape [batch*heads, length_k, nb_buckets] + mask_right (bool): Add a bias to prevent attention to the future + + Returns: + tf.Tensor: [length_q, depth_v] + """ + nb_buckets = common_layers.shape_list(gates_q)[-1] + + @expert_utils.add_name_scope() + def get_dispatcher(gates): + """Construct dispatcher for gates.""" + length = common_layers.shape_list(gates)[1] + # Count the number of ones per batch (and keep the max value) + nb_elems_to_dispatch = tf.reduce_sum(gates, axis=[1, 2]) + nb_elems_to_dispatch = tf.reduce_max(nb_elems_to_dispatch) + nb_elems_to_dispatch = tf.to_int32(nb_elems_to_dispatch) + capacity = nb_elems_to_dispatch // nb_buckets * 2 # Capacity is hardcoded + capacity = tf.minimum(length, capacity) + tf.summary.scalar("dispatch_capacity", capacity, family="lsh") + return expert_utils.TruncatingDispatcher(gates, capacity) + + def add_summary_capacity(x, prefix): + # Monitor if capacity overflow + x = x[0, ...] # Take first batch/head + x = tf.reduce_sum(x, axis=0) + tf.summary.scalar(prefix + "_min", tf.reduce_min(x), family="lsh") + tf.summary.scalar(prefix + "_max", tf.reduce_max(x), family="lsh") + tf.summary.histogram(prefix + "capacity_distribution", x, family="lsh") + for i in range(3): # Show the first 3 buckets + tf.summary.scalar("{}_{}".format(prefix, i), x[i], family="lsh") + + add_summary_capacity(gates_q, "q") + add_summary_capacity(gates_k, "k") + + q_dispatcher = get_dispatcher(gates_q) + k_dispatcher = get_dispatcher(gates_k) + + q = q_dispatcher.dispatch(q) + k = k_dispatcher.dispatch(k) + v = k_dispatcher.dispatch(v) + + # Bias of shape [batch*heads, nb_buckets, 1, capacity] broadcasted to every + # queries + bias = tf.expand_dims((k_dispatcher.nonpadding() - 1.0) * 1e9, 2) + if mask_right: + q_coordinate = to_float( + tf.expand_dims(q_dispatcher.length_coordinate(), 3)) + k_coordinate = to_float( + tf.expand_dims(k_dispatcher.length_coordinate(), 2)) + bias += to_float(tf.greater(k_coordinate, q_coordinate)) * -1e9 + # The sequence padding is not masked but is ignored on the next layers + + # q, k, v now have shape [batch*heads, nb_bucket, capacity, depth] + # The buckets can be seen as different heads + v_out = dot_product_attention(q, k, v, bias=bias) + + # Combine all buckets together to restore the original length + return q_dispatcher.combine(v_out) + + +@expert_utils.add_name_scope() +def sparse_dot_product_attention_truncated( + q, + k, + v, + bi, # Unused + experts_params, + use_map_fn=False, # Unused + mask_right=False, +): # pylint: disable=unused-argument + """Sparse multihead self attention. + + Perform an approximation of the full multihead attention by dispatching + the tokens using their keys/values. Thus the attention matrix are only + computed each times on a subset of the tokens. + + Notes: + * The function don't perform scaling here (multihead_attention does + the /sqrt(depth)). + * The padding should have been removed (so batch size should be 1 but length + contains the elements from all different batches) + * Right now, only self attention is supported so length_q and length_kv + should be identical and the function will add triangular mask. + * If bi.order is not None, The bias is added inside this function to + prevent attention to the future. 
+ + Args: + q (tf.Tensor): Queries of shape [batch, heads, length_q, depth_k] + k (tf.Tensor): Keys of shape [batch, heads, length_q, depth_k] + v (tf.Tensor): Values of shape [batch, heads, length_kv, depth_v] + bi (BatchInfo): Contains the batch coordinates and sequence order + experts_params (dict): Additional params for the local expert + use_map_fn (bool): Use either tf.map_fn of python for loop to compute the + heads separately + mask_right (bool): + Returns: + tf.Tensor: Approximation of Softmax(Q.K) * V, of shape + [batch, heads, length_q, depth_v] + """ + # Currently depth is the same for for q and v + batch_size, nb_heads, _, depth = common_layers.shape_list(q) + + total_loss = 0.0 + + # Each head get its own dispatcher + list_lsh = [LshGating(depth=depth, **experts_params) for _ in range(nb_heads)] + + @expert_utils.add_name_scope() + def get_gates_head(x, add_first=False): + """Return the gates for each heads of the current x. + + Args: + x (tf.Tensor): of shape [batch, heads, length, depth] + add_first (bool): if True, add the first element on each bucket + + Returns: + tf.Tensor: gates of shape [batch, heads, length, num_buckets] + """ + length = common_layers.shape_list(x)[2] + + # Invert heads/batch + x = tf.transpose(x, perm=[1, 0, 2, 3]) + x = tf.reshape(x, [nb_heads, batch_size * length, depth]) + + list_x = tf.unstack(x) # list[tf.Tensor(shape=[batch * length, depth])] + + # Unstack heads + list_gates = [] + # There might be a more optimized way to compute all heads at once + for lsh, single_x in zip(list_lsh, list_x): + # Each head get its own dispatcher + gates = lsh.get_gates(single_x) + nb_buckets = gates.get_shape().as_list()[-1] + # Reshape to [batch, length, depth] but should consider sequence + # padding in that case (also dispatch the padding) + gates = tf.reshape(gates, [batch_size, length, nb_buckets]) + list_gates.append(gates) + + gates = tf.stack(list_gates) + + # Restore original shape + gates = tf.reshape(gates, [nb_heads, batch_size, length, nb_buckets]) + gates = tf.transpose(gates, [1, 0, 2, 3]) + + # Dispatch the first element to every gates to avoid empty buckets + if add_first: + gates = tf.maximum(gates, + tf.reshape(tf.one_hot([0], length), [1, 1, length, 1])) + + return gates + + gates_q = get_gates_head(q) + gates_k = get_gates_head(k, add_first=True) + + # [batch, heads, length, depth] => [batch*heads, length, depth] + q, k, v, gates_q, gates_k = [ + combine_first_two_dimensions(t) for t in (q, k, v, gates_q, gates_k) + ] + + v_out = dot_product_batched_head(q, k, v, gates_q, gates_k, mask_right) + + # Restore original dimension + v_out = tf.reshape(v_out, [batch_size, nb_heads, -1, depth]) + + return v_out, total_loss / nb_heads + + +@expert_utils.add_var_scope() +def deconv_elems_1d(x, factor, out_depth=None): + """Increase the length and change the dimensionality. + + Expand/project each positions of dim depth of the input into + factor*tokens of dim out_depth + + Args: + x (tf.Tensor): shape [batch_size, length, depth] + factor (int): Multiplicative factor of each tokens. 
+ out_depth (int): Output depth (if None, keep depth constant) + + Returns: + tf.Tensor: shape [batch_size, length*factor, out_depth] + """ + out_depth = out_depth or x.get_shape().as_list()[-1] + x = tf.expand_dims(x, 1) # [batch_size, 1, length, depth] + x = layers().Conv2DTranspose( + filters=out_depth, + kernel_size=(1, factor), + strides=(1, factor), + padding="valid", + data_format="channels_last", + )(x) # [batch_size, 1, length*factor, out_depth] + x = tf.squeeze(x, 1) # [batch_size, length*factor, depth] + return x + + +@expert_utils.add_var_scope() +def conv_elems_1d(x, factor, out_depth=None): + """Decrease the length and change the dimensionality. + + Merge/restore/compress factors positions of dim depth of the input into + a single position of dim out_depth. + This is basically just a strided convolution without overlap + between each strides. The original length has to be divided by factor. + + Args: + x (tf.Tensor): shape [batch_size, length, depth] + factor (int): Length compression factor. + out_depth (int): Output depth + + Returns: + tf.Tensor: shape [batch_size, length//factor, out_depth] + """ + out_depth = out_depth or x.get_shape().as_list()[-1] + # with tf.control_dependencies( # Dynamic assertion + # [tf.assert_equal(tf.shape(x)[1] % factor, 0)]): + x = tf.expand_dims(x, 1) # [batch_size, 1, length, depth] + x = layers().Conv2D( + filters=out_depth, + kernel_size=(1, factor), + strides=(1, factor), + padding="valid", + data_format="channels_last", + )(x) # [batch_size, 1, length//factor, out_depth] + x = tf.squeeze(x, 1) # [batch_size, length//factor, depth] + return x + + +@expert_utils.add_var_scope() +def local_reduction_attention(x, block_length, multihead_params): + """Reduce the length dimension using self attention. + + Args: + x (tf.Tensor): float32 of shape [batch, length, depth] + block_length (int): Block length for local attention (Compression factor) + multihead_params (dict): parameters for multihead attention + + Returns: + tf.Tensor: Compressed tensor of shape [batch, length // factor, depth] + """ + + @expert_utils.add_name_scope() + def dot_product_self_local_attention_flattened(q, k, v): + """Strided block local self-attention. + + No overlap between the blocks. 
+ + Args: + q (tf.Tensor): shape [batch, heads, length, depth_k] + k (tf.Tensor): shape [batch, heads, length, depth_k] + v (tf.Tensor): shape [batch, heads, length, depth_v] + + Returns: + tf.Tensor: shape [batch, heads, length, depth_v] + """ + _, num_head, _, depth = q.get_shape().as_list() + + # Extract the blocks + def pad_and_reshape(x): + """Split the length dim into [num_block, block_length].""" + length_x = common_layers.shape_list(x)[2] + # Add some padding, but won't matter as the last block will never be + # attended by the query (after compression) + x = tf.pad(x, [[0, 0], [0, 0], [0, -length_x % block_length], [0, 0]]) + x = tf.reshape( + x, + [ + common_layers.shape_list(x)[0], # Batch + num_head, # Head + common_layers.shape_list(x)[2] // block_length, # Num blocks + block_length, # Block length + depth, # Depth + ]) + return x + + q, k, v = [pad_and_reshape(t) for t in (q, k, v)] + + # Perform attention on the flattened dot product + logits = tf.matmul(q, k, transpose_b=True) + logits = tf.reshape( + logits, + [ + common_layers.shape_list(logits)[0], # Batch + num_head, # Head + common_layers.shape_list(logits)[2], # Num blocks + block_length**2, # Flatten last dimension + ]) + weights = tf.nn.softmax(logits) + weights = tf.reshape( + weights, + [ + common_layers.shape_list(weights)[0], # Batch + num_head, # Head + common_layers.shape_list(weights)[2], # Num blocks + block_length, + block_length, # Restore the block length dimension + ]) + weights = tf.reduce_sum(weights, axis=3, keep_dims=True) # Compress block + v_out = tf.matmul(weights, v) # [1, block_length] @ [block_length, depth] + v_out = tf.squeeze(v_out, axis=3) + return v_out + + return multihead_attention( + x, + None, + bias=None, + output_depth=x.get_shape().as_list()[-1], + attention_type=dot_product_self_local_attention_flattened, + **multihead_params) + + +@expert_utils.add_var_scope() +def multihead_self_attention_reduced( + x, + memory_antecedent=None, + bias=None, + factor=None, + multihead_params=None, + nonlinearity="none", + reduction_type="conv", + add_mask=True, +): + """Reduce the length dimension by compressing with conv. + + Args: + x (tf.Tensor): float32 of shape [batch, length, depth] + memory_antecedent (tf.Tensor): Unsupported for now + bias (tf.Tensor): Ignored + factor (int): compression factor for the memory sequence + multihead_params (dict): parameters for multihead attention + nonlinearity (str): Add some non-linearity after the memory block + reduction_type (str): type of compression + add_mask (bool): If True, add the bias to prevent attention to the future + + Returns: + (tf.Tensor): float32 of shape [batch, length, depth] + + Raises: + ValueError: If reduction_type or nonlinearity is invalid + """ + if not factor or not multihead_params: + raise ValueError("factor and multihead_params should be set") + if memory_antecedent is not None: + raise NotImplementedError( + "multihead_self_attention_reduced only works with self-attention") + + depth = x.get_shape().as_list()[-1] + + # Could try to have some overlap between the blocks but that would + # create conv artifacts, would make it difficult to not attend to the future + # within one group and the padding should be handled specially. 
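+ # Illustrative shapes (added note, not in the original): with factor=4 and
+ # x of shape [batch, 1024, depth], the compressed memory computed below is
+ # [batch, 256, depth], and becomes [batch, 257, depth] once the first
+ # position is prepended so that every query has something to attend to.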
+ + # Reduce the memory dimension + if reduction_type == "attention": + memory_x = local_reduction_attention(x, factor, multihead_params) + elif reduction_type == "conv": + # With valid padding, the last block won't be computed (not attended anyway) + memory_x = conv_elems_1d(x, factor) + else: + raise ValueError("Unknown reduction type {}".format(reduction_type)) + + if nonlinearity == "silu": + memory_x *= tf.nn.sigmoid(memory_x) + elif nonlinearity != "none": + raise ValueError("Unknown non linearity {}".format(nonlinearity)) + + memory_x = tf.concat( + # Add the first elem to make it attendable by everyone (otherwise the + # first block cannot attend to anything) + [x[:, :1, :], memory_x], + axis=1, + ) + + # Construct the bias + @expert_utils.add_name_scope() + def construct_bias_vectors(t, axis): + length = to_float(common_layers.shape_list(t)[1]) + length_coordinates = tf.range(length, dtype=tf.float32) + length_coordinates = tf.expand_dims(length_coordinates, axis=axis) + # [1, length_k] or [length_q, 1] + return length_coordinates + + if add_mask: # Create mask to prevent attention to the future + bias = to_float( + tf.greater( + # Because we add the first elem to the memory block and it can be + # attended by anyone,we don't need to add +1 anymore to prevent self + # attention Use * factor to make sure the last tokens of a block + # cannot attend the block + construct_bias_vectors(memory_x, 0) * factor, + # +epsilon to avoid float equality + construct_bias_vectors(x, 1) + 1e-3, + )) * -1e9 + bias = tf.expand_dims(bias, axis=0) + bias = tf.expand_dims(bias, axis=0) # [1, 1, length_k, length_q] + else: + bias = None + + return multihead_attention( + query_antecedent=x, + memory_antecedent=memory_x, + bias=bias, + output_depth=depth, + **multihead_params) + + +def scaled_dot_product_attention_simple(q, k, v, bias, name=None): + """Scaled dot-product attention. One head. One spatial dimension. + + Args: + q: a Tensor with shape [batch, length_q, depth_k] + k: a Tensor with shape [batch, length_kv, depth_k] + v: a Tensor with shape [batch, length_kv, depth_v] + bias: optional Tensor broadcastable to [batch, length_q, length_kv] + name: an optional string + + Returns: + A Tensor. + """ + with tf.variable_scope( + name, default_name="scaled_dot_product_attention_simple"): + scalar = tf.rsqrt(to_float(common_layers.shape_list(q)[2])) + logits = tf.matmul(q * scalar, k, transpose_b=True) + if bias is not None: + logits += bias + weights = tf.nn.softmax(logits, name="attention_weights") + if common_layers.should_generate_summaries(): + tf.summary.image( + "attention", tf.expand_dims(tf.pow(weights, 0.2), 3), max_outputs=1) + return tf.matmul(weights, v) + + +_function_cache = {} + + +def multihead_self_attention_memory_efficient(x, + bias, + num_heads, + head_size=None, + epsilon=1e-6, + forget=True, + test_vars=None, + name=None): + """Multihead scaled-dot-product self-attention. + + Includes layer norm. + + Returns multihead-self-attention(layer_norm(x)) + + Computes one attention head at a time to avoid exhausting memory. + + If forget=True, then forget all forwards activations and recompute on + the backwards pass. 
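+
+ (Clarifying note, not in the original docstring: with forget=True the
+ per-head forward pass is wrapped in a Defun with a custom gradient that
+ recomputes each head's activations during backprop, trading extra compute
+ for a large reduction in stored activations.)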
+ + Args: + x: a Tensor with shape [batch, length, input_size] + bias: an attention bias tensor broadcastable to [batch, 1, length, length] + num_heads: an integer + head_size: an optional integer - defaults to input_size/num_heads + epsilon: a float, for layer norm + forget: a boolean - forget forwards activations and recompute on backprop + test_vars: optional tuple of variables for testing purposes + name: an optional string + + Returns: + A Tensor. + """ + io_size = x.get_shape().as_list()[-1] + if head_size is None: + assert io_size % num_heads == 0 + head_size = io_size / num_heads + + def forward_internal(x, wqkv, wo, attention_bias, norm_scale, norm_bias): + """Forward function.""" + n = common_layers.layer_norm_compute(x, epsilon, norm_scale, norm_bias) + wqkv_split = tf.unstack(wqkv, num=num_heads) + wo_split = tf.unstack(wo, num=num_heads) + y = 0 + for h in range(num_heads): + with tf.control_dependencies([y] if h > 0 else []): + combined = tf.nn.conv1d(n, wqkv_split[h], 1, "SAME") + q, k, v = tf.split(combined, 3, axis=2) + o = scaled_dot_product_attention_simple(q, k, v, attention_bias) + y += tf.nn.conv1d(o, wo_split[h], 1, "SAME") + return y + + key = ( + "multihead_self_attention_memory_efficient %s %s" % (num_heads, epsilon)) + if not forget: + forward_fn = forward_internal + elif key in _function_cache: + forward_fn = _function_cache[key] + else: + + @function.Defun(compiled=True) + def grad_fn(x, wqkv, wo, attention_bias, norm_scale, norm_bias, dy): + """Custom gradient function.""" + with tf.control_dependencies([dy]): + n = common_layers.layer_norm_compute(x, epsilon, norm_scale, norm_bias) + wqkv_split = tf.unstack(wqkv, num=num_heads) + wo_split = tf.unstack(wo, num=num_heads) + deps = [] + dwqkvs = [] + dwos = [] + dn = 0 + for h in range(num_heads): + with tf.control_dependencies(deps): + combined = tf.nn.conv1d(n, wqkv_split[h], 1, "SAME") + q, k, v = tf.split(combined, 3, axis=2) + o = scaled_dot_product_attention_simple(q, k, v, attention_bias) + partial_y = tf.nn.conv1d(o, wo_split[h], 1, "SAME") + pdn, dwqkvh, dwoh = tf.gradients( + ys=[partial_y], + xs=[n, wqkv_split[h], wo_split[h]], + grad_ys=[dy]) + dn += pdn + dwqkvs.append(dwqkvh) + dwos.append(dwoh) + deps = [dn, dwqkvh, dwoh] + dwqkv = tf.stack(dwqkvs) + dwo = tf.stack(dwos) + with tf.control_dependencies(deps): + dx, dnorm_scale, dnorm_bias = tf.gradients( + ys=[n], xs=[x, norm_scale, norm_bias], grad_ys=[dn]) + return (dx, dwqkv, dwo, tf.zeros_like(attention_bias), dnorm_scale, + dnorm_bias) + + @function.Defun( + grad_func=grad_fn, compiled=True, separate_compiled_gradients=True) + def forward_fn(x, wqkv, wo, attention_bias, norm_scale, norm_bias): + return forward_internal(x, wqkv, wo, attention_bias, norm_scale, + norm_bias) + + _function_cache[key] = forward_fn + + if bias is not None: + bias = tf.squeeze(bias, 1) + with tf.variable_scope(name, default_name="multihead_attention", values=[x]): + # TODO(noam): it would be nice to save memory by casting x to float16 + # here, but this causes problems with the gradients. Figure out if there + # is a way to leave the gradients as float32. 
+ if test_vars is not None: + wqkv, wo, norm_scale, norm_bias = list(test_vars) + else: + wqkv = tf.get_variable( + "wqkv", [num_heads, 1, io_size, 3 * head_size], + initializer=tf.random_normal_initializer(stddev=io_size**-0.5)) + wo = tf.get_variable( + "wo", [num_heads, 1, head_size, io_size], + initializer=tf.random_normal_initializer( + stddev=(head_size * num_heads)**-0.5)) + norm_scale, norm_bias = common_layers.layer_norm_vars(io_size) + y = forward_fn(x, wqkv, wo, bias, norm_scale, norm_bias) + y.set_shape(x.get_shape()) + return y + + +multihead_attention_sparse_dot_prod = functools.partial( + multihead_attention, attention_type=sparse_dot_product_attention) + +multihead_attention_sparse_truncated = functools.partial( + multihead_attention, attention_type=sparse_dot_product_attention_truncated) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/common_layers.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/common_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..9805876a63804e34d694a6ff7854ed1ec0c397b5 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/common_layers.py @@ -0,0 +1,4080 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - Added support for HabanaLayerNorm + +"""Layers common to multiple models.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import contextlib +import functools +import math + +from absl import logging +import numpy as np +from six.moves import range # pylint: disable=redefined-builtin + +from TensorFlow.nlp.transformer.utils import contrib +import tensorflow.compat.v1 as tf +import tensorflow_probability as tfp + +from tensorflow.python.framework import function +from tensorflow.python.framework import ops +from tensorflow.python.ops import control_flow_util +from tensorflow.python.ops import inplace_ops + +from habana_frameworks.tensorflow.ops.layer_norm import * +from habana_frameworks.tensorflow import habana_ops +from tensorflow.python.framework import tensor_util +from tensorflow.python.client import device_lib + +# TODO(lukaszkaiser): remove this function when not needed any more. 
+def layers(): + """Get the layers module good for TF 1 and TF 2 work for now.""" + layers_module = None + try: + layers_module = tf.layers + except AttributeError: + logging.info("Cannot access tf.layers, trying TF2 layers.") + try: + from tensorflow.python import tf2 # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + if tf2.enabled(): + logging.info("Running in V2 mode, using Keras layers.") + layers_module = tf.keras.layers + except ImportError: + pass + return layers_module + + +@function.Defun( + python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), + shape_func=lambda op: [op.inputs[0].get_shape()]) +def convert_gradient_to_tensor(x): + """Identity operation whose gradient is converted to a `Tensor`. + + Currently, the gradient to `tf.concat` is particularly expensive to + compute if dy is an `IndexedSlices` (a lack of GPU implementation + forces the gradient operation onto CPU). This situation occurs when + the output of the `tf.concat` is eventually passed to `tf.gather`. + It is sometimes faster to convert the gradient to a `Tensor`, so as + to get the cheaper gradient for `tf.concat`. To do this, replace + `tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`. + + Args: + x: A `Tensor`. + + Returns: + The input `Tensor`. + """ + return x + + +def is_xla_compiled(): + """Whether we are building graph that will be compiled by XLA. + + This checks whether the code is executing within an XLA context. + + If True, model authors should ensure the graph they build is compilable by + XLA. Specifically, they should ensure that all ops have XLA implementations + and that all shapes are statically known. + + Returns: + bool, whether the current graph will be compiled for XLA. + """ + ctxt = tf.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access + return control_flow_util.GetContainingXLAContext(ctxt) is not None + + +def to_float(x): + """Cast x to float; created because tf.to_float is deprecated.""" + return tf.cast(x, tf.float32) + + +def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs): + """Like tf.nn.dropout but takes broadcast_dims instead of noise_shape. + + Instead of specifying noise_shape, this function takes broadcast_dims - + a list of dimension numbers in which noise_shape should be 1. The random + keep/drop tensor has dimensionality 1 along these dimensions. + + Args: + x: a floating point tensor. + keep_prob: A scalar Tensor with the same type as x. + The probability that each element is kept. + broadcast_dims: an optional list of integers + the dimensions along which to broadcast the keep/drop flags. + **kwargs: keyword arguments to tf.nn.dropout other than "noise_shape". + + Returns: + Tensor of the same shape as x. + """ + assert "noise_shape" not in kwargs + if broadcast_dims: + shape = tf.shape(x) + ndims = len(x.get_shape()) + # Allow dimensions like "-1" as well. 
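+ # Illustrative example (added note): for a rank-4 x and broadcast_dims=[-1],
+ # noise_shape becomes [batch, d1, d2, 1], so a single keep/drop decision is
+ # shared along the last dimension.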
+ broadcast_dims = [dim + ndims if dim < 0 else dim for dim in broadcast_dims] + kwargs["noise_shape"] = [ + 1 if i in broadcast_dims else shape[i] for i in range(ndims) + ] + return tf.nn.dropout(x, keep_prob, **kwargs) + + +def comma_separated_string_to_integer_list(s): + return [int(i) for i in s.split(",") if i] + + +def saturating_sigmoid(x): + """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1].""" + with tf.name_scope("saturating_sigmoid", values=[x]): + y = tf.sigmoid(x) + return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1)) + + +def hard_sigmoid(x, saturation_limit=0.9): + saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit)) + x_shifted = 0.5 * x + 0.5 + return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost + + +def hard_tanh(x, saturation_limit=0.9): + saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit)) + return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost + + +def inverse_exp_decay(max_step, min_value=0.01, step=None): + """Inverse-decay exponentially from min_value to 1.0 reached at max_step.""" + inv_base = tf.exp(tf.log(min_value) / float(max_step)) + if step is None: + step = tf.train.get_global_step() + if step is None: + return 1.0 + step = to_float(step) + return inv_base**tf.maximum(float(max_step) - step, 0.0) + + +def inverse_lin_decay(max_step, min_value=0.01, step=None): + """Inverse-decay linearly from min_value to 1.0 reached at max_step.""" + if step is None: + step = tf.train.get_global_step() + if step is None: + return 1.0 + step = to_float(step) + progress = tf.minimum(step / float(max_step), 1.0) + return progress * (1.0 - min_value) + min_value + + +def inverse_sigmoid_decay(max_step, min_value=0.01, step=None): + """Inverse-decay linearly from min_value to 1.0 reached at max_step.""" + if step is None: + step = tf.train.get_global_step() + if step is None: + return 1.0 + step = to_float(step) + + def sigmoid(x): + return 1 / (1 + tf.exp(-x)) + + def inv_sigmoid(y): + return tf.log(y / (1 - y)) + + assert min_value > 0, ( + "sigmoid's output is always >0 and <1. min_value must respect " + "these bounds for interpolation to work.") + assert min_value < 0.5, "Must choose min_value on the left half of sigmoid." + + # Find + # x s.t. sigmoid(x ) = y_min and + # x' s.t. sigmoid(x') = y_max + # We will map [0, max_step] to [x_min, x_max]. 
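+ # Illustrative numbers (added note): with min_value=0.01,
+ # x_min = log(0.01 / 0.99) ~= -4.6 and x_max ~= 4.6, so the schedule follows
+ # the central part of the sigmoid and is then rescaled to [min_value, 1.0].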
+ y_min = min_value + y_max = 1.0 - min_value + x_min = inv_sigmoid(y_min) + x_max = inv_sigmoid(y_max) + + x = tf.minimum(step / float(max_step), 1.0) # [0, 1] + x = x_min + (x_max - x_min) * x # [x_min, x_max] + y = sigmoid(x) # [y_min, y_max] + + y = (y - y_min) / (y_max - y_min) # [0, 1] + y = y * (1.0 - y_min) # [0, 1-y_min] + y += y_min # [y_min, 1] + return y + + +def shakeshake2_py(x, y, equal=False, individual=False): + """The shake-shake sum of 2 tensors, python version.""" + if equal: + alpha = 0.5 + elif individual: + alpha = tf.random_uniform(tf.get_shape(x)[:1]) + else: + alpha = tf.random_uniform([]) + + return alpha * x + (1.0 - alpha) * y + + +@function.Defun() +def shakeshake2_grad(x1, x2, dy): + """Overriding gradient for shake-shake of 2 tensors.""" + y = shakeshake2_py(x1, x2) + dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy]) + return dx + + +@function.Defun() +def shakeshake2_indiv_grad(x1, x2, dy): + """Overriding gradient for shake-shake of 2 tensors.""" + y = shakeshake2_py(x1, x2, individual=True) + dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy]) + return dx + + +@function.Defun() +def shakeshake2_equal_grad(x1, x2, dy): + """Overriding gradient for shake-shake of 2 tensors.""" + y = shakeshake2_py(x1, x2, equal=True) + dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy]) + return dx + + +@function.Defun(grad_func=shakeshake2_grad) +def shakeshake2(x1, x2): + """The shake-shake function with a different alpha for forward/backward.""" + return shakeshake2_py(x1, x2) + + +@function.Defun(grad_func=shakeshake2_indiv_grad) +def shakeshake2_indiv(x1, x2): + return shakeshake2_py(x1, x2, individual=True) + + +@function.Defun(grad_func=shakeshake2_equal_grad) +def shakeshake2_eqgrad(x1, x2): + """The shake-shake function with a different alpha for forward/backward.""" + return shakeshake2_py(x1, x2) + + +def shakeshake(xs, equal_grad=False): + """Multi-argument shake-shake, currently approximated by sums of 2.""" + if len(xs) == 1: + return xs[0] + div = (len(xs) + 1) // 2 + arg1 = shakeshake(xs[:div], equal_grad=equal_grad) + arg2 = shakeshake(xs[div:], equal_grad=equal_grad) + if equal_grad: + return shakeshake2_eqgrad(arg1, arg2) + return shakeshake2(arg1, arg2) + + +def convert_rgb_to_real(x): + """Conversion of pixel values to real numbers.""" + with tf.name_scope("rgb_to_real", values=[x]): + x = to_float(x) + x /= 255.0 + return x + + +def convert_rgb_to_symmetric_real(x): + """Conversion of pixel values to real numbers.""" + with tf.name_scope("rgb_to_real", values=[x]): + x = to_float(x) + # Convert each pixel intensity in [0, 1, 2, ..., 255] into a real number in + # the range [-1, 1]. 
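+ # For example (added note): 0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0.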
+ x = (x / 127.5) - 1 + return x + + +def convert_real_to_rgb(x): + """Conversion of real numbers to pixel values.""" + with tf.name_scope("real_to_rgb", values=[x]): + x *= 255.0 + return x + + +def expand_squeeze_to_nd(x, n, squeeze_dim=2, expand_dim=-1): + """Make x n-d with squeeze and expand_dims.""" + if len(x.shape) > n: + while len(x.shape) != n: + x = tf.squeeze(x, [squeeze_dim]) + else: + while len(x.shape) != n: + x = tf.expand_dims(x, expand_dim) + return x + + +def standardize_images(x): + """Image standardization on batches and videos.""" + with tf.name_scope("standardize_images", values=[x]): + x_shape = shape_list(x) + x = to_float(tf.reshape(x, [-1] + x_shape[-3:])) + x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True) + x_variance = tf.reduce_mean( + tf.squared_difference(x, x_mean), axis=[1, 2], keepdims=True) + num_pixels = to_float(x_shape[-2] * x_shape[-3]) + x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels)) + return tf.reshape(x, x_shape) + + +def flatten4d3d(x): + """Flatten a 4d-tensor into a 3d-tensor by joining width and height.""" + xshape = shape_list(x) + result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]]) + return result + + +# TODO(noam): remove this function after TPUs do gather faster. +def gather(params, indices, dtype=tf.float32): + """Version of tf.gather that works faster on tpu.""" + if not is_xla_compiled(): + return tf.gather(params, indices) + vocab_size = params.get_shape().as_list()[0] + indices_flat = tf.reshape(indices, [-1]) + out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params) + out = reshape_like(out, tf.expand_dims(indices, -1)) + return out + + +# TODO(noam): remove this function after TPUs do cumsum faster. +def cumsum(x, axis=0, exclusive=False): + """TPU hack for tf.cumsum. + + This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless + the axis dimension is very large. + + Args: + x: a Tensor + axis: an integer + exclusive: a boolean + + Returns: + Tensor of the same shape as x. + """ + if not is_xla_compiled(): + return tf.cumsum(x, axis=axis, exclusive=exclusive) + x_shape = shape_list(x) + rank = len(x_shape) + length = x_shape[axis] + my_range = tf.range(length) + comparator = tf.less if exclusive else tf.less_equal + mask = tf.cast( + comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)), + x.dtype) + ret = tf.tensordot(x, mask, axes=[[axis], [0]]) + if axis != rank - 1: + ret = tf.transpose( + ret, + list(range(axis)) + [rank - 1] + list(range(axis, rank - 1))) + return ret + + +def dropout_no_scaling(x, keep_prob): + """Like tf.nn.dropout, but does not scale up. Works on integers also. + + Args: + x: a Tensor + keep_prob: a floating point number + + Returns: + Tensor of the same shape as x. + """ + if keep_prob == 1.0: + return x + mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob) + return x * cast_like(mask, x) + + +def embedding(x, + vocab_size, + dense_size, + name=None, + reuse=None, + multiplier=1.0, + symbol_dropout_rate=0.0, + embedding_var=None, + dtype=tf.float32): + """Embed x of type int64 into dense vectors, reducing to max 4 dimensions.""" + with tf.variable_scope( + name, default_name="embedding", values=[x], reuse=reuse, dtype=dtype): + if embedding_var is None: + embedding_var = tf.get_variable("kernel", [vocab_size, dense_size]) + # On the backwards pass, we want to convert the gradient from + # an indexed-slices to a regular tensor before sending it back to the + # parameter server. 
This avoids excess computation on the parameter server. + if not tf.executing_eagerly(): + embedding_var = convert_gradient_to_tensor(embedding_var) + x = dropout_no_scaling(x, 1.0 - symbol_dropout_rate) + emb_x = gather(embedding_var, x, dtype) + if multiplier != 1.0: + emb_x *= multiplier + static_shape = emb_x.shape.as_list() + if len(static_shape) < 5: + return emb_x + assert len(static_shape) == 5 + # If we had an extra channel dimension, assume it's 1, i.e. shape[3] == 1. + return tf.squeeze(emb_x, 3) + + +def shift_right(x, pad_value=None): + """Shift the second dimension of x right by one.""" + if pad_value is None: + shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :] + else: + shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :] + return shifted_targets + + +def shift_right_3d(x, pad_value=None): + """Shift the second dimension of x right by one.""" + if pad_value is None: + shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :] + else: + shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :] + return shifted_targets + + +def shift_right_2d(x, pad_value=None): + """Shift the second dimension of x right by one.""" + if pad_value is None: + shifted_targets = tf.pad(x, [[0, 0], [1, 0]])[:, :-1] + else: + shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1] + return shifted_targets + + +def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None): + """Use a strided convolution to downsample x by 2, `nbr_steps` times. + + We use stride and filter size 2 to avoid the checkerboard problem of deconvs. + As detailed in http://distill.pub/2016/deconv-checkerboard/. + + Args: + x: a `Tensor` with shape `[batch, spatial, depth]` or + `[batch, spatial_1, spatial_2, depth]` + nbr_steps: number of halving downsample rounds to apply + output_filters: an int specifying the filter count for the convolutions + name: a string + reuse: a boolean + + Returns: + a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or + `[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps), + output_filters]` + """ + with tf.variable_scope( + name, default_name="conv_stride2_multistep", values=[x], reuse=reuse): + if nbr_steps == 0: + out = conv(x, output_filters, (1, 1)) + return out, [out] + hidden_layers = [x] + for i in range(nbr_steps): + hidden_layers.append( + conv( + hidden_layers[-1], + output_filters, (2, 2), + strides=2, + activation=tf.nn.relu, + name="conv" + str(i))) + return hidden_layers[-1], hidden_layers + + +def deconv_stride2_multistep(x, + nbr_steps, + output_filters, + name=None, + reuse=None): + """Use a deconvolution to upsample x by 2**`nbr_steps`. + + Args: + x: a `Tensor` with shape `[batch, spatial, depth]` or + `[batch, spatial_1, spatial_2, depth]` + nbr_steps: an int specifying the number of doubling upsample rounds to + apply. 
+ output_filters: an int specifying the filter count for the deconvolutions + name: a string + reuse: a boolean + + Returns: + a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or + `[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps), + output_filters]` + """ + with tf.variable_scope( + name, default_name="deconv_stride2_multistep", values=[x], reuse=reuse): + + def deconv1d(cur, i): + cur_shape = shape_list(cur) + thicker = conv( + cur, + output_filters * 2, (1, 1), + padding="SAME", + activation=tf.nn.relu, + name="deconv1d" + str(i)) + return tf.reshape(thicker, + [cur_shape[0], cur_shape[1] * 2, 1, output_filters]) + + def deconv2d(cur, i): + thicker = conv( + cur, + output_filters * 4, (1, 1), + padding="SAME", + activation=tf.nn.relu, + name="deconv2d" + str(i)) + return tf.depth_to_space(thicker, 2) + + cur = x + for i in range(nbr_steps): + if cur.get_shape()[2] == 1: + cur = deconv1d(cur, i) + else: + cur_dim = shape_list(cur)[2] + if isinstance(cur_dim, int): + if cur_dim == 1: + cur = deconv1d(cur, i) + else: + cur = deconv2d(cur, i) + else: + cur = tf.cond( + tf.equal(cur_dim, 1), + lambda idx=i: deconv1d(cur, idx), + lambda idx=i: deconv2d(cur, idx)) + return cur + + +def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs): + """Conditional conv_fn making kernel 1d or 2d depending on inputs shape.""" + static_shape = inputs.get_shape() + if not static_shape or len(static_shape) != 4: + raise ValueError("Inputs to conv must have statically known rank 4. " + "Shape: " + str(static_shape)) + # Add support for left padding. + if kwargs.get("padding") == "LEFT": + dilation_rate = (1, 1) + if "dilation_rate" in kwargs: + dilation_rate = kwargs["dilation_rate"] + assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1 + height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0] + cond_padding = tf.cond( + tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0), + lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1])) + width_padding = 0 if static_shape[2] == 1 else cond_padding + padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]] + inputs = tf.pad(inputs, padding) + # Set middle two dimensions to None to prevent convolution from complaining + inputs.set_shape([static_shape[0], None, None, static_shape[3]]) + kwargs["padding"] = "VALID" + + def conv2d_kernel(kernel_size_arg, name_suffix): + """Call conv2d but add suffix to name.""" + name = "{}_{}".format(kwargs.get("name", "conv"), name_suffix) + original_name = kwargs.pop("name", None) + original_force2d = kwargs.pop("force2d", None) + result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs) + if original_name is not None: + kwargs["name"] = original_name # Restore for other calls. 
+ if original_force2d is not None: + kwargs["force2d"] = original_force2d + return result + + return conv2d_kernel(kernel_size, "single") + + +def conv(inputs, filters, kernel_size, dilation_rate=(1, 1), **kwargs): + def _conv2d(x, *args, **kwargs): + return layers().Conv2D(*args, **kwargs)(x) + return conv_internal( + _conv2d, + inputs, + filters, + kernel_size, + dilation_rate=dilation_rate, + **kwargs) + + +def conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs): + return tf.squeeze( + conv(tf.expand_dims(inputs, 2), filters, (kernel_size, 1), + dilation_rate=(dilation_rate, 1), **kwargs), + 2) + + +def separable_conv(inputs, filters, kernel_size, **kwargs): + def _sep_conv2d(x, *args, **kwargs): + return layers().SeparableConv2D(*args, **kwargs)(x) + return conv_internal(_sep_conv2d, inputs, filters, kernel_size, **kwargs) + + +def subseparable_conv(inputs, filters, kernel_size, **kwargs): + """Sub-separable convolution. If separability == 0 it's a separable_conv.""" + + def conv_fn(inputs, filters, kernel_size, **kwargs): + """Sub-separable convolution, splits into separability-many blocks.""" + separability = None + if "separability" in kwargs: + separability = kwargs.pop("separability") + if separability: + parts = [] + abs_sep = separability if separability > 0 else -1 * separability + for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)): + with tf.variable_scope("part_%d" % split_idx): + if separability > 0: + parts.append( + layers().Conv2D(filters // separability, kernel_size, + **kwargs)(split)) + else: + parts.append( + layers().SeparableConv2D(filters // abs_sep, + kernel_size, **kwargs)(split)) + if separability > 1: + result = layers().Conv2D(filters, (1, 1))(tf.concat(parts, axis=3)) + elif abs_sep == 1: # If we have just one block, return it. 
+ assert len(parts) == 1 + result = parts[0] + else: + result = tf.concat(parts, axis=3) + else: + result = layers().SeparableConv2D(filters, kernel_size, + **kwargs)(inputs) + if separability is not None: + kwargs["separability"] = separability + return result + + return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs) + + +def layer_norm_vars(filters): + """Create Variables for layer norm.""" + scale = tf.get_variable( + "layer_norm_scale", [filters], initializer=tf.ones_initializer()) + bias = tf.get_variable( + "layer_norm_bias", [filters], initializer=tf.zeros_initializer()) + return scale, bias + + +def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None): + """Layer norm raw computation.""" + + # Save these before they get converted to tensors by the casting below + params = (scale, bias) + + epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]] + mean = tf.reduce_mean(x, axis=[-1], keepdims=True) + variance = tf.reduce_mean( + tf.squared_difference(x, mean), axis=[-1], keepdims=True) + norm_x = (x - mean) * tf.rsqrt(variance + epsilon) + + output = norm_x * scale + bias + + + return output + +def layer_norm(x, + filters=None, + epsilon=1e-6, + name=None, + reuse=None, + layer_collection=None): + """Layer normalize the tensor x, averaging over the last dimension.""" + devices = device_lib.list_local_devices() + use_fused_layer_norm = any(dev.device_type == "HPU" for dev in devices) + + if filters is None: + filters = shape_list(x)[-1] + with tf.variable_scope( + name, default_name="layer_norm", values=[x], reuse=reuse): + scale, bias = layer_norm_vars(filters) + + if use_fused_layer_norm: + out, _, _ = habana_ops.habana_layer_norm( + x=x, + beta=bias, + gamma=scale, + axes=tensor_util.make_tensor_proto(len(x.shape)-1), + epsilon=tensor_util.make_tensor_proto(epsilon) + ) + else: + out = layer_norm_compute(x, epsilon, scale, bias, + layer_collection=layer_collection) + return out + + + +def group_norm(x, filters=None, num_groups=8, epsilon=1e-5): + """Group normalization as in https://arxiv.org/abs/1803.08494.""" + x_shape = shape_list(x) + if filters is None: + filters = x_shape[-1] + assert len(x_shape) == 4 + assert filters % num_groups == 0 + # Prepare variables. + scale = tf.get_variable( + "group_norm_scale", [filters], initializer=tf.ones_initializer()) + bias = tf.get_variable( + "group_norm_bias", [filters], initializer=tf.zeros_initializer()) + epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]] + # Reshape and compute group norm. + x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups]) + # Calculate mean and variance on heights, width, channels (not groups). 
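+ # Shape sketch (illustrative numbers): with x of shape [8, 32, 32, 64] and
+ # num_groups=8 the reshape above gives [8, 32, 32, 8, 8], and the moments
+ # below are taken over height, width and the within-group channels
+ # (axes 1, 2 and 4), i.e. one mean/variance per (batch, group).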
+ mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True) + norm_x = (x - mean) * tf.rsqrt(variance + epsilon) + return tf.reshape(norm_x, x_shape) * scale + bias + + +def noam_norm(x, epsilon=1.0, name=None): + """One version of layer normalization.""" + with tf.name_scope(name, default_name="noam_norm", values=[x]): + shape = x.get_shape() + ndims = len(shape) + return (tf.nn.l2_normalize(x, ndims - 1, epsilon=epsilon) * tf.sqrt( + to_float(shape[-1]))) + + +def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None): + """Layer normalization with l2 norm.""" + if filters is None: + filters = shape_list(x)[-1] + with tf.variable_scope(name, default_name="l2_norm", values=[x], reuse=reuse): + scale = tf.get_variable( + "l2_norm_scale", [filters], initializer=tf.ones_initializer()) + bias = tf.get_variable( + "l2_norm_bias", [filters], initializer=tf.zeros_initializer()) + epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]] + mean = tf.reduce_mean(x, axis=[-1], keepdims=True) + l2norm = tf.reduce_sum( + tf.squared_difference(x, mean), axis=[-1], keepdims=True) + norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon) + return norm_x * scale + bias + + +def apply_spectral_norm(x): + """Normalizes x using the spectral norm. + + The implementation follows Algorithm 1 of + https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is + reshaped such that the number of channels (last-dimension) is the same. + + Args: + x: Tensor with the last dimension equal to the number of filters. + + Returns: + x: Tensor with the same shape as x normalized by the spectral norm. + assign_op: Op to be run after every step to update the vector "u". + """ + weights_shape = shape_list(x) + other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1] + + # Reshape into a 2-D matrix with outer size num_filters. + weights_2d = tf.reshape(x, (other, num_filters)) + + # v = Wu / ||W u|| + with tf.variable_scope("u", reuse=tf.AUTO_REUSE): + u = tf.get_variable( + "u", [num_filters, 1], + initializer=tf.truncated_normal_initializer(), + trainable=False) + v = tf.nn.l2_normalize(tf.matmul(weights_2d, u)) + + # u_new = vW / ||v W|| + u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d)) + + # s = v*W*u + spectral_norm = tf.squeeze( + tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new)))) + + # set u equal to u_new in the next iteration. + assign_op = tf.assign(u, tf.transpose(u_new)) + return tf.divide(x, spectral_norm), assign_op + + +def apply_norm(x, norm_type, depth, epsilon, layer_collection=None): + """Apply Normalization.""" + if layer_collection is not None: + assert norm_type == "layer" + if norm_type == "layer": + return layer_norm( + x, filters=depth, epsilon=epsilon, layer_collection=layer_collection) + if norm_type == "group": + return group_norm(x, filters=depth, epsilon=epsilon) + if norm_type == "batch": + return layers().BatchNormalization(epsilon=epsilon)(x) + if norm_type == "noam": + return noam_norm(x, epsilon) + if norm_type == "l2": + return l2_norm(x, filters=depth, epsilon=epsilon) + if norm_type == "none": + return x + raise ValueError("Parameter normalizer_fn must be one of: 'layer', 'batch'," + "'noam', 'lr', 'none'.") + + +def zero_add(previous_value, x, name=None, reuse=None): + """Resnet connection with zero initialization. + + Another type of resnet connection which returns previous_value + gamma * x. + gamma is a trainable scalar and initialized with zero. 
It is useful when a + module is plugged into a trained model and we want to make sure it matches the + original model's performance. + + Args: + previous_value: A tensor. + x: A tensor. + name: name of variable scope; defaults to zero_add. + reuse: reuse scope. + + Returns: + previous_value + gamma * x. + """ + with tf.variable_scope(name, default_name="zero_add", reuse=reuse): + gamma = tf.get_variable("gamma", (), initializer=tf.zeros_initializer()) + return previous_value + gamma * x + + +def layer_prepostprocess(previous_value, + x, + sequence, + dropout_rate, + norm_type, + depth, + epsilon, + default_name, + name=None, + dropout_broadcast_dims=None, + layer_collection=None): + """Apply a sequence of functions to the input or output of a layer. + + The sequence is specified as a string which may contain the following + characters: + a: add previous_value + n: apply normalization + d: apply dropout + z: zero add + + For example, if sequence=="dna", then the output is + previous_value + normalize(dropout(x)) + + Args: + previous_value: A Tensor, to be added as a residual connection ('a') + x: A Tensor to be transformed. + sequence: a string. + dropout_rate: a float + norm_type: a string (see apply_norm()) + depth: an integer (size of last dimension of x). + epsilon: a float (parameter for normalization) + default_name: a string + name: a string + dropout_broadcast_dims: an optional list of integers less than 3 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the + KFAC optimizer. Default is None. + + Returns: + a Tensor + """ + with tf.variable_scope(name, default_name=default_name): + if sequence == "none": + return x + for c in sequence: + if c == "a": + x += previous_value + elif c == "z": + x = zero_add(previous_value, x) + elif c == "n": + x = apply_norm( + x, norm_type, depth, epsilon, layer_collection=layer_collection) + else: + assert c == "d", ("Unknown sequence step %s" % c) + x = dropout_with_broadcast_dims( + x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + return x + + +def layer_preprocess(layer_input, hparams, layer_collection=None): + """Apply layer preprocessing. + + See layer_prepostprocess() for details. + + A hyperparameters object is passed for convenience. The hyperparameters + that may be used are: + + layer_preprocess_sequence + layer_prepostprocess_dropout + norm_type + hidden_size + norm_epsilon + + Args: + layer_input: a Tensor + hparams: a hyperparameters object. + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the + KFAC optimizer. Default is None. + + Returns: + a Tensor + """ + assert "a" not in hparams.layer_preprocess_sequence, ( + "No residual connections allowed in hparams.layer_preprocess_sequence") + assert "z" not in hparams.layer_preprocess_sequence, ( + "No residual connections allowed in hparams.layer_preprocess_sequence") + return layer_prepostprocess( + None, + layer_input, + sequence=hparams.layer_preprocess_sequence, + dropout_rate=hparams.layer_prepostprocess_dropout, + norm_type=hparams.norm_type, + depth=None, + epsilon=hparams.norm_epsilon, + dropout_broadcast_dims=comma_separated_string_to_integer_list( + getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")), + default_name="layer_prepostprocess", + layer_collection=layer_collection) + + +def layer_postprocess(layer_input, layer_output, hparams): + """Apply layer postprocessing. + + See layer_prepostprocess() for details. 
+ + A hyperparameters object is passed for convenience. The hyperparameters + that may be used are: + + layer_postprocess_sequence + layer_prepostprocess_dropout + norm_type + hidden_size + norm_epsilon + + Args: + layer_input: a Tensor + layer_output: a Tensor + hparams: a hyperparameters object. + + Returns: + a Tensor + """ + return layer_prepostprocess( + layer_input, + layer_output, + sequence=hparams.layer_postprocess_sequence, + dropout_rate=hparams.layer_prepostprocess_dropout, + norm_type=hparams.norm_type, + depth=None, + epsilon=hparams.norm_epsilon, + dropout_broadcast_dims=comma_separated_string_to_integer_list( + getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")), + default_name="layer_postprocess") + + +def conv_block_internal(conv_fn, + inputs, + filters, + dilation_rates_and_kernel_sizes, + first_relu=True, + use_elu=False, + separabilities=None, + **kwargs): + """A block of convolutions. + + Args: + conv_fn: convolution function, e.g. conv or separable_conv. + inputs: a Tensor + filters: an Integer + dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h)) + first_relu: whether to do a relu at start (defaults to True) + use_elu: whether to use ELUs instead of ReLUs (defaults to False) + separabilities: list of separability factors (per-layer). + **kwargs: additional arguments (e.g., pooling) + + Returns: + a Tensor. + """ + + name = kwargs.pop("name") if "name" in kwargs else None + mask = kwargs.pop("mask") if "mask" in kwargs else None + + # Usage for normalize_fn kwarg: + # if not specified, use layer norm + # if given normalize_fn=None, don't use any normalization + # if given normalize_fn=norm, use the specified norm function + + use_layer_norm = "normalizer_fn" not in kwargs + norm = kwargs.pop("normalizer_fn", None) + use_normalizer_fn = use_layer_norm or norm + + if use_layer_norm: + norm = lambda x, name: layer_norm(x, filters, name=name) + + with tf.variable_scope(name, "conv_block", [inputs]): + cur, counter = inputs, -1 + for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes: + counter += 1 + if first_relu or counter > 0: + cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur) + if mask is not None: + cur *= mask + if separabilities: + cur = conv_fn( + cur, + filters, + kernel_size, + dilation_rate=dilation_rate, + name="conv_block_%d" % counter, + use_bias=norm is None, + separability=separabilities[counter], + **kwargs) + else: + cur = conv_fn( + cur, + filters, + kernel_size, + dilation_rate=dilation_rate, + name="conv_block_%d" % counter, + use_bias=norm is None, + **kwargs) + if use_normalizer_fn: + cur = norm(cur, name="conv_block_norm_%d" % counter) + return cur + + +def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs): + """A block of standard 2d convolutions.""" + return conv_block_internal(conv, inputs, filters, + dilation_rates_and_kernel_sizes, **kwargs) + + +def conv1d_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs): + """A block of standard 1d convolutions.""" + return conv_block_internal(conv1d, inputs, filters, + dilation_rates_and_kernel_sizes, **kwargs) + + +def separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes, + **kwargs): + """A block of separable convolutions.""" + return conv_block_internal(separable_conv, inputs, filters, + dilation_rates_and_kernel_sizes, **kwargs) + + +def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes, + **kwargs): + """A block of separable convolutions.""" + return 
conv_block_internal(subseparable_conv, inputs, filters, + dilation_rates_and_kernel_sizes, **kwargs) + + +def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)): + """Pooling (supports "LEFT").""" + with tf.name_scope("pool", values=[inputs]): + static_shape = inputs.get_shape() + if not static_shape or len(static_shape) != 4: + raise ValueError("Inputs to conv must have statically known rank 4.") + # Add support for left padding. + if padding == "LEFT": + assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1 + if len(static_shape) == 3: + width_padding = 2 * (window_size[1] // 2) + padding_ = [[0, 0], [width_padding, 0], [0, 0]] + else: + height_padding = 2 * (window_size[0] // 2) + cond_padding = tf.cond( + tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0), + lambda: tf.constant(2 * (window_size[1] // 2))) + width_padding = 0 if static_shape[2] == 1 else cond_padding + padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]] + inputs = tf.pad(inputs, padding_) + inputs.set_shape([static_shape[0], None, None, static_shape[3]]) + padding = "VALID" + + return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides) + + +def conv_block_downsample(x, + kernel, + strides, + padding, + separability=0, + name=None, + reuse=None): + """Implements a downwards-striding conv block, like Xception exit flow.""" + with tf.variable_scope( + name, default_name="conv_block_downsample", values=[x], reuse=reuse): + hidden_size = int(x.get_shape()[-1]) + res = conv_block( + x, + int(1.25 * hidden_size), [((1, 1), kernel)], + padding=padding, + strides=strides, + name="res_conv") + + x = subseparable_conv_block( + x, + hidden_size, [((1, 1), kernel)], + padding=padding, + separability=separability, + name="conv0") + x = subseparable_conv_block( + x, + int(1.25 * hidden_size), [((1, 1), kernel)], + padding=padding, + separability=separability, + name="conv1") + x = pool(x, kernel, "MAX", padding, strides=strides) + + x += res + + x = subseparable_conv_block( + x, + 2 * hidden_size, [((1, 1), kernel)], + first_relu=False, + padding=padding, + separability=separability, + name="conv2") + x = subseparable_conv_block( + x, + int(2.5 * hidden_size), [((1, 1), kernel)], + padding=padding, + separability=separability, + name="conv3") + return x + + +def get_timing_signal(length, + min_timescale=1, + max_timescale=1e4, + num_timescales=16): + """Create Tensor of sinusoids of different frequencies. + + Args: + length: Length of the Tensor to create, i.e. Number of steps. + min_timescale: a float + max_timescale: a float + num_timescales: an int + + Returns: + Tensor of shape (length, 2*num_timescales) + """ + positions = to_float(tf.range(length)) + log_timescale_increment = ( + math.log(max_timescale / min_timescale) / (num_timescales - 1)) + inv_timescales = min_timescale * tf.exp( + to_float(tf.range(num_timescales)) * -log_timescale_increment) + scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0) + return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) + + +def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16): + """Adds a bunch of sinusoids of different frequencies to a Tensor. + + This allows attention to learn to use absolute and relative positions. + The timing signal should be added to some precursor of both the source + and the target of the attention. + + The use of relative position is possible because sin(x+y) and cos(x+y) can be + expressed in terms of y, sin(x) and cos(x). 
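+ (Concretely, sin(x+y) = sin(x)cos(y) + cos(x)sin(y) and
+ cos(x+y) = cos(x)cos(y) - sin(x)sin(y), so a fixed offset y acts as a
+ fixed linear transformation of the pair (sin(x), cos(x)).)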
+ + In particular, we use a geometric sequence of timescales starting with + min_timescale and ending with max_timescale. For each timescale, we + generate the two sinusoidal signals sin(timestep/timescale) and + cos(timestep/timescale). All of these sinusoids are concatenated in + the depth dimension, padded with zeros to be the same depth as the input, + and added into input. + + Args: + x: a Tensor with shape [?, length, ?, depth] + min_timescale: a float + max_timescale: a float + num_timescales: an int <= depth/2 + + Returns: + a Tensor the same shape as x. + """ + length = shape_list(x)[1] + depth = shape_list(x)[3] + signal = get_timing_signal(length, min_timescale, max_timescale, + num_timescales) + padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]]) + return x + tf.reshape(padded_signal, [1, length, 1, depth]) + + +def mask_from_embedding(emb): + """Input embeddings -> padding mask. + + We have hacked symbol_modality to return all-zero embeddings for padding. + Returns a mask with 0.0 in the padding positions and 1.0 elsewhere. + + Args: + emb: a Tensor with shape [batch, width, height, depth]. + Returns: + a 0.0/1.0 Tensor with shape [batch, width, height, 1]. + """ + return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True)) + + +def length_from_embedding(emb): + """Compute the length of each sequence in the batch. + + Args: + emb: a sequence embedding Tensor with shape [batch, max_time, 1, depth]. + Returns: + a Tensor with shape [batch]. + """ + return tf.cast(tf.reduce_sum(mask_from_embedding(emb), [1, 2, 3]), tf.int32) + + +def mask_pos_gt(source_length, target_length): + """A mask with 1.0 wherever source_pos > target_pos and 0.0 elsewhere. + + Args: + source_length: an integer + target_length: an integer + Returns: + a Tensor with shape [1, target_length, source_length] + """ + return tf.expand_dims( + tf.cast(tf.greater(tf.expand_dims(tf.range(target_length), axis=0), + tf.expand_dims(tf.range(source_length), axis=1)), + dtype=tf.float32), axis=0) + + +def mask_leq(target_length, source_length): + """A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere. + + Args: + target_length: an integer + source_length: an integer + Returns: + a Tensor with shape [1, target_length, source_length] + """ + return ones_matrix_band_part( + target_length, + source_length, + -1, + 0, + out_shape=[1, target_length, source_length]) + + +def mask_pos_lt(source_length, target_length): + """A mask with 1.0 wherever source_pos < target_pos and 0.0 elsewhere. + + Args: + source_length: an integer + target_length: an integer + Returns: + a Tensor with shape [1, target_length, source_length] + """ + return tf.expand_dims( + tf.cast(tf.less(tf.expand_dims(tf.range(target_length), axis=0), + tf.expand_dims(tf.range(source_length), axis=1)), + dtype=tf.float32), axis=0) + + +def relu_density_logit(x, reduce_dims): + """logit(density(x)). + + Useful for histograms. + + Args: + x: a Tensor, typically the output of tf.relu + reduce_dims: a list of dimensions + + Returns: + a Tensor + """ + frac = tf.reduce_mean(to_float(x > 0.0), reduce_dims) + scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10)) + return scaled + + +def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask): + """If necessary, zero out inputs to a conv for padding positions. + + Args: + inputs: a Tensor with shape [batch, length, ...] 
+ kernel_size: an integer or pair of integers + nonpadding_mask: a Tensor with shape [batch, length] + + Returns: + Tensor of the same shape as inputs. + """ + if (kernel_size != 1 and kernel_size != (1, 1) and + nonpadding_mask is not None): + while nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims: + nonpadding_mask = tf.expand_dims(nonpadding_mask, -1) + return inputs * nonpadding_mask + + return inputs + + +def dense_relu_dense(inputs, + filter_size, + output_size, + output_activation=None, + dropout=0.0, + dropout_broadcast_dims=None, + layer_collection=None, + name=None): + """Hidden layer with RELU activation followed by linear projection.""" + # layer_name is appended with "conv1" or "conv2" in this method only for + # historical reasons. These are in fact dense layers. + layer_name = "%s_{}" % name if name else "{}" + h = dense( + inputs, + filter_size, + use_bias=True, + activation=tf.nn.relu, + layer_collection=layer_collection, + name=layer_name.format("conv1")) + + if dropout != 0.0: + h = dropout_with_broadcast_dims( + h, 1.0 - dropout, broadcast_dims=dropout_broadcast_dims) + o = dense( + h, + output_size, + activation=output_activation, + use_bias=True, + layer_collection=layer_collection, + name=layer_name.format("conv2")) + return o + + +def dense_dropconnect(inputs, + output_size, + dropconnect_dropout=0.0, + name="dense_dropconnect", + **kwargs): + """Dense layer with dropconnect.""" + + if dropconnect_dropout != 0.0: + tf.logging.info("Applying dropconnect as the kernel regularization.") + kwargs["kernel_regularizer"] = functools.partial( + tf.nn.dropout, keep_prob=1.0 - dropconnect_dropout) + + return dense(inputs, output_size, use_bias=True, name=name, **kwargs) + + +def conv_relu_conv(inputs, + filter_size, + output_size, + first_kernel_size=3, + second_kernel_size=3, + padding="SAME", + nonpadding_mask=None, + dropout=0.0, + name=None, + cache=None, + decode_loop_step=None): + """Hidden layer with RELU activation followed by linear projection. + + Args: + inputs: A tensor. + filter_size: An integer. + output_size: An integer. + first_kernel_size: An integer. + second_kernel_size: An integer. + padding: A string. + nonpadding_mask: A tensor. + dropout: A float. + name: A string. + cache: A dict, containing Tensors which are the results of previous + attentions, used for fast decoding. + decode_loop_step: An integer, step number of the decoding loop. + Only used for inference on TPU. If it is not None, the function + will do inplace update for the cache instead of concatenating the + current result to the cache. + + Returns: + A Tensor. + """ + with tf.variable_scope(name, "conv_relu_conv", [inputs]): + inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask) + + if cache: + if decode_loop_step is None: + inputs = cache["f"] = tf.concat([cache["f"], inputs], axis=1) + else: + # Inplace update is required for inference on TPU. + # Inplace_ops only supports inplace_update on the first dimension. 
+ # The performance of current implementation is better than updating + # the tensor by adding the result of matmul(one_hot, + # update_in_current_step) + tmp_f = tf.transpose(cache["f"], perm=[1, 0, 2]) + tmp_f = inplace_ops.alias_inplace_update( + tmp_f, + decode_loop_step * tf.shape(inputs)[1], + tf.transpose(inputs, perm=[1, 0, 2])) + inputs = cache["f"] = tf.transpose(tmp_f, perm=[1, 0, 2]) + inputs = cache["f"] = inputs[:, -first_kernel_size:, :] + + h = tpu_conv1d( + inputs, filter_size, first_kernel_size, padding=padding, name="conv1") + + if cache: + h = h[:, -1:, :] + + h = tf.nn.relu(h) + if dropout != 0.0: + h = tf.nn.dropout(h, 1.0 - dropout) + h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask) + return tpu_conv1d( + h, output_size, second_kernel_size, padding=padding, name="conv2") + + +def sepconv_relu_sepconv(inputs, + filter_size, + output_size, + first_kernel_size=(1, 1), + second_kernel_size=(1, 1), + padding="LEFT", + nonpadding_mask=None, + dropout=0.0, + name=None): + """Hidden layer with RELU activation followed by linear projection.""" + with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]): + inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask) + if inputs.get_shape().ndims == 3: + is_3d = True + inputs = tf.expand_dims(inputs, 2) + else: + is_3d = False + h = separable_conv( + inputs, + filter_size, + first_kernel_size, + activation=tf.nn.relu, + padding=padding, + name="conv1") + if dropout != 0.0: + h = tf.nn.dropout(h, 1.0 - dropout) + h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask) + ret = separable_conv( + h, output_size, second_kernel_size, padding=padding, name="conv2") + if is_3d: + ret = tf.squeeze(ret, 2) + return ret + + +# DEPRECATED - use dense_relu_dense, conv_relu_conv, sepconv_relu_sepconv +def conv_hidden_relu(inputs, + hidden_size, + output_size, + kernel_size=(1, 1), + second_kernel_size=(1, 1), + dropout=0.0, + **kwargs): + """Hidden layer with RELU activation followed by linear projection.""" + name = kwargs.pop("name") if "name" in kwargs else None + with tf.variable_scope(name, "conv_hidden_relu", [inputs]): + if inputs.get_shape().ndims == 3: + is_3d = True + inputs = tf.expand_dims(inputs, 2) + else: + is_3d = False + conv_f1 = conv if kernel_size == (1, 1) else separable_conv + h = conv_f1( + inputs, + hidden_size, + kernel_size, + activation=tf.nn.relu, + name="conv1", + **kwargs) + if dropout != 0.0: + h = tf.nn.dropout(h, 1.0 - dropout) + conv_f2 = conv if second_kernel_size == (1, 1) else separable_conv + ret = conv_f2(h, output_size, second_kernel_size, name="conv2", **kwargs) + if is_3d: + ret = tf.squeeze(ret, 2) + return ret + + +def conv_gru(x, + kernel_size, + filters, + padding="SAME", + dilation_rate=(1, 1), + name=None, + reuse=None): + """Convolutional GRU in 1 dimension.""" + + # Let's make a shorthand for conv call first. + def do_conv(args, name, bias_start, padding): + return conv( + args, + filters, + kernel_size, + padding=padding, + dilation_rate=dilation_rate, + bias_initializer=tf.constant_initializer(bias_start), + name=name) + + # Here comes the GRU gate. 
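+ # In equations: r = saturating_sigmoid(conv_r(x)),
+ # g = saturating_sigmoid(conv_g(x)), c = tanh(conv_c(r * x)), and the output
+ # is g * x + (1 - g) * c, i.e. the gate interpolates between the unchanged
+ # input and the candidate update.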
+ with tf.variable_scope( + name, default_name="conv_gru", values=[x], reuse=reuse): + reset = saturating_sigmoid(do_conv(x, "reset", 1.0, padding)) + gate = saturating_sigmoid(do_conv(x, "gate", 1.0, padding)) + candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0, padding)) + return gate * x + (1 - gate) * candidate + + +def gru_feedfwd(a_t, h_prev, filters, name=None): + """position-wise Feed-fwd GRU gates following the MPNN. + + Args: + a_t: Tensor of shape [batch, length, depth] of current input + h_prev: Tensor of shape [batch, length, depth] of prev input + filters: an integer specifying number of dimensions of the filters + name: A string + Returns: + h_t: [batch, length, filters] hidden state + """ + + with tf.variable_scope(name, default_name="GRU", values=[a_t, h_prev]): + # we use right matrix multiplication to handle batches + # W_z and W_r have shape 2d, d. U_z U_r have shape d,d + z_t = ( + tf.sigmoid( + tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_z") + + tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_z"))) + r_t = ( + tf.sigmoid( + tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_r") + + tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_r"))) + h_tilde = ( + tf.tanh( + tpu_conv1d(a_t, filters, 1, padding="SAME", name="W") + + tpu_conv1d(r_t * h_prev, filters, 1, padding="SAME", name="U"))) + h_t = (1. - z_t) * h_prev + z_t * h_tilde + + return h_t + + +def conv_lstm(x, + kernel_size, + filters, + padding="SAME", + dilation_rate=(1, 1), + name=None, + reuse=None): + """Convolutional LSTM in 1 dimension.""" + with tf.variable_scope( + name, default_name="conv_lstm", values=[x], reuse=reuse): + gates = conv( + x, + 4 * filters, + kernel_size, + padding=padding, + dilation_rate=dilation_rate) + g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3) + new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3]) + return tf.sigmoid(g[2]) * tf.tanh(new_cell) + + +def diagonal_conv_gru(x, + kernel_size, + filters, + dropout=0.0, + name=None, + reuse=None): + """Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727.""" + + # Let's make a shorthand for conv call first. + def do_conv(args, name, bias_start): + return conv( + args, + filters, + kernel_size, + padding="SAME", + bias_initializer=tf.constant_initializer(bias_start), + name=name) + + # Here comes the GRU gate. + with tf.variable_scope( + name, default_name="diagonal_conv_gru", values=[x], reuse=reuse): + reset, reset_cost = hard_sigmoid(do_conv(x, "reset", 0.5)) + gate, gate_cost = hard_sigmoid(do_conv(x, "gate", 0.7)) + candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0)) + + if dropout > 0.0: + candidate = tf.nn.dropout(candidate, 1.0 - dropout) + + # Diagonal shift. + shift_filters = filters // 3 + base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) + + [[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters) + shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32) + shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3) + x_shifted = tf.nn.depthwise_conv2d( + x, shift_filter, [1, 1, 1, 1], padding="SAME") + + # Return the gated result and cost. 
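+ # total_cost_avg below just averages the two auxiliary costs returned by
+ # hard_sigmoid for the reset and gate convolutions; callers can fold it into
+ # their loss, e.g. as a regularizer on the gates, or ignore it.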
+ total_cost_avg = 0.5 * (reset_cost + gate_cost) + return gate * x_shifted + (1 - gate) * candidate, total_cost_avg + + +def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1): + """Pad tensors x and y on axis 1 so that they have the same length.""" + if axis not in [1, 2]: + raise ValueError("Only axis=1 and axis=2 supported for now.") + with tf.name_scope("pad_to_same_length", values=[x, y]): + x_length = shape_list(x)[axis] + y_length = shape_list(y)[axis] + if (isinstance(x_length, int) and isinstance(y_length, int) and + x_length == y_length and final_length_divisible_by == 1): + return x, y + max_length = tf.maximum(x_length, y_length) + if final_length_divisible_by > 1: + # Find the nearest larger-or-equal integer divisible by given number. + max_length += final_length_divisible_by - 1 + max_length //= final_length_divisible_by + max_length *= final_length_divisible_by + length_diff1 = max_length - x_length + length_diff2 = max_length - y_length + + def padding_list(length_diff, arg): + if axis == 1: + return [[[0, 0], [0, length_diff]], + tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)] + return [[[0, 0], [0, 0], [0, length_diff]], + tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)] + + paddings1 = tf.concat(padding_list(length_diff1, x), axis=0) + paddings2 = tf.concat(padding_list(length_diff2, y), axis=0) + res_x = tf.pad(x, paddings1) + res_y = tf.pad(y, paddings2) + # Static shapes are the same except for axis=1. + x_shape = x.shape.as_list() + x_shape[axis] = None + res_x.set_shape(x_shape) + y_shape = y.shape.as_list() + y_shape[axis] = None + res_y.set_shape(y_shape) + return res_x, res_y + + +def pad_with_zeros(logits, labels): + """Pad labels on the length dimension to match logits length.""" + with tf.name_scope("pad_with_zeros", values=[logits, labels]): + logits, labels = pad_to_same_length(logits, labels) + if len(labels.shape) == 3: # 2-d labels. + logits, labels = pad_to_same_length(logits, labels, axis=2) + return logits, labels + + +def weights_nonzero(labels): + """Assign weight 1.0 to all labels except for padding (id=0).""" + return to_float(tf.not_equal(labels, 0)) + + +def weights_prepend_inputs_to_targets(labels): + """Assign weight 1.0 to only the "targets" portion of the labels. + + Weight 1.0 is assigned to all nonzero labels past the first zero. + See prepend_mode in common_hparams.py + + Args: + labels: A Tensor of int32s. + + Returns: + A Tensor of floats. + """ + past_first_zero = tf.cumsum(to_float(tf.equal(labels, 0)), axis=1) + nonzero = to_float(labels) + return to_float(tf.not_equal(past_first_zero * nonzero, 0)) + + +def check_nonnegative(value): + """Check that the value is nonnegative.""" + if isinstance(value, tf.Tensor): + with tf.control_dependencies([tf.assert_greater_equal(value, 0)]): + value = tf.identity(value) + elif value < 0: + raise ValueError("Value must be non-negative.") + return value + + +def weights_multi_problem(labels, taskid=-1): + """Assign weight 1.0 to only the "targets" portion of the labels. + + Weight 1.0 is assigned to all labels past the taskid. + + Args: + labels: A Tensor of int32s. + taskid: an int32 representing the task id for a problem. + + Returns: + A Tensor of floats. + + Raises: + ValueError: The Task ID must be valid. 
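+
+ Example (single row, taskid=2): labels [5, 6, 2, 7, 8, 0] yield weights
+ [0, 0, 0, 1, 1, 0] -- only the tokens after the task id get weight 1.0,
+ while the task id itself and padding stay at 0.0.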
+ """ + taskid = check_nonnegative(taskid) + past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1) + # Additionally zero out the task id location + past_taskid *= to_float(tf.not_equal(labels, taskid)) + non_taskid = to_float(labels) + return to_float(tf.not_equal(past_taskid * non_taskid, 0)) + + +def weights_multi_problem_all(labels, taskid=-1): + """Assign weight 1.0 to only examples from the given task.""" + taskid = check_nonnegative(taskid) + weights = to_float(tf.not_equal(labels, 0)) + past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1) + # Additionally zero out the task id location + past_taskid *= to_float(tf.not_equal(labels, taskid)) + non_taskid = to_float(labels) + example_mask = to_float(tf.not_equal(past_taskid * non_taskid, 0)) + example_mask = tf.reduce_sum(example_mask, axis=1) + example_mask = to_float( + tf.greater(example_mask, tf.zeros_like(example_mask))) + + return weights * tf.expand_dims(example_mask, axis=-1) + + +def weights_multi_problem_input(labels, taskid=-1): + """Assign weight 1.0 to only the inputs for the given task.""" + taskid = check_nonnegative(taskid) + weights_all_tokens = weights_multi_problem_all(labels, taskid) + weights_target = weights_multi_problem(labels, taskid) + return weights_all_tokens - weights_target + + +def weights_all(labels): + """Assign weight 1.0 to all labels.""" + return tf.ones_like(labels, dtype=tf.float32) + + +def weights_concatenated(labels): + """Assign weight 1.0 to the "target" part of the concatenated labels. + + The labels look like: + source English I love you . ID1 target French Je t'aime . ID1 source + English the cat ID1 target French le chat ID1 source English ... + + We want to assign weight 1.0 to all words in the target text (including the + ID1 end symbol), but not to the source text or the boilerplate. In the + above example, the target words that get positive weight are: + Je t'aime . ID1 le chat ID1 + + Args: + labels: a Tensor + Returns: + a Tensor + """ + eos_mask = tf.to_int32(tf.equal(labels, 1)) + sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True) + in_target = tf.equal(tf.mod(sentence_num, 2), 1) + # first two tokens of each sentence are boilerplate. + sentence_num_plus_one = sentence_num + 1 + shifted = tf.pad(sentence_num_plus_one, + [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :] + nonboilerplate = tf.equal(sentence_num_plus_one, shifted) + ret = to_float(tf.logical_and(nonboilerplate, in_target)) + return ret + + +def padded_cross_entropy(logits, + labels, + label_smoothing, + weights_fn=weights_nonzero, + reduce_sum=True, + cutoff=0.0, + gaussian=False): + """Compute cross-entropy assuming 0s are padding. + + Computes a loss numerator (the sum of losses), and loss denominator + (the number of non-padding tokens). + + Args: + logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`. + optionally a FactoredTensor. + labels: an integer `Tensor` with shape `[batch, timesteps]`. + label_smoothing: a floating point `Scalar`. + weights_fn: A function from labels to weights. + reduce_sum: a Boolean, whether to sum at the end or not. + cutoff: a float, at which point to have no loss. + gaussian: If true, use a Gaussian distribution for label smoothing + + Returns: + loss_numerator: a `Scalar`. Sum of losses. + loss_denominator: a `Scalar. The number of non-padding target tokens. + + Raises: + ValueError: in case of unsupported argument types. 
+ """ + if isinstance(logits, FactoredTensor): + if gaussian: + raise ValueError("Factored padded cross entropy with Gaussian smoothing " + "is not implemented yet.") + return padded_cross_entropy_factored( + logits, + labels, + label_smoothing, + weights_fn=weights_fn, + reduce_sum=reduce_sum) + confidence = 1.0 - label_smoothing + logits_shape = shape_list(logits) + vocab_size = logits_shape[-1] + with tf.name_scope("padded_cross_entropy", values=[logits, labels]): + if len(logits_shape) == 2: + # Deal with the case where we did not insert extra dimensions due to + # TPU issues. No pad-to-same-length happens in this case. + # TODO(noam): remove this logic once TPU can handle extra dimensions. + labels = tf.reshape(labels, [-1]) + else: + logits, labels = pad_with_zeros(logits, labels) + logits = tf.reshape( + logits, + shape_list(labels) + [vocab_size], + name="padded_cross_entropy_size_check") + logits = tf.cast(logits, tf.float32) + xent = smoothing_cross_entropy( + logits, labels, vocab_size, confidence, gaussian=gaussian) + weights = weights_fn(labels) + if cutoff > 0.0: + xent = tf.nn.relu(xent - cutoff) + if not reduce_sum: + return xent * weights, weights + return tf.reduce_sum(xent * weights), tf.reduce_sum(weights) + + +def _weights_one_third(labels): + """Returns Tensor of shape [batch, height, width]. Each element is 1/3.""" + return tf.ones(tf.shape(labels)[:-1]) / 3. + + +def dml_loss(pred, labels, weights_fn=_weights_one_third, reduce_sum=True): + """Discretized mixture of logistics loss. + + Args: + pred: A [batch, height, width, num_mixtures*10] tensor of floats + comprising one unconstrained mixture probability, three means + (one per channel), three standard deviations (one per channel), + and three coefficients which linearly parameterize dependence across + channels. + labels: A [batch, height, width, channels] tensor of 8-bit pixel + intensities. The computation assumes channels is 3. + weights_fn: A function of labels, returning a Tensor of shape + [batch, height, width] which weights each loss term. Default is to scale + each loss term by 1/3 so that they capture the average across channels. + reduce_sum: A boolean, to return scalar loss instead of per position. + + Returns: + Tuple of loss tensors for numerator and denominator, each a scalar if + reduce_sum else of shape [batch, height, width]. The sum of their divisions + is the number of nats for each pixel in labels. + """ + real_labels = convert_rgb_to_symmetric_real(labels) + dml_loss_value = discretized_mix_logistic_loss(pred=pred, labels=real_labels) + weights = weights_fn(labels) + loss_num = weights * dml_loss_value + loss_den = weights_nonzero(weights) + if reduce_sum: + loss_num = tf.reduce_sum(loss_num) + loss_den = tf.reduce_sum(loss_den) + return loss_num, loss_den + + +def split_to_discretized_mix_logistic_params(inputs): + """Splits input tensor into parameters of discretized mixture logistic. + + Args: + inputs: A [batch, height, width, num_mixtures*10] tensor of floats + comprising one unconstrained mixture probability, three means + (one per channel), three standard deviations (one per channel), + and three coefficients which linearly parameterize dependence across + channels. + + Returns: + Tuple of unconstrained mixture probabilities, locations, scales, and + coefficient parameters of the distribution. The mixture probability has + shape [batch, height, width, num_mixtures]. Other parameters have shape + [batch, height, width, num_mixtures, 3]. 
+ """ + batch, height, width, output_dim = shape_list(inputs) # pylint: disable=unbalanced-tuple-unpacking + num_mixtures = output_dim // 10 + logits, locs, log_scales, coeffs = tf.split( + inputs, + num_or_size_splits=[ + num_mixtures, num_mixtures * 3, num_mixtures * 3, num_mixtures * 3 + ], + axis=-1) + split_shape = [batch, height, width, num_mixtures, 3] + locs = tf.reshape(locs, split_shape) + log_scales = tf.reshape(log_scales, split_shape) + log_scales = tf.maximum(log_scales, -7.) + coeffs = tf.reshape(coeffs, split_shape) + coeffs = tf.tanh(coeffs) + return logits, locs, log_scales, coeffs + + +def discretized_mix_logistic_loss(pred, labels): + """Computes negative log probability for the discretized mixture of logistics. + + The distribution of a whole pixel is a mixture of 3-dimensional discretized + logistic distributions. The 3-D discretized logistic factorizes as 3 1-D + discretized logistic distributions, one for each channel. It defines + + ```none + P(X = x) + = sum_{k=1}^K probs[k] * P(X = x | locs[k], scales[k]) + = sum_{k=1}^K probs[k] * [ + prod_{c=1}^3 DiscretizedLogistic(X[c] = x[c] | means[k][c], scales[k]) ] + ``` + + The means tensor is a linear combination of location parameters and previous + channels. The discretized logistic distribution assigns probability mass to an + event P(X=x) via logistic CDFs: P(X <= x + 0.5) - P(X < x - 0.5) for 1 < x < + 254; P(X <= 0.5) for x = 0; and 1 - P(X < 245.5) for x = 255. Instead of + 8-bit inputs, this implementation assumes the events are rescaled to [-1, 1]. + + Args: + pred: A [batch, height, width, num_mixtures*10] tensor of floats + comprising one unconstrained mixture probability, three means + (one per channel), three standard deviations (one per channel), + and three coefficients which linearly parameterize dependence across + channels. + labels: A [batch, height, width, channels] tensor of true pixel intensities + rescaled to [-1, 1]. The computation assumes channels is 3. + + Returns: + A [batch, height, width] tensor of the negative log conditional probability + of each pixel given all previous pixels. + """ + + logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params( + pred) + + # Tile labels to broadcast compute across the mixture dimension. + batch, height, width, num_mixtures = shape_list(logits) # pylint: disable=unbalanced-tuple-unpacking + labels = tf.tile( + tf.reshape(labels, [batch, height, width, 1, 3]), + [1, 1, 1, num_mixtures, 1]) + + # p(x) = sigmoid((x - means_i + 1/255.)/scale_i) - + # sigmoid((x - means_i - 1/255.)/scale_i) + # for each channel i. The means are linearly parameterized. + means_0 = locs[..., 0] + means_1 = locs[..., 1] + coeffs[..., 0] * labels[..., 0] + means_2 = ( + locs[..., 2] + coeffs[..., 1] * labels[..., 0] + + coeffs[..., 2] * labels[..., 1]) + means = tf.stack([means_0, means_1, means_2], axis=-1) + centered_labels = labels - means + inv_stdv = tf.exp(-log_scales) + plus_in = inv_stdv * (centered_labels + 1. / 255.) + min_in = inv_stdv * (centered_labels - 1. / 255.) + cdf_plus = tf.nn.sigmoid(plus_in) + cdf_min = tf.nn.sigmoid(min_in) + + # Compute log probability for edge case of 0 (before scaling), 255 (before + # scaling), and all other cases respectively. 
+ log_prob_0 = plus_in - tf.nn.softplus(plus_in) + log_prob_255 = -tf.nn.softplus(min_in) + prob_event = tf.maximum(cdf_plus - cdf_min, 1e-12) + log_prob_event = tf.log(prob_event) + + # Robustly select log-prob based on numerical edge-cases: (a) [-1, -1+eps); + # (b) (1-eps, 1]; (c) NaNs during `tf.gradients` of `tf.select`, which may + # cause `tf.log(0.)`; (d) p(x) < 1e-5. + mid_in = inv_stdv * centered_labels + log_prob_event_approx = ( + mid_in - log_scales - 2. * tf.nn.softplus(mid_in) - np.log(127.5)) + log_probs = tf.where( + labels < -0.999, log_prob_0, + tf.where( + labels > 0.999, log_prob_255, + tf.where(prob_event > 1e-5, log_prob_event, log_prob_event_approx))) + + # Sum over channels and compute log-probability of each mixture. + log_probs = tf.reduce_sum(log_probs, -1) + tf.nn.log_softmax(logits, axis=-1) + output = -tf.reduce_logsumexp(log_probs, axis=-1) + return output + + +def sample_from_discretized_mix_logistic(pred, seed=None): + """Sampling from a discretized mixture of logistics. + + Args: + pred: A [batch, height, width, num_mixtures*10] tensor of floats + comprising one unconstrained mixture probability, three means + (one per channel), three standard deviations (one per channel), + and three coefficients which linearly parameterize dependence across + channels. + seed: Random seed. + + Returns: + A tensor of shape [batch, height, width, 3] with real intensities scaled + between -1 and 1. + """ + + logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params( + pred) + + # Sample mixture indicator given logits using the gumbel max trick. + num_mixtures = shape_list(logits)[-1] + gumbel_noise = -tf.log(-tf.log( + tf.random_uniform( + tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed))) + sel = tf.one_hot( + tf.argmax(logits + gumbel_noise, -1), + depth=num_mixtures, + dtype=tf.float32) + + # Select mixture component's parameters. + sel = tf.expand_dims(sel, -1) + locs = tf.reduce_sum(locs * sel, 3) + log_scales = tf.reduce_sum(log_scales * sel, 3) + coeffs = tf.reduce_sum(coeffs * sel, 3) + + # Sample from 3-D logistic & clip to interval. Note we don't round to the + # nearest 8-bit value when sampling. + uniform_noise = tf.random_uniform( + tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed) + logistic_noise = tf.log(uniform_noise) - tf.log1p(-uniform_noise) + x = locs + tf.exp(log_scales) * logistic_noise + x0 = x[..., 0] + x1 = x[..., 1] + coeffs[..., 0] * x0 + x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1 + x = tf.stack([x0, x1, x2], axis=-1) + x = tf.clip_by_value(x, -1., 1.) + return x + + +def smoothing_cross_entropy(logits, + labels, + vocab_size, + confidence, + gaussian=False): + """Cross entropy with label smoothing to limit over-confidence. + + Args: + logits: Tensor of shape [batch_size, ?, ?, ?, vocab_size]. + labels: Tensor of shape [batch_size, ?, ?, ?]. + vocab_size: Tensor representing the size of the vocabulary. + confidence: Used to determine on and off values for label smoothing. + If `gaussian` is true, `confidence` is the variance to the Gaussian + distribution. + gaussian: Uses a Gaussian distribution for label smoothing + + Returns: + Tensor of shape [batch_size, ?, ?, ?]. + """ + with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]): + # Low confidence is given to all non-true labels, uniformly. + low_confidence = (1.0 - confidence) / to_float(vocab_size - 1) + # Normalizing constant is the best cross-entropy value with soft targets. 
+ # We subtract it just for readability, makes no difference on learning. + normalizing = -( + confidence * tf.log(confidence) + to_float(vocab_size - 1) * + low_confidence * tf.log(low_confidence + 1e-20)) + + if gaussian and confidence > 0.0: + labels = tf.cast(labels, tf.float32) + + normal_dist = tfp.distributions.Normal(loc=labels, scale=confidence) + # Locations to evaluate the probability distributions. + soft_targets = normal_dist.prob( + tf.cast(tf.range(vocab_size), tf.float32)[:, None, None, None, None]) + # Reordering soft_targets from [vocab_size, batch_size, ?, ?, ?] to match + # logits: [batch_size, ?, ?, ?, vocab_size] + soft_targets = tf.transpose(soft_targets, perm=[1, 2, 3, 4, 0]) + else: + soft_targets = tf.one_hot( + tf.cast(labels, tf.int32), + depth=vocab_size, + on_value=confidence, + off_value=low_confidence) + xentropy = tf.nn.softmax_cross_entropy_with_logits_v2( + logits=logits, labels=soft_targets) + return xentropy - normalizing + + +def global_pool_1d(inputs, pooling_type="MAX", mask=None): + """Pool elements across the last dimension. + + Useful to convert a list of vectors into a single vector so as + to get a representation of a set. + + Args: + inputs: A tensor of shape [batch_size, sequence_length, input_dims] + containing the sequences of input vectors. + pooling_type: the pooling type to use, MAX or AVR + mask: A tensor of shape [batch_size, sequence_length] containing a + mask for the inputs with 1's for existing elements, and 0's elsewhere. + + Returns: + A tensor of shape [batch_size, input_dims] containing the sequences of + transformed vectors. + """ + with tf.name_scope("global_pool", values=[inputs]): + if mask is not None: + mask = tf.expand_dims(mask, axis=2) + inputs = tf.multiply(inputs, mask) + + if pooling_type == "MAX": + # A tf.pool can be used here, but reduce is cleaner + output = tf.reduce_max(inputs, axis=1) + elif pooling_type == "AVR": + if mask is not None: + # Some elems are dummy elems so we can't just reduce the average. + output = tf.reduce_sum(inputs, axis=1) + num_elems = tf.reduce_sum(mask, axis=1, keepdims=True) + output = tf.div(output, tf.maximum(num_elems, 1)) + else: + output = tf.reduce_mean(inputs, axis=1) + + return output + + +def running_global_pool_1d(inputs, pooling_type="MAX"): + """Same global pool, but only for the elements up to the current element. + + Useful for outputs where the state of future elements is not known. + Takes no mask as all elements up to the current element are assumed to exist. + Currently only supports maximum. Equivalent to using a lower triangle bias. + + Args: + inputs: A tensor of shape [batch_size, sequence_length, input_dims] + containing the sequences of input vectors. + pooling_type: Pooling type to use. Currently only supports 'MAX'. + + Returns: + A tensor of shape [batch_size, sequence_length, input_dims] containing the + running 'totals'. + """ + del pooling_type + with tf.name_scope("running_global_pool", values=[inputs]): + scan_fct = tf.maximum + # Permute inputs so seq_length is first. + elems = tf.transpose(inputs, [1, 0, 2]) + # Perform scan. + cumulatives = tf.scan(scan_fct, elems, swap_memory=True) + # Permute output to get back to original order. + output = tf.transpose(cumulatives, [1, 0, 2]) + return output + + +def gated_linear_unit_layer(x, name=None): + """Gated linear unit layer. + + Paper: Language Modeling with Gated Convolutional Networks. + Link: https://arxiv.org/abs/1612.08083 + x = Wx * sigmoid(W'x). 
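+ The Dense layer below produces 2 * depth channels which are split in half:
+ the first half is the linear term and the second half, passed through a
+ sigmoid, gates it, so the output keeps the input depth.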
+ + Args: + x: A tensor + name: A string + + Returns: + A tensor of the same shape as x. + """ + with tf.variable_scope(name, default_name="glu_layer", values=[x]): + depth = shape_list(x)[-1] + x = layers().Dense(depth * 2, activation=None)(x) + x, gating_x = tf.split(x, 2, axis=-1) + return x * tf.nn.sigmoid(gating_x) + + +def sru(x, + num_layers=2, + activation=None, + initial_state=None, + name=None, + reuse=None): + """SRU cell as in https://arxiv.org/abs/1709.02755. + + This implementation uses tf.scan and can incur overhead, see the full SRU + function doc for details and an implementation that is sometimes faster. + + Args: + x: A tensor of shape [batch, ..., channels] ; ... is treated as time. + num_layers: How many SRU layers; default is 2 as results for 1 disappoint. + activation: Optional activation function, try tf.nn.tanh or tf.nn.relu. + initial_state: Optional initial c-state, set to zeros if None. + name: Optional name, "sru" by default. + reuse: Optional reuse. + + Returns: + A tensor of the same shape as x. + + Raises: + ValueError: if num_layers is not positive. + """ + if num_layers < 1: + raise ValueError("Number of layers must be positive: %d" % num_layers) + with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse): + # We assume x is [batch, ..., channels] and treat all ... as time. + x_shape = shape_list(x) + x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]]) + x = tf.transpose(x, [1, 0, 2]) # Scan assumes time on axis 0. + initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]]) + + # SRU state manipulation function. + def next_state(cur_state, args_tup): + cur_x_times_one_minus_f, cur_f = args_tup + return cur_f * cur_state + cur_x_times_one_minus_f + + # Calculate SRU on each layer. + for i in range(num_layers): + # The parallel part of the SRU. + x_orig = x + x, f, r = tf.split( + layers().Dense(3 * x_shape[-1], name="kernel_%d" % i)(x), 3, axis=-1) + f, r = tf.sigmoid(f), tf.sigmoid(r) + x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed. + # Calculate states. + c_states = tf.scan( + next_state, (x_times_one_minus_f, f), + initializer=initial_state, + parallel_iterations=2, + name="scan_%d" % i) + # Final output. + if activation is not None: + c_states = activation(c_states) + h = c_states * r + (1.0 - r) * x_orig + x = h # Next layer. + # Transpose back to batch-major. + x = tf.transpose(x, [1, 0, 2]) + return tf.reshape(x, x_shape) + + +def linear_set_layer(layer_size, + inputs, + context=None, + activation_fn=tf.nn.relu, + dropout=0.0, + name=None): + """Basic layer type for doing funky things with sets. + + Applies a linear transformation to each element in the input set. + If a context is supplied, it is concatenated with the inputs. + e.g. One can use global_pool_1d to get a representation of the set which + can then be used as the context for the next layer. + + TODO: Add bias add (or control the biases used). + + Args: + layer_size: Dimension to transform the input vectors to. + inputs: A tensor of shape [batch_size, sequence_length, input_dims] + containing the sequences of input vectors. + context: A tensor of shape [batch_size, context_dims] containing a global + statistic about the set. + activation_fn: The activation function to use. + dropout: Dropout probability. + name: name. + + Returns: + Tensor of shape [batch_size, sequence_length, output_dims] containing the + sequences of transformed vectors. 
+ """ + with tf.variable_scope( + name, default_name="linear_set_layer", values=[inputs]): + # Apply 1D convolution to apply linear filter to each element + # along the 2nd dimension. + outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv") + + # Apply the context if it exists. + if context is not None: + # Unfortunately tf doesn't support broadcasting via concat, but we can + # simply add the transformed context to get the same effect. + if len(context.get_shape().as_list()) == 2: + context = tf.expand_dims(context, axis=1) + cont_tfm = conv1d( + context, layer_size, 1, activation=None, name="cont_conv") + outputs += cont_tfm + + if activation_fn is not None: + outputs = activation_fn(outputs) + + if dropout != 0.0: + outputs = tf.nn.dropout(outputs, 1.0 - dropout) + + return outputs + + +def ravanbakhsh_set_layer(layer_size, + inputs, + mask=None, + sequential=False, + activation_fn=tf.nn.tanh, + dropout=0.0, + name=None): + """Layer from Deep Sets paper: https://arxiv.org/abs/1611.04500 . + + More parameter-efficient version of a linear-set-layer with context. + + Args: + layer_size: Dimension to transform the input vectors to. + inputs: A tensor of shape [batch_size, sequence_length, vector] + containing the sequences of input vectors. + mask: A tensor of shape [batch_size, sequence_length] containing a + mask for the inputs with 1's for existing elements, and 0's elsewhere. + sequential: If true, will use a running global pool so each element will + only depend on those before it. Set true if this layer is being used in + an output sequence. + activation_fn: The activation function to use. + dropout: dropout. + name: name. + + Returns: + Tensor of shape [batch_size, sequence_length, vector] containing the + sequences of transformed vectors. + """ + del dropout + with tf.variable_scope(name, "ravanbakhsh_set_layer", [inputs]): + if sequential: + return linear_set_layer( + layer_size, + inputs - running_global_pool_1d(inputs), + activation_fn=activation_fn, + name=name) + return linear_set_layer( + layer_size, + inputs - tf.expand_dims(global_pool_1d(inputs, mask=mask), axis=1), + activation_fn=activation_fn, + name=name) + + +def fn_device_dependency_dict(): + """State container for fn_device_dependency.""" + default_graph = tf.get_default_graph() + if not hasattr(default_graph, "dependency_dict"): + default_graph.dependency_dict = collections.defaultdict(list) + return default_graph.dependency_dict + + +@contextlib.contextmanager +def fn_device_dependency(name, device=""): + """Add control deps for name and device.""" + key = name + "_" + device + outs = [] + + def body(): + with tf.control_dependencies(fn_device_dependency_dict()[key]): + yield outs + assert outs + + deps = outs + if isinstance(outs[0], (list, tuple)): + assert len(outs) == 1 + deps = outs[0] + fn_device_dependency_dict()[key] = deps + + if device: + with tf.device(device): + return body() + else: + return body() + + +def underlying_variable_ref(t): + """Find the underlying variable ref. + + Traverses through Identity, ReadVariableOp, and Enter ops. + Stops when op type has Variable or VarHandle in name. + + Args: + t: a Tensor + + Returns: + a Tensor that is a variable ref, or None on error. + """ + while t.op.type in ["Identity", "ReadVariableOp", "Enter"]: + t = t.op.inputs[0] + + op_type = t.op.type + if "Variable" in op_type or "VarHandle" in op_type: + return t + else: + return None + + +def underlying_variable(t): + """Find the underlying tf.Variable object. 
+ + Args: + t: a Tensor + + Returns: + tf.Variable. + """ + t = underlying_variable_ref(t) + assert t is not None + # make sure that the graph has a variable index and that it is up-to-date + if not hasattr(tf.get_default_graph(), "var_index"): + tf.get_default_graph().var_index = {} + var_index = tf.get_default_graph().var_index + for v in tf.global_variables()[len(var_index):]: + var_index[v.name] = v + return var_index[t.name] + + +def approximate_split(x, num_splits, axis=0): + """Split approximately equally into num_splits parts. + + Args: + x: a Tensor + num_splits: an integer + axis: an integer. + + Returns: + a list of num_splits Tensors. + """ + size = shape_list(x)[axis] + size_splits = [tf.div(size + i, num_splits) for i in range(num_splits)] + return tf.split(x, size_splits, axis=axis) + + +class FactoredTensor(object): + """A concise factored representation of Tensor as two tensors. + + This class represents the tensor tf.matmul(a, b, transpose_b=True) + by storing the values of Tensors a and b. + + The reason for this is that the product may be too big to fully realize at + once, so it can be realized a part at a time. + + "a" may have extra leading dimensions, in which case they are flattened out + before computing the matrix product, then re-expanded afterwards. + """ + + def __init__(self, a, b): + self._a = a + self._b = b + + @property + def a(self): + return self._a + + @property + def b(self): + return self._b + + def to_tensor(self): + """Convert to Tensor.""" + a_shape = shape_list(self.a) + b_shape = shape_list(self.b) + inner_dim = b_shape[1] + result_dim = b_shape[0] + flat_a = tf.reshape(self.a, [-1, inner_dim]) + product = tf.matmul(flat_a, self.b, transpose_b=True) + product_shape = a_shape[:-1] + [result_dim] + product = tf.reshape(product, product_shape) + product.set_shape(self.a.get_shape().as_list()[:-1] + + [self.b.get_shape()[0]]) + return product + + +def _convert_factored_tensor_to_tensor(value, *args, **kwargs): + # call ops.convert_to_tensor to handle optional arguments appropriately + return ops.convert_to_tensor(value.to_tensor(), *args, **kwargs) + + +tf.register_tensor_conversion_function(FactoredTensor, + _convert_factored_tensor_to_tensor) + + +def smoothing_cross_entropy_factored_grad(op, dy): + """Gradient function for smoothing_cross_entropy_factored.""" + a = op.inputs[0] + b = op.inputs[1] + labels = op.inputs[2] + confidence = op.inputs[3] + num_splits = 16 + vocab_size = shape_list(b)[0] + labels = approximate_split(labels, num_splits) + a = approximate_split(a, num_splits) + dy = approximate_split(dy, num_splits) + b_grad = None + a_grad_parts = [] + deps = [] + for part in range(num_splits): + with tf.control_dependencies(deps): + logits = tf.matmul(a[part], b, transpose_b=True) + output_part = smoothing_cross_entropy(logits, labels[part], vocab_size, + confidence) + a_grad_part, b_grad_part = tf.gradients( + ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]]) + a_grad_parts.append(a_grad_part) + if part > 0: + b_grad += b_grad_part + else: + b_grad = b_grad_part + deps = [b_grad, a_grad_part] + a_grad = tf.concat(a_grad_parts, 0) + return a_grad, b_grad, None, None + + +@function.Defun( + noinline=True, + python_grad_func=smoothing_cross_entropy_factored_grad, + compiled=True, + separate_compiled_gradients=True) +def smoothing_cross_entropy_factored(a, b, labels, confidence): + """Memory-efficient computation of smoothing cross-entropy. + + Avoids realizing the entire logits matrix at once. 
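+  The batch is processed in num_splits (16) sequential chunks, so only one
+  chunk of logits of shape [batch / 16, vocab_size] is materialized at a time;
+  the control dependencies below enforce that ordering.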
+ + Args: + a: a Tensor with shape [batch, inner_dim] + b: a Tensor with shape [vocab_size, inner_dim] + labels: an integer Tensor with shape [batch] + confidence: a float + + Returns: + A Tensor with shape [batch] + """ + num_splits = 16 + vocab_size = shape_list(b)[0] + labels = approximate_split(labels, num_splits) + a = approximate_split(a, num_splits) + parts = [] + for part in range(num_splits): + with tf.control_dependencies(parts[-1:]): + logits = tf.matmul(a[part], b, transpose_b=True) + parts.append( + smoothing_cross_entropy(logits, labels[part], vocab_size, confidence)) + return tf.concat(parts, 0) + + +def padded_cross_entropy_factored(factored_logits, + labels, + label_smoothing, + weights_fn=weights_nonzero, + reduce_sum=True): + """Memory-efficient computation of smoothing cross-entropy. + + Avoids realizing the entire logits matrix at once. + + Args: + factored_logits: a `FactoredTensor` representing a Tensor + with shape `[batch, timesteps, vocab_size]`. + labels: an integer `Tensor` with shape `[batch, timesteps]`. + label_smoothing: a floating point `Scalar`. + weights_fn: A function from labels to weights. + reduce_sum: a Boolean, whether to sum at the end or not. + + Returns: + loss_numerator: a `Scalar`. Sum of losses. + loss_denominator: a `Scalar. The number of non-padding target tokens. + """ + a = factored_logits.a + b = factored_logits.b + confidence = 1.0 - label_smoothing + with tf.name_scope("padded_cross_entropy_factored", values=[a, b, labels]): + labels_flat = tf.reshape(labels, [-1]) + a_flat = tf.reshape(a, [-1, shape_list(b)[1]]) + xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat, + tf.convert_to_tensor(confidence)) + xent = tf.reshape(xent, shape_list(labels)) + weights = weights_fn(labels) + if not reduce_sum: + return xent * weights, weights + return tf.reduce_sum(xent * weights), tf.reduce_sum(weights) + + +def fn_with_custom_grad(grad_fn, use_global_vars=False): + """Decorator to create a subgraph with a custom gradient function. + + The subgraph created by the decorated function is NOT put in a Defun and so + does not suffer from the limitations of the Defun (all subgraph ops on the + same device, no summaries). + + Args: + grad_fn: function with signature + (inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars), + all of which are lists of Tensors. + use_global_vars: if True, variables will be the global variables created. + If False, will be the trainable variables. + + Returns: + Decorator for function such that the gradient is defined by grad_fn. + """ + + def dec(fn): + + @functools.wraps(fn) + def wrapped(*args): + return _fn_with_custom_grad( + fn, args, grad_fn, use_global_vars=use_global_vars) + + return wrapped + + return dec + + +def _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False): + """Create a subgraph with a custom gradient. + + Args: + fn: function that takes inputs as arguments and produces 1 or more Tensors. + inputs: list, will be passed as fn(*inputs). + grad_fn: function with signature + (inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars), + all of which are lists of Tensors. + use_global_vars: if True, variables will be the global variables created. + If False, will be the trainable variables. 
+ + Returns: + fn(*inputs) + """ + vs = tf.get_variable_scope() + get_vars_fn = ( + vs.global_variables if use_global_vars else vs.trainable_variables) + len_before_vars = len(get_vars_fn()) + inputs = list(inputs) + outputs = fn(*inputs) + train_vars = get_vars_fn()[len_before_vars:] + + if grad_fn is None: + return outputs + + if not isinstance(outputs, (tuple, list)): + outputs = [outputs] + outputs = list(outputs) + + defun_inputs = [inputs, train_vars, outputs] + + def custom_grad_fn(op, *dys): + """Custom grad fn applying grad_fn for identity Defun.""" + fn_inputs, fn_vars, fn_outputs = contrib.framework().nest.pack_sequence_as( + defun_inputs, list(op.inputs)) + dys = list(dys) + assert len(fn_outputs) == len(outputs) + assert len(fn_outputs) == len(dys) + + grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys) + grad_outputs = [None] * len(fn_outputs) + return tuple(grad_inputs + grad_vars + grad_outputs) + + # The Defun takes as input the original inputs, the trainable variables + # created in fn, and the outputs. In the forward it passes through the + # outputs. In the backwards, it produces gradients for the original inputs + # and the trainable variables. + in_types = [t.dtype for t in inputs] + out_types = [t.dtype for t in outputs] + var_types = [t.dtype for t in train_vars] + + @function.Defun( + *(in_types + var_types + out_types), + func_name="identity_custom_grad%d" % ops.uid(), + python_grad_func=custom_grad_fn, + shape_func=lambda _: [t.get_shape() for t in outputs]) + def identity(*args): + _, _, outs = contrib.framework().nest.pack_sequence_as(defun_inputs, args) + return tuple([tf.identity(t) for t in outs]) + + flat_inputs = contrib.framework().nest.flatten(defun_inputs) + id_out = identity(*flat_inputs) + return id_out + + +_function_cache = {} + + +def conv_hidden_relu_memory_efficient(x, + filter_size, + epsilon=1e-6, + forget=True, + test_vars=None, + name=None): + """LayerNorm, Conv, ReLU, Conv. + + All convolutions have kernel size 1. + + returns conv(relu(conv(layer_norm(x)))) + + Args: + x: input Tensor with shape [batch, length, io_size] + filter_size: an integer - size of the hidden layer. + epsilon: a float (for layer norm) + forget: a boolean - forget forwards activations and recompute on backprop + test_vars: optional tuple of variables for testing purposes + name: an optional string + + Returns: + a Tensor with shape [batch, length, io_size] + """ + io_size = x.get_shape().as_list()[-1] + + def forward_internal(x, f1, f2, scale, bias): + """Forward function.""" + # split batch-wise to avoid exhausting memory in cast the batch is large + # and the hidden layer is large. 
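+      # The slices below are chained with control dependencies, so roughly only
+      # one slice's [*, 1, filter_size] hidden activations are live at a time
+      # (about a num_splits-fold reduction in peak activation memory).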
+ num_splits = 4 + x_flat = tf.reshape(x, [-1, 1, shape_list(x)[2]]) + xs = approximate_split(x_flat, num_splits) + ys = [] + for i in range(num_splits): + with tf.control_dependencies(ys[-1:]): + n = layer_norm_compute(xs[i], epsilon, scale, bias) + y = tf.nn.conv1d(n, f1, 1, "SAME") + y = tf.nn.relu(y) + y = tf.nn.conv1d(y, f2, 1, "SAME") + ys.append(y) + y = tf.concat(ys, 0) + y = tf.reshape(y, shape_list(x)) + return y + + key = ("conv_hidden_relu_memory_efficient %s" % epsilon) + if not forget: + forward_fn = forward_internal + elif key in _function_cache: + forward_fn = _function_cache[key] + else: + + @function.Defun(compiled=True) + def grad_fn(x, f1, f2, scale, bias, dy): + """Gradient for efficiency.""" + with tf.control_dependencies([dy]): + num_splits = 4 + x_shape = shape_list(x) + flat_shape = [-1, 1, x_shape[2]] + x = tf.reshape(x, flat_shape) + dy = tf.reshape(dy, flat_shape) + xs = approximate_split(x, num_splits) + dys = approximate_split(dy, num_splits) + dxs = [] + df1 = 0 + df2 = 0 + dscale = 0 + dbias = 0 + deps = [] + for i in range(num_splits): + with tf.control_dependencies(deps): + n = layer_norm_compute(xs[i], epsilon, scale, bias) + y = tf.nn.conv1d(n, f1, 1, "SAME") + y = tf.nn.relu(y) + y = tf.nn.conv1d(y, f2, 1, "SAME") + dxi, pdf1, pdf2, pdscale, pdbias = tf.gradients( + ys=[y], xs=[xs[i], f1, f2, scale, bias], grad_ys=[dys[i]]) + df1 += pdf1 + df2 += pdf2 + dscale += pdscale + dbias += pdbias + dxs.append(dxi) + deps = [dxi, df1, df2, dscale, dbias] + with tf.control_dependencies(deps): + dx = tf.concat(dxs, 0) + dx = tf.reshape(dx, x_shape) + return dx, df1, df2, dscale, dbias + + @function.Defun( + grad_func=grad_fn, compiled=True, separate_compiled_gradients=True) + def forward_fn(x, f1, f2, scale, bias): + return forward_internal(x, f1, f2, scale, bias) + + with tf.variable_scope(name, default_name="ffn2", values=[x]): + # TODO(noam): it would be nice to save memory by casting x to float16 + # here, but this causes problems with the gradients. Figure out if there + # is a way to leave the gradients as float32. + if test_vars is not None: + f1, f2, scale, bias = list(test_vars) + else: + f1 = tf.get_variable("f1", [1, io_size, filter_size]) + f2 = tf.get_variable("f2", [1, filter_size, io_size]) + scale, bias = layer_norm_vars(io_size) + if forget: + y = forward_fn(x, f1, f2, scale, bias) + else: + y = forward_internal(x, f1, f2, scale, bias) + y.set_shape(x.get_shape()) + return y + + +def shape_list(x): + """Return list of dims, statically where possible.""" + x = tf.convert_to_tensor(x) + + # If unknown rank, return dynamic shape + if x.get_shape().dims is None: + return tf.shape(x) + + static = x.get_shape().as_list() + shape = tf.shape(x) + + ret = [] + for i, dim in enumerate(static): + if dim is None: + dim = shape[i] + ret.append(dim) + return ret + + +def list_product(els): + prod = els[0] + for el in els[1:]: + prod *= el + return prod + + +def sample_with_temperature(logits, temperature, sampling_keep_top_k=-1): + """Either argmax or random sampling. + + Args: + logits: a Tensor. + temperature: a float 0.0=argmax 1.0=random + sampling_keep_top_k: If not -1, only sample from the top k logits. + Returns: + a Tensor with one fewer dimension than logits. + """ + if temperature == 0.0: + # TF argmax doesn't handle >5 dimensions, so we reshape here. 
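+    # E.g. [batch, time, height, width, vocab] logits are flattened to
+    # [batch*time*height*width, vocab], argmax-ed over the last axis, and the
+    # result is reshaped back to [batch, time, height, width].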
+ logits_shape = shape_list(logits) + argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=1) + return tf.reshape(argmax, logits_shape[:-1]) + else: + tf.debugging.assert_greater(temperature, 0.0) + + if sampling_keep_top_k != -1: + if sampling_keep_top_k <= 0: + raise ValueError("sampling_keep_top_k must either be -1 or positive.") + + vocab_size = shape_list(logits)[1] + + k_largest = contrib.nn().nth_element( + logits, n=sampling_keep_top_k, reverse=True) + k_largest = tf.tile(tf.reshape(k_largest, [-1, 1]), [1, vocab_size]) + + # Force every position that is not in the top k to have probability near + # 0 by setting the logit to be very negative. + logits = tf.where(tf.less_equal(logits, k_largest), + tf.ones_like(logits)*-1e6, logits) + + reshaped_logits = ( + tf.reshape(logits, [-1, shape_list(logits)[-1]]) / temperature) + choices = tf.multinomial(reshaped_logits, 1) + choices = tf.reshape(choices, + shape_list(logits)[:logits.get_shape().ndims - 1]) + return choices + + +def _select_top_k(logits, top_k): + """Replaces logits, expect the top k highest values, with small number (-1e6). + + If k is -1 don't replace anything. + + Args: + logits: A `Tensor` of shape [batch_size, ..., vocab_size] + top_k: vector of batch size. + + Returns: + A `Tensor` with same shape as logits. + """ + vocab_size = logits.shape[-1] + + top_k = tf.where( + tf.not_equal(top_k, -1), top_k, + tf.ones_like(top_k) * vocab_size) + + return tf.where( + tf.argsort(logits) < tf.reshape(top_k, [-1] + [1] * + (len(logits.shape) - 1)), logits, + tf.ones_like(logits) * -1e6) + + +def sample_temperature_per_example(logits, temperature, sampling_keep_top_k=-1): + """Either random sampling with different temperature per example. + + Args: + logits: a Tensor. + temperature: a float vector of same size as logits. + sampling_keep_top_k: If not -1, only sample from the top k logits. + Returns: + a Tensor with one fewer dimension than logits. + """ + logits = _select_top_k(logits, sampling_keep_top_k) + logits /= tf.reshape(temperature, [-1] + [1] * (len(logits.shape) - 1)) + reshaped_logits = tf.reshape(logits, [-1, shape_list(logits)[-1]]) + choices = tf.multinomial(reshaped_logits, 1) + choices = tf.reshape(choices, + shape_list(logits)[:logits.get_shape().ndims - 1]) + return choices + + +def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None): + """Matrix band part of ones. + + Args: + rows: int determining number of rows in output + cols: int + num_lower: int, maximum distance backward. Negative values indicate + unlimited. + num_upper: int, maximum distance forward. Negative values indicate + unlimited. + out_shape: shape to reshape output by. + + Returns: + Tensor of size rows * cols reshaped into shape out_shape. 
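+
+    For example, rows=cols=3 with num_lower=-1 and num_upper=0 yields the 3x3
+    lower-triangular matrix of ones.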
+ """ + if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]): + # Needed info is constant, so we construct in numpy + if num_lower < 0: + num_lower = rows - 1 + if num_upper < 0: + num_upper = cols - 1 + lower_mask = np.tri(cols, rows, num_lower).T + upper_mask = np.tri(rows, cols, num_upper) + band = np.ones((rows, cols)) * lower_mask * upper_mask + if out_shape: + band = band.reshape(out_shape) + band = tf.constant(band, tf.float32) + else: + band = tf.linalg.band_part( + tf.ones([rows, cols]), tf.cast(num_lower, tf.int64), + tf.cast(num_upper, tf.int64)) + if out_shape: + band = tf.reshape(band, out_shape) + + return band + + +def reshape_like_all_dims(a, b): + """Reshapes a to match the shape of b.""" + ret = tf.reshape(a, tf.shape(b)) + if not tf.executing_eagerly(): + ret.set_shape(b.get_shape()) + return ret + + +def recompute_grad(fn): + """Decorator that recomputes the function on the backwards pass. + + Args: + fn: a function that takes Tensors (all as positional arguments) and returns + a tuple of Tensors. + + Returns: + A wrapped fn that is identical to fn when called, but its activations will + be discarded and recomputed on the backwards pass (i.e. on a call to + tf.gradients). + """ + + @functools.wraps(fn) + def wrapped(*args): + return _recompute_grad(fn, args) + + return wrapped + + +def _recompute_grad(fn, args): + """See recompute_grad.""" + + cached_vs = [] + cached_arg_scope = [] + + def grad_fn(inputs, variables, outputs, output_grads): + """Recompute outputs for gradient computation.""" + del outputs + variables = [underlying_variable_ref(v) for v in variables] + # Recompute outputs + with tf.control_dependencies(output_grads): + with contrib.framework().arg_scope(cached_arg_scope[0]): + with tf.variable_scope(cached_vs[0], reuse=True): + outputs = fn(*inputs) + + if not isinstance(outputs, (list, tuple)): + outputs = [outputs] + outputs = list(outputs) + grads = tf.gradients(outputs, inputs + variables, output_grads) + grad_inputs = grads[:len(inputs)] + grad_vars = grads[len(inputs):] + # TODO(rsepassi): Make fn_with_custom_grad work with bfloat16. + # If the input gradients are bfloat16, it's assumed the variables are + # bfloat16. This is a hack to ensure that grad_vars are the right type. + if grad_inputs[0].dtype == tf.bfloat16: + grad_vars = [tf.cast(grad_var, tf.bfloat16) for grad_var in grad_vars] + return grad_inputs, grad_vars + + @fn_with_custom_grad(grad_fn) + def fn_with_recompute(*args): + cached_vs.append(tf.get_variable_scope()) + cached_arg_scope.append(contrib.framework().current_arg_scope()) + return fn(*args) + + return fn_with_recompute(*args) + + +def dense(x, units, **kwargs): + """Identical to layers.dense.""" + layer_collection = kwargs.pop("layer_collection", None) + activations = layers().Dense(units, **kwargs)(x) + if layer_collection: + # We need to find the layer parameters using scope name for the layer, so + # check that the layer is named. Otherwise parameters for different layers + # may get mixed up. + layer_name = tf.get_variable_scope().name + if (not layer_name) or ("name" not in kwargs): + raise ValueError( + "Variable scope and layer name cannot be empty. 
Actual: " + "variable_scope={}, layer name={}".format( + layer_name, kwargs.get("name", None))) + + layer_name += "/" + kwargs["name"] + layer_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, + scope=layer_name) + assert layer_params + if len(layer_params) == 1: + layer_params = layer_params[0] + + tf.logging.info( + "Registering dense layer to collection for tensor: {}".format( + layer_params)) + + x_shape = x.shape.as_list() + if len(x_shape) == 3: + # Handle [batch, time, depth] inputs by folding batch and time into + # one dimension: reshaping inputs to [batchxtime, depth]. + x_2d = tf.reshape(x, [-1, x_shape[2]]) + activations_shape = activations.shape.as_list() + activations_2d = tf.reshape(activations, [-1, activations_shape[2]]) + layer_collection.register_fully_connected_multi( + layer_params, x_2d, activations_2d, num_uses=x_shape[1]) + activations = tf.reshape(activations_2d, activations_shape) + else: + layer_collection.register_fully_connected(layer_params, x, activations) + return activations + + +def batch_dense(inputs, + units, + activation=None, + kernel_initializer=None, + reuse=None, + name=None): + """Multiply a batch of input matrices by a batch of parameter matrices. + + Each input matrix is multiplied by the corresponding parameter matrix. + + This is useful in a mixture-of-experts where the batch represents different + experts with different inputs. + + Args: + inputs: a Tensor with shape [batch, length, input_units] + units: an integer + activation: an optional activation function to apply to the output + kernel_initializer: an optional initializer + reuse: whether to reuse the varaible scope + name: an optional string + + Returns: + a Tensor with shape [batch, length, units] + + Raises: + ValueError: if the "batch" or "input_units" dimensions of inputs are not + statically known. + """ + inputs_shape = shape_list(inputs) + if len(inputs_shape) != 3: + raise ValueError("inputs must have 3 dimensions") + batch = inputs_shape[0] + input_units = inputs_shape[2] + if not isinstance(batch, int) or not isinstance(input_units, int): + raise ValueError("inputs must have static dimensions 0 and 2") + with tf.variable_scope( + name, + default_name="batch_dense", + values=[inputs], + reuse=reuse, + dtype=inputs.dtype): + if kernel_initializer is None: + kernel_initializer = tf.random_normal_initializer( + stddev=input_units**-0.5) + w = tf.get_variable( + "w", [batch, input_units, units], + initializer=kernel_initializer, + dtype=inputs.dtype) + y = tf.matmul(inputs, w) + if activation is not None: + y = activation(y) + return y + + +def mix(x1, + x2, + steps, + is_training, + min_prob=0.0, + max_prob=1.0, + mode="lin", + simple=False, + broadcast_last=False): + """Mix starting with x2, mixing mixing, going towards x1.""" + with tf.name_scope("mix"): + if not is_training: + if max_prob >= 1.0: + return x1 + alpha_shape = shape_list(x1) + if broadcast_last: + alpha_shape = alpha_shape[:-1] + [1] + alpha = tf.random_uniform(alpha_shape) + alpha = to_float(tf.less(alpha, max_prob)) + return alpha * x1 + (1.0 - alpha) * x2 + + def get_res(): + """Create the result. + + Separate function to speed it up later (see below). + + Returns: + Tensor of mixed inputs. 
+ """ + if mode == "lin": + alpha_p = inverse_lin_decay(steps) + else: + alpha_p = inverse_exp_decay(steps) + alpha_p = alpha_p * (max_prob - min_prob) + min_prob + if simple: + return alpha_p * x1 + (1.0 - alpha_p) * x2 + alpha_shape = shape_list(x1) + if broadcast_last: + alpha_shape = alpha_shape[:-1] + [1] + alpha = tf.random_uniform(alpha_shape) + alpha = to_float(tf.less(alpha, alpha_p)) + return alpha * x1 + (1.0 - alpha) * x2 + + if max_prob < 1.0: + return get_res() + + # Prevent sampling after steps is passed to speed it up. + if is_xla_compiled(): + return get_res() + else: + cur_step = tf.train.get_global_step() + if cur_step is None: + return x1 # Step not available, probably eval mode, don't mix. + return tf.cond(tf.less(cur_step, steps), get_res, lambda: x1) + + +def brelu(x): + """Bipolar ReLU as in https://arxiv.org/abs/1709.04054.""" + x_shape = shape_list(x) + x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1) + y1 = tf.nn.relu(x1) + y2 = -tf.nn.relu(-x2) + return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape) + + +def belu(x): + """Bipolar ELU as in https://arxiv.org/abs/1709.04054.""" + x_shape = shape_list(x) + x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1) + y1 = tf.nn.elu(x1) + y2 = -tf.nn.elu(-x2) + return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape) + + +def gelu(x): + """Gaussian Error Linear Unit. + + This is a smoother version of the RELU. + Original paper: https://arxiv.org/abs/1606.08415 + + Args: + x: float Tensor to perform activation. + + Returns: + x with the GELU activation applied. + """ + cdf = 0.5 * (1.0 + tf.tanh( + (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) + return x * cdf + + +def nac(x, depth, name=None, reuse=None): + """NAC as in https://arxiv.org/abs/1808.00508.""" + with tf.variable_scope(name, default_name="nac", values=[x], reuse=reuse): + x_shape = shape_list(x) + w = tf.get_variable("w", [x_shape[-1], depth]) + m = tf.get_variable("m", [x_shape[-1], depth]) + w = tf.tanh(w) * tf.nn.sigmoid(m) + x_flat = tf.reshape(x, [-1, x_shape[-1]]) + res_flat = tf.matmul(x_flat, w) + return tf.reshape(res_flat, x_shape[:-1] + [depth]) + + +def nalu(x, depth, epsilon=1e-30, name=None, reuse=None): + """NALU as in https://arxiv.org/abs/1808.00508.""" + with tf.variable_scope(name, default_name="nalu", values=[x], reuse=reuse): + x_shape = shape_list(x) + x_flat = tf.reshape(x, [-1, x_shape[-1]]) + gw = tf.get_variable("w", [x_shape[-1], depth]) + g = tf.nn.sigmoid(tf.matmul(x_flat, gw)) + g = tf.reshape(g, x_shape[:-1] + [depth]) + a = nac(x, depth, name="nac_lin") + log_x = tf.log(tf.abs(x) + epsilon) + m = nac(log_x, depth, name="nac_log") + return g * a + (1 - g) * tf.exp(m) + + +def argmax_with_score(logits, axis=None): + """Argmax along with the value.""" + axis = axis or len(logits.get_shape()) - 1 + predictions = tf.argmax(logits, axis=axis) + + logits_shape = shape_list(logits) + prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1] + prefix_size = 1 + for d in prefix_shape: + prefix_size *= d + + # Flatten to extract scores + flat_logits = tf.reshape(logits, [prefix_size, vocab_size]) + flat_predictions = tf.reshape(predictions, [prefix_size]) + flat_indices = tf.stack( + [tf.range(tf.to_int64(prefix_size)), + tf.to_int64(flat_predictions)], + axis=1) + flat_scores = tf.gather_nd(flat_logits, flat_indices) + + # Unflatten + scores = tf.reshape(flat_scores, prefix_shape) + + return predictions, scores + + +def log_prob_from_logits(logits, reduce_axis=-1): + return logits - 
tf.reduce_logsumexp(logits, axis=reduce_axis, keepdims=True) + + +def top_kth_iterative(x, k): + """Compute the k-th top element of x on the last axis iteratively. + + This assumes values in x are non-negative, rescale if needed. + It is often faster than tf.nn.top_k for small k, especially if k < 30. + Note: this does not support back-propagation, it stops gradients! + + Args: + x: a Tensor of non-negative numbers of type float. + k: a python integer. + + Returns: + a float tensor of the same shape as x but with 1 on the last axis + that contains the k-th largest number in x. + """ + # The iterative computation is as follows: + # + # cur_x = x + # for _ in range(k): + # top_x = maximum of elements of cur_x on the last axis + # cur_x = cur_x where cur_x < top_x and 0 everywhere else (top elements) + # + # We encode this computation in a TF graph using tf.foldl, so the inner + # part of the above loop is called "next_x" and tf.foldl does the loop. + def next_x(cur_x, _): + top_x = tf.reduce_max(cur_x, axis=-1, keep_dims=True) + return cur_x * to_float(cur_x < top_x) + # We only do k-1 steps of the loop and compute the final max separately. + fin_x = tf.foldl(next_x, tf.range(k - 1), initializer=tf.stop_gradient(x), + parallel_iterations=2, back_prop=False) + return tf.stop_gradient(tf.reduce_max(fin_x, axis=-1, keep_dims=True)) + + +def top_1_tpu(inputs): + """find max and argmax over the last dimension. + + Works well on TPU + + Args: + inputs: A tensor with shape [..., depth] + + Returns: + values: a Tensor with shape [...] + indices: a Tensor with shape [...] + """ + inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True) + mask = tf.to_int32(tf.equal(inputs_max, inputs)) + index = tf.range(tf.shape(inputs)[-1]) * mask + return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1) + + +def index_last_dim_with_indices(x, indices): + """Use indices to index into the last axis of x. + + This can be useful for recovering the actual probabilities of a sample from a + probability distribution. + + Args: + x: Tensor, n-d. + indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1) + dimensions of x. The values of indices will be used to index into the last + axis of x. + + Returns: + Tensor, (n-1)-d. + """ + assert len(x.shape) == len(indices.shape) + 1 + + x_shape = shape_list(x) + vocab_size = x_shape[-1] + + flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size]) + flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])]) + + idx = tf.stack( + [ + tf.range(tf.to_int64(shape_list(flat_indices)[0])), + tf.to_int64(flat_indices) + ], + axis=1) + flat_x_idx = tf.gather_nd(flat_x, idx) + + x_idx = tf.reshape(flat_x_idx, x_shape[:-1]) + + return x_idx + + +def should_generate_summaries(): + """Is this an appropriate context to generate summaries. 
+ + Returns: + a boolean + """ + name_scope = contrib.framework().get_name_scope() + if name_scope and "while/" in name_scope: + # Summaries don't work well within tf.while_loop() + return False + if tf.get_variable_scope().reuse: + # Avoid generating separate summaries for different data shards + return False + return True + + +def reshape_like(a, b): + """Reshapes a to match the shape of b in all but the last dimension.""" + ret = tf.reshape(a, tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0)) + if not tf.executing_eagerly(): + ret.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:]) + return ret + + +def summarize_video(video, prefix, max_outputs=1): + """Summarize the video using image summaries starting with prefix.""" + video_shape = shape_list(video) + if len(video_shape) != 5: + raise ValueError("Assuming videos given as tensors in the format " + "[batch, time, height, width, channels] but got one " + "of shape: %s" % str(video_shape)) + if tf.executing_eagerly(): + return + if video.get_shape().as_list()[1] is None: + tf.summary.image( + "%s_last_frame" % prefix, + tf.cast(video[:, -1, :, :, :], tf.uint8), + max_outputs=max_outputs) + else: + for k in range(video_shape[1]): + tf.summary.image( + "%s_frame_%d" % (prefix, k), + tf.cast(video[:, k, :, :, :], tf.uint8), + max_outputs=max_outputs) + + +def cast_like(x, y): + """Cast x to y's dtype, if necessary.""" + x = tf.convert_to_tensor(x) + y = tf.convert_to_tensor(y) + + if x.dtype.base_dtype == y.dtype.base_dtype: + return x + + cast_x = tf.cast(x, y.dtype) + if cast_x.device != x.device: + x_name = "(eager Tensor)" + try: + x_name = x.name + except AttributeError: + pass + tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x_name, + x.device, cast_x.device) + return cast_x + + +def make_even_size(x): + """Pad x to be even-sized on axis 1 and 2, but only if necessary.""" + x_shape = x.get_shape().as_list() + assert len(x_shape) > 2, "Only 3+-dimensional tensors supported." + shape = [dim if dim is not None else -1 for dim in x_shape] + new_shape = x_shape # To make sure constant shapes remain constant. + if x_shape[1] is not None: + new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5)) + if x_shape[2] is not None: + new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5)) + if shape[1] % 2 == 0 and shape[2] % 2 == 0: + return x + if shape[1] % 2 == 0: + x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2) + x.set_shape(new_shape) + return x + if shape[2] % 2 == 0: + x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1) + x.set_shape(new_shape) + return x + x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1) + x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2) + x.set_shape(new_shape) + return x + + +def sliced_gan_loss(input1, + input2, + discriminator, + num_vecs, + do_random_vecs=True, + do_tanh=True, + return_logits=False): + """Loss inspired by the sliced WGAN paper: https://arxiv.org/abs/1804.01947. + + Puts input1 and input2 through the provided discriminator to get logits. + Then, computes num_vecs random projections of the logits, sorts them on + the batch dimension and returns the L2 loss between the sorted vectors. + See the above-mentioned paper for the reasoning behind it. + + Args: + input1: first discriminator inputs. + input2: second discriminator inputs. + discriminator: inputs -> logits function. + num_vecs: how many random vectors to use for projections. 
+ do_random_vecs: whether to use random vectors or just tanh of the logits. + do_tanh: if true (default) we'll also just use tanh of the logits. + return_logits: Whether or not to return the logits. + + Returns: + The generator loss, i.e., the sliced approximation of the distance between + the projected distributions (warning: discriminator should maximize it). + """ + with tf.variable_scope("sliced_gan"): + with tf.variable_scope("discriminator"): + logits1 = discriminator(input1) + with tf.variable_scope("discriminator", reuse=True): + logits2 = discriminator(input2) + + if do_random_vecs: + random_vecs = tf.nn.l2_normalize( + tf.random_uniform([shape_list(logits1)[-1], num_vecs]), axis=0) + + def get_sorted_projections(x): + """Make projections of x and sort them on the batch dimension.""" + x = tf.reshape(x, [-1, shape_list(x)[-1]]) + batch_size = shape_list(x)[0] + if do_random_vecs and do_tanh: + n = tf.nn.l2_normalize(x, axis=1) + proj = tf.concat([tf.matmul(n, random_vecs), tf.tanh(n)], axis=1) + elif do_random_vecs: + n = tf.nn.l2_normalize(x, axis=1) + proj = tf.matmul(n, random_vecs) + else: + proj = tf.tanh(x) + proj = tf.transpose(proj, [1, 0]) # [num_vecs, batch] after this. + + if is_xla_compiled(): + proj_dtype = proj.dtype + proj = tf.cast(proj, tf.bfloat16) + + # Currently TPU only supports 1-D top_k calls. + map_fn = lambda x: tf.nn.top_k(x, k=batch_size, sorted=True)[0] + values = tf.map_fn(map_fn, proj) + + values = tf.cast(values, proj_dtype) + else: + values, _ = tf.nn.top_k(proj, k=batch_size, sorted=True) + + return values + + proj1 = get_sorted_projections(logits1) + proj2 = get_sorted_projections(logits2) + dist = tf.reduce_mean(tf.squared_difference(proj1, proj2)) + if return_logits: + return dist, logits1, logits2 + return dist + + +def lrelu(input_, leak=0.2, name="lrelu"): + return tf.maximum(input_, leak * input_, name=name) + + +def deep_discriminator(x, + batch_norm, + is_training, + filters=64, + filter_size=4, + stride=2, + output_size=1024): + """Discriminator architecture based on InfoGAN.""" + with tf.variable_scope( + "discriminator", initializer=tf.random_normal_initializer(stddev=0.02)): + batch_size, height, width = shape_list(x)[:3] # pylint: disable=unbalanced-tuple-unpacking + net = layers().Conv2D( + filters, filter_size, strides=stride, padding="SAME", name="conv1")(x) + net = lrelu(net) + net = layers().Conv2D( + 2 * filters, + filter_size, + strides=stride, + padding="SAME", + name="conv2")(net) + # [bs, h/4, w/4, 128] + if batch_norm: + net = layers().BatchNormalization( + training=is_training, momentum=0.999, name="d_bn2")(net) + net = lrelu(net) + size = height * width + x_shape = x.get_shape().as_list() + if x_shape[1] is None or x_shape[2] is None: + net = tf.reduce_mean(net, axis=[1, 2]) + else: + net = tf.reshape(net, [batch_size, size * 8]) + net = layers().Dense(output_size, name="d_fc3")(net) + if batch_norm: + net = layers().BatchNormalization( + training=is_training, momentum=0.999, name="d_bn3")(net) + net = lrelu(net) + return net + + +def instance_norm(x): + """Instance normalization layer.""" + with tf.variable_scope("instance_norm"): + epsilon = 1e-5 + mean, var = tf.nn.moments(x, [1, 2], keep_dims=True) + scale = tf.get_variable( + "scale", [x.get_shape()[-1]], + initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02)) + offset = tf.get_variable( + "offset", [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0)) + out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset + + return out + + +def 
general_conv(x, + num_filters=64, + filter_size=7, + stride=1, + stddev=0.02, + padding="VALID", + name="conv", + do_norm="instance", + do_relu=True, + relufactor=0): + """Generalized convolution layer.""" + with tf.variable_scope(name): + x = layers().Conv2D( + num_filters, + filter_size, + stride, + padding, + activation=None, + kernel_initializer=tf.truncated_normal_initializer(stddev=stddev), + bias_initializer=tf.constant_initializer(0.0))(x) + if do_norm == "layer": + x = layer_norm(x) + elif do_norm == "instance": + x = instance_norm(x) + + if do_relu: + if relufactor == 0: + x = tf.nn.relu(x, "relu") + else: + x = lrelu(x, leak=relufactor) + + return x + + +def patch_discriminator(x, filters=64, filter_size=5, n=4, + name="patch_discrim"): + """Patch descriminator.""" + with tf.variable_scope(name): + x_shape = shape_list(x) + spatial_dims = [x_shape[1] // 4, x_shape[2] // 4] + x = tf.random_crop(x, [x_shape[0]] + spatial_dims + [x_shape[3]]) + for i in range(n): + x = general_conv( + x=x, + num_filters=filters * 2**i, + filter_size=filter_size, + stride=2 if i != n - 1 else 1, + stddev=0.02, + padding="SAME", + name="c%d" % i, + do_norm="instance" if i != 0 else False, + do_relu=i != n - 1, + relufactor=0.2) + x = tf.reduce_mean(x, [1, 2]) + return x + + +def mean_with_attention(x, name, num_heads=4): + """Mean and attention to reduce spatial dimensions.""" + with tf.variable_scope(name): + shape = shape_list(x) + m = tf.reduce_mean(x, [1, 2]) + a = layers().Dense(num_heads, name="mean_attn")(x) + s = tf.reshape(a, [shape[0], -1, num_heads]) + s = tf.nn.softmax(s, axis=1) + s = tf.reshape(s, shape[:-1] + [1, num_heads]) + am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2]) + l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1) + return layers().Dense(2 * shape[-1], name="mean_attn_final")( + tf.reshape(l, [shape[0], (num_heads+1) * shape[-1]])) + + +def single_discriminator(x, filters=128, kernel_size=8, + strides=4, pure_mean=False): + """A simple single-layer convolutional discriminator.""" + with tf.variable_scope("discriminator"): + net = layers().Conv2D( + filters, kernel_size, strides=strides, padding="SAME", name="conv1")(x) + if pure_mean: + net = tf.reduce_mean(net, [1, 2]) + else: + net = mean_with_attention(net, "mean_with_attention") + return net + + +def double_discriminator(x, filters1=128, filters2=None, + kernel_size=8, strides=4, pure_mean=False): + """A convolutional discriminator with 2 layers and concatenated output.""" + if filters2 is None: + filters2 = 4 * filters1 + with tf.variable_scope("discriminator"): + batch_size = shape_list(x)[0] + net = layers().Conv2D( + filters1, kernel_size, strides=strides, padding="SAME", name="conv1")(x) + if pure_mean: + net1 = tf.reduce_mean(net, [1, 2]) + else: + net1 = mean_with_attention(net, "mean_with_attention1") + tf.reshape(net, [batch_size, -1]) + net = tf.nn.relu(net) + net = layers().Conv2D( + filters2, kernel_size, strides=strides, padding="SAME", + name="conv2")(net) + if pure_mean: + net2 = tf.reduce_mean(net, [1, 2]) + else: + net2 = mean_with_attention(net, "mean_with_attention2") + return tf.concat([net1, net2], axis=-1) + + +def upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR): + """Upscaling the image by a factor of f.""" + height, width = shape_list(inputs)[1:3] # pylint: disable=unbalanced-tuple-unpacking + return tf.image.resize_images(inputs, (height * f, width * f), method) + + +def tpu_safe_image_summary(image): + if is_xla_compiled(): + # We only support float32 images 
at the moment due to casting complications. + if image.dtype != tf.float32: + image = to_float(image) + else: + image = tf.cast(image, tf.uint8) + return image + + +# This has been (shamefully) copied from +# GitHub tensorflow/models/blob/master/research/slim/nets/cyclegan.py +# +# tensorflow/models cannot be pip installed, and even if it were we don't want +# to depend on all the models in it. +# +# Therefore copying and forgoing any more bugfixes into it is the most +# expedient way to use this function. +def cyclegan_upsample(net, num_outputs, stride, method="conv2d_transpose"): + """Upsamples the given inputs. + + Args: + net: A Tensor of size [batch_size, height, width, filters]. + num_outputs: The number of output filters. + stride: A list of 2 scalars or a 1x2 Tensor indicating the scale, + relative to the inputs, of the output dimensions. For example, if kernel + size is [2, 3], then the output height and width will be twice and three + times the input size. + method: The upsampling method: 'nn_upsample_conv', + 'bilinear_upsample_conv', or 'conv2d_transpose'. + + Returns: + A Tensor which was upsampled using the specified method. + + Raises: + ValueError: if `method` is not recognized. + """ + + with tf.variable_scope("upconv"): + net_shape = tf.shape(net) + height = net_shape[1] + width = net_shape[2] + + # Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a + # 3x3 "valid" convolution produce an output with the same dimension as the + # input. + spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]]) + + if method == "nn_upsample_conv": + net = tf.image.resize_nearest_neighbor( + net, [stride[0] * height, stride[1] * width]) + net = tf.pad(net, spatial_pad_1, "REFLECT") + net = layers().Conv2D( + num_outputs, (3, 3), activation=tf.nn.relu)(net) + elif method == "bilinear_upsample_conv": + net = tf.image.resize_bilinear(net, + [stride[0] * height, stride[1] * width]) + net = tf.pad(net, spatial_pad_1, "REFLECT") + net = layers().Conv2D( + num_outputs, (3, 3), activation=tf.nn.relu)(net) + elif method == "conv2d_transpose": + # This corrects 1 pixel offset for images with even width and height. + # conv2d is left aligned and conv2d_transpose is right aligned for even + # sized images (while doing "SAME" padding). + # Note: This doesn"t reflect actual model in paper. 
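+      # Assuming the default "valid" padding here, a height-H input becomes
+      # (H - 1) * stride + 3 after the transposed conv (e.g. 2H + 1 for
+      # stride 2), and the [:, 1:, 1:, :] crop below trims it back to 2H.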
+ net = layers().Conv2DTranspose( + num_outputs, (3, 3), strides=stride, activation=tf.nn.relu)(net) + net = net[:, 1:, 1:, :] + else: + raise ValueError("Unknown method: [%s]" % method) + + return net + + +def weight_targeting(w, k): + """Weight-level magnitude pruning.""" + k = tf.to_int32(k) + w_shape = shape_list(w) + size = tf.to_int32(tf.reduce_prod(w_shape[:-1])) + w = tf.reshape(w, [size, w_shape[-1]]) + + transpose_w = tf.transpose(w) + thres = contrib.framework().sort(tf.abs(transpose_w), axis=1)[:, k] + mask = to_float(thres[None, :] >= tf.abs(w)) + + return tf.reshape(mask, w_shape) + + +def unit_targeting(w, k): + """Unit-level magnitude pruning.""" + k = tf.to_int32(k) + w_shape = shape_list(w) + size = tf.to_int32(tf.reduce_prod(w_shape[:-1])) + w = tf.reshape(w, [size, w_shape[-1]]) + + norm = tf.norm(w, axis=0) + thres = contrib.framework().sort(norm, axis=0)[k] + mask = to_float(thres >= norm)[None, :] + mask = tf.tile(mask, [size, 1]) + + return tf.reshape(mask, w_shape) + + +def td_conv(inputs, + filters, + kernel_size, + targeting_count, + targeting_fn, + keep_prob, + is_training, + do_prune=True, + strides=(1, 1), + padding="valid", + data_format="channels_last", + dilation_rate=(1, 1), + activation=None, + use_bias=True, + kernel_initializer=None, + bias_initializer=tf.zeros_initializer(), + name=None, + reuse=None): + """Apply targeted dropout to the weights of a convolution.""" + with tf.variable_scope(name, default_name="td_conv", reuse=reuse): + nhwc = data_format == "channels_last" + in_dim = shape_list(inputs)[-1] if nhwc else shape_list(inputs)[1] + + kernel_shape = [kernel_size, kernel_size, in_dim, filters] + w = tf.get_variable( + "DW", shape=kernel_shape, initializer=kernel_initializer) + if use_bias: + b = tf.get_variable("b", shape=[filters], initializer=bias_initializer) + + if keep_prob < 1.0: + w = targeted_dropout( + w, + targeting_count, + keep_prob, + targeting_fn, + is_training, + do_prune=do_prune) + + if isinstance(strides, int): + strides = [strides, strides] + if isinstance(dilation_rate, int): + dilation_rate = [dilation_rate, dilation_rate] + + if nhwc: + strides = [1, strides[0], strides[1], 1] + dilation_rate = [1, dilation_rate[0], dilation_rate[1], 1] + else: + strides = [1, 1, strides[0], strides[1]] + dilation_rate = [1, 1, dilation_rate[0], dilation_rate[1]] + + y = tf.nn.conv2d( + inputs, + w, + strides, + padding, + data_format="NHWC" if nhwc else "NCHW", + dilations=dilation_rate, + name=None) + + if use_bias: + y += b + + if activation: + y = activation(y) + + return y + + +def targeted_dropout(inputs, + k, + keep_prob, + targeting_fn, + is_training, + do_prune=False): + """Applies targeted dropout. + + Applies dropout at a rate of `1 - keep_prob` to only those elements of + `inputs` marked by `targeting_fn`. See below and paper for more detail: + + "Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang, + Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton. + + Args: + inputs: Tensor, inputs to apply targeted dropout to. + k: Scalar Tensor or python scalar, sets the number of elements to target in + `inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with + second argument of `targeting_fn`. + keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument. + targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a + boolean mask the same shape as `inputs` where True indicates an element + will be dropped, and False not. + is_training: bool, indicates whether currently training. 
+ do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)` + elements of `inputs` expected to be dropped each forwards pass. + + Returns: + Tensor, same shape and dtype as `inputs`. + """ + if not is_training and do_prune: + k = tf.round(to_float(k) * to_float(1. - keep_prob)) + + mask = targeting_fn(inputs, k) + mask = tf.cast(mask, inputs.dtype) + + if is_training: + return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask + elif do_prune: + return inputs * (1 - mask) + else: + return inputs + + +def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0): + """KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1). + + Args: + mu: mu parameter of the distribution. + log_var: log(var) parameter of the distribution. + mu_p: optional mu from a learned prior distribution + log_var_p: optional log(var) from a learned prior distribution + Returns: + the KL loss. + """ + + batch_size = shape_list(mu)[0] + prior_distribution = tfp.distributions.Normal( + mu_p, tf.exp(tf.multiply(0.5, log_var_p))) + posterior_distribution = tfp.distributions.Normal( + mu, tf.exp(tf.multiply(0.5, log_var))) + kld = tfp.distributions.kl_divergence(posterior_distribution, + prior_distribution) + return tf.reduce_sum(kld) / to_float(batch_size) + + +def sparse_equals_constant(constant, tensor): + return tf.SparseTensor( + indices=tensor.indices, + dense_shape=tensor.dense_shape, + values=tf.equal(tensor.values, constant)) + + +def sparse_expand_dims(tensor, current_num_dims, axis=0): + if axis == -1: + axis = current_num_dims + + new_col = tf.zeros([tf.shape(tensor.indices)[0]], dtype=tf.int64) + cols = tf.unstack(tensor.indices, axis=1, num=current_num_dims) + shape = tf.unstack(tensor.dense_shape, num=current_num_dims) + new_indices = tf.stack(cols[:axis] + [new_col] + cols[axis:], axis=1) + return tf.SparseTensor( + indices=new_indices, + values=tensor.values, + dense_shape=tf.stack(shape[:axis] + [1] + shape[axis:])) + + +def sparse_add_constant(constant, tensor): + return tf.SparseTensor( + indices=tensor.indices, + values=constant + tensor.values, + dense_shape=tensor.dense_shape) + + +def sparse_eye(size): + indices = tf.cast(tf.stack([tf.range(size), tf.range(size)]), tf.int64) + values = tf.ones(size) + dense_shape = [tf.cast(size, tf.int64), tf.cast(size, tf.int64)] + + return tf.SparseTensor( + indices=indices, values=values, dense_shape=dense_shape) + + +# modification from https://github.com/tensorflow/tensorflow/pull/21276 +# without special initialization for g +class WeightNorm(tf.keras.layers.Wrapper): + """Decouple weight magnitude and direction. + + This wrapper reparameterizes a layer by decoupling the weight's + magnitude and direction. This speeds up convergence by improving the + conditioning of the optimization problem. + + Weight Normalization: A Simple Reparameterization to Accelerate + Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868 + Tim Salimans, Diederik P. Kingma (2016) + + WeightNorm wrapper works for keras and tf layers. + + ```python + net = WeightNorm(tf.keras.layers.Conv2D(2, 2, activation='relu'), + input_shape=(32, 32, 3), data_init=True)(x) + net = WeightNorm(tf.keras.layers.Conv2D(16, 5, activation='relu'), + data_init=True) + net = WeightNorm(tf.keras.layers.Dense(120, activation='relu'), + data_init=True)(net) + net = WeightNorm(tf.keras.layers.Dense(n_classes), + data_init=True)(net) + ``` + + Arguments: + layer: a layer instance. 
+ data_init: If `True` use data dependent variable initialization + + Raises: + ValueError: If not initialized with a `Layer` instance. + ValueError: If `Layer` does not contain a `kernel` of weights + NotImplementedError: If `data_init` is True and running graph execution + """ + + def __init__(self, layer, data_init=False, **kwargs): + if not isinstance(layer, tf.keras.layers.Layer): + raise ValueError( + "Please initialize `WeightNorm` layer with a " + "`Layer` instance. You passed: {input}".format(input=layer)) + + super(WeightNorm, self).__init__(layer, **kwargs) + self._track_trackable(layer, name="layer") + + def _compute_weights(self): + """Generate weights with normalization.""" + with tf.variable_scope("compute_weights"): + self.layer.kernel = tf.nn.l2_normalize( + self.layer.v, axis=self.norm_axes) * self.layer.g + + def _init_norm(self, weights): + """Set the norm of the weight vector.""" + with tf.variable_scope("init_norm"): + flat = tf.reshape(weights, [-1, self.layer_depth]) + return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,)) + + def _data_dep_init(self, inputs): + """Data dependent initialization for eager execution.""" + + with tf.variable_scope("data_dep_init"): + # Generate data dependent init values + activation = self.layer.activation + self.layer.activation = None + x_init = self.layer.call(inputs) + m_init, v_init = tf.moments(x_init, self.norm_axes) + scale_init = 1. / tf.sqrt(v_init + 1e-10) + + # Assign data dependent init values + self.layer.g = self.layer.g * scale_init + self.layer.bias = (-m_init * scale_init) + self.layer.activation = activation + self.initialized = True + + def build(self, input_shape=None): + """Build `Layer`.""" + input_shape = tf.TensorShape(input_shape).as_list() + self.input_spec = layers().InputSpec(shape=input_shape) + + if not self.layer.built: + self.layer.build(input_shape) + self.layer.built = False + + if not hasattr(self.layer, "kernel"): + raise ValueError("`WeightNorm` must wrap a layer that" + " contains a `kernel` for weights") + + # The kernel's filter or unit dimension is -1 + self.layer_depth = int(self.layer.kernel.shape[-1]) + self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1)) + + self.layer.v = self.layer.kernel + self.layer.g = self.layer.add_variable( + name="g", + shape=(self.layer_depth,), + initializer=tf.ones_initializer, + dtype=self.layer.kernel.dtype, + trainable=True) + + # with ops.control_dependencies([self.layer.g.assign( + # self._init_norm(self.layer.v))]): + # self._compute_weights() + self._compute_weights() + + self.layer.built = True + + super(WeightNorm, self).build() + self.built = True + + def call(self, inputs): + """Call `Layer`.""" + # if context.executing_eagerly(): + # if not self.initialized: + # self._data_dep_init(inputs) + self._compute_weights() # Recompute weights for each forward pass + + output = self.layer.call(inputs) + return output + + def compute_output_shape(self, input_shape): + return tf.TensorShape( + self.layer.compute_output_shape(input_shape).as_list()) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/modalities.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/modalities.py new file mode 100644 index 0000000000000000000000000000000000000000..f5444f1b16c214e1df951638591598c855610e96 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/modalities.py @@ -0,0 +1,302 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Modalities, which specify a feature's domain. + +T2TModel applies a default transformation to each feature according to its +modality. Override them by specifying a model's +hparams.{bottom,loss,top,weights_fn}. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range # pylint: disable=redefined-builtin + +from TensorFlow.nlp.transformer.layers import common_attention +from TensorFlow.nlp.transformer.layers import common_layers + +import tensorflow.compat.v1 as tf +import tensorflow_probability as tfp + + +class ModalityType(object): + """Types of modalities.""" + + IDENTITY = "identity" # identity top and bottom + IDENTITY_SYMBOL = "identity_symbol" # symbol with identity top and bottom + SYMBOL = "symbol" + + + @staticmethod + def get_choices(): + return [ + ModalityType.IDENTITY, + ModalityType.IDENTITY_SYMBOL, + ModalityType.SYMBOL, + ] + + +def class_label_targets_bottom(x, model_hparams, vocab_size): + with tf.variable_scope("class_label_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size)): + return tf.zeros([common_layers.shape_list(x)[0], + 1, + 1, + model_hparams.hidden_size]) + + +def identity_bottom(x, model_hparams, vocab_size): + del model_hparams, vocab_size # unused arg + return tf.cast(x, tf.float32) + + +def make_targets_bottom(bottom): + def targets_bottom(x, model_hparams, vocab_size): + with tf.variable_scope("targets_bottom"): + return bottom(x, model_hparams, vocab_size) + return targets_bottom + + +def real_bottom(x, model_hparams, vocab_size): + del vocab_size # unused arg + with tf.variable_scope("real"): + return tf.layers.dense( + tf.cast(x, tf.float32), model_hparams.hidden_size, name="bottom") + + +def get_weights(model_hparams, vocab_size, hidden_dim=None): + """Create or get concatenated embedding or softmax variable. + + Args: + model_hparams: HParams, model hyperparmeters. + vocab_size: int, vocabulary size. + hidden_dim: dim of the variable. Defaults to _model_hparams' hidden_size + + Returns: + a list of num_shards Tensors. + """ + if hidden_dim is None: + hidden_dim = model_hparams.hidden_size + num_shards = model_hparams.symbol_modality_num_shards + shards = [] + for i in range(num_shards): + shard_size = (vocab_size // num_shards) + ( + 1 if i < vocab_size % num_shards else 0) + var_name = "weights_%d" % i + shards.append( + tf.get_variable( + var_name, [shard_size, hidden_dim], + initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5))) + if num_shards == 1: + ret = shards[0] + else: + ret = tf.concat(shards, 0) + # Convert ret to tensor. 
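+  # Illustrative example: with vocab_size=33000 and num_shards=16, shards 0-7
+  # hold 2063 rows and shards 8-15 hold 2062 rows, so the concat above
+  # reassembles the full [vocab_size, hidden_dim] embedding matrix.
+  # In graph mode, convert_gradient_to_tensor below keeps the gradient of the
+  # concatenated variable dense rather than a per-shard IndexedSlices value.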
+ if not tf.executing_eagerly(): + ret = common_layers.convert_gradient_to_tensor(ret) + return ret + + +def _symbol_bottom_simple(x, model_hparams, vocab_size, name, reuse): + """Bottom transformation for symbols.""" + with tf.variable_scope(name, reuse=reuse): + # Ensure the inputs are 3-D + if len(x.get_shape()) == 4: + x = tf.squeeze(x, axis=3) + while len(x.get_shape()) < 3: + x = tf.expand_dims(x, axis=-1) + + var = get_weights(model_hparams, vocab_size) + x = common_layers.dropout_no_scaling( + x, 1.0 - model_hparams.symbol_dropout) + ret = common_layers.gather(var, x) + if model_hparams.multiply_embedding_mode == "sqrt_depth": + ret *= model_hparams.hidden_size**0.5 + ret *= tf.expand_dims( + common_layers.cast_like(tf.not_equal(x, 0), ret), -1) + return ret + + +def symbol_bottom(x, model_hparams, vocab_size): + if (model_hparams.shared_embedding_and_softmax_weights or + model_hparams.get("shared_embedding")): + return _symbol_bottom_simple( + x, model_hparams, vocab_size, "shared", reuse=None) + return _symbol_bottom_simple( + x, model_hparams, vocab_size, "input_emb", reuse=None) + + +def symbol_targets_bottom(x, model_hparams, vocab_size): + if (model_hparams.shared_embedding_and_softmax_weights or + model_hparams.get("shared_embedding")): + try: + return _symbol_bottom_simple( + x, model_hparams, vocab_size, "shared", reuse=True) + except ValueError: + # perhaps there were no inputs, and this is a new variable. + return _symbol_bottom_simple( + x, model_hparams, vocab_size, "shared", reuse=None) + else: + return _symbol_bottom_simple( + x, model_hparams, vocab_size, "target_emb", reuse=None) + + +# Loss transformations, applied to target features + + +def generic_loss(top_out, targets, model_hparams, vocab_size, weights_fn): + """Compute loss numerator and denominator for one shard of output.""" + del vocab_size # unused arg + logits = top_out + logits = common_attention.maybe_upcast(logits, hparams=model_hparams) + cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.0) + + return common_layers.padded_cross_entropy( + logits, + targets, + model_hparams.label_smoothing, + cutoff=cutoff, + weights_fn=weights_fn) + + +# Top transformations, applied to target features + + +def is_pointwise(func): + """Decorator for whether the function is pointwise. + + An example of a pointwise function is a linear layer followed by + a softmax. Given a tensor [batch, length, height, depth] it operates + only on the last axis, on every point in [batch, length, height] fully + independently. In contrast, a classifier that first averages over length + and height is not pointwise, as it depends on the whole field. It is useful + to know if top functions are pointwise to speed up decoding in certain models. + + Args: + func: Function to decorate. + + Returns: + Original function with an attribute pointwise set to True. + """ + func.pointwise = True + return func + + +def identity_top(body_output, targets, model_hparams, vocab_size): + del targets, model_hparams, vocab_size # unused arg + return body_output + + +@is_pointwise +def symbol_top(body_output, targets, model_hparams, vocab_size): + """Generate logits. + + Args: + body_output: A Tensor with shape + [batch, p0, p1, model_hparams.hidden_size]. + targets: Unused. + model_hparams: HParams, model hyperparmeters. + vocab_size: int, vocabulary size. + + Returns: + logits: A Tensor with shape [batch, p0, p1, ?, vocab_size]. 
+ """ + del targets # unused arg + if model_hparams.shared_embedding_and_softmax_weights: + scope_name = "shared" + reuse = tf.AUTO_REUSE + else: + scope_name = "softmax" + reuse = False + with tf.variable_scope(scope_name, reuse=reuse): + body_output_shape = common_layers.shape_list(body_output) + var = get_weights(model_hparams, vocab_size, body_output_shape[-1]) + if (model_hparams.factored_logits and + model_hparams.mode == tf.estimator.ModeKeys.TRAIN): + # insert channels dimension + body_output = tf.expand_dims(body_output, 3) + return common_layers.FactoredTensor(body_output, var) + else: + body_output = tf.reshape(body_output, [-1, body_output_shape[-1]]) + logits = tf.matmul(body_output, var, transpose_b=True) + return tf.reshape(logits, + body_output_shape[:-1] + [1, vocab_size]) + + +# Utility functions similar to tf.keras for default transformations + + +def get_bottom(modality_type, value=None): + """Gets default bottom transformation; if none available, return value.""" + if modality_type == ModalityType.SYMBOL: + return symbol_bottom + elif modality_type in (ModalityType.IDENTITY, + ModalityType.IDENTITY_SYMBOL): + return identity_bottom + return value + + +def get_loss(modality_type, value=None): + """Gets default loss transformation; if none available, return value.""" + if modality_type in (ModalityType.IDENTITY, + ModalityType.IDENTITY_SYMBOL, + ModalityType.SYMBOL): + return generic_loss + return value + + +def get_name(modality_type, value=None): + """Gets default name for transformations; if none available, return value.""" + # For legacy reasons, modalities vary in their naming scheme. Future plans are + # to remove any need for get_name. We do not recommend using it. + if modality_type == ModalityType.IDENTITY: + return lambda model_hparams, vocab_size: "identity_modality" + elif modality_type == ModalityType.SYMBOL: + def name(model_hparams, vocab_size): + return "symbol_modality_%d_%d" % (vocab_size, model_hparams.hidden_size) + return name + return value + + +def get_targets_bottom(modality_type, value=None): + """Gets default bottom transformation for targets; if none, return value.""" + if modality_type == ModalityType.SYMBOL: + return symbol_targets_bottom + elif modality_type == ModalityType.IDENTITY_SYMBOL: + return identity_bottom + elif modality_type == ModalityType.IDENTITY: + return make_targets_bottom(identity_bottom) + return value + + +def get_top(modality_type, value=None): + """Gets default top transformation; if none available, return value.""" + if modality_type in (ModalityType.IDENTITY, + ModalityType.IDENTITY_SYMBOL): + return identity_top + elif modality_type == ModalityType.SYMBOL: + return symbol_top + return value + + +def get_weights_fn(modality_type, value=None): + """Gets default weights function; if none available, return value.""" + if modality_type in (ModalityType.IDENTITY_SYMBOL, + ModalityType.SYMBOL): + return common_layers.weights_nonzero + elif modality_type in ModalityType.get_choices(): + return common_layers.weights_all + return value diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/transformer_layers.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/transformer_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..45ebf2524a4095f9c3770b9f87ecbfe32866885f --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/transformer_layers.py @@ -0,0 +1,366 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Commonly re-used transformer layers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from TensorFlow.nlp.transformer.layers import common_attention +from TensorFlow.nlp.transformer.layers import common_layers +from TensorFlow.nlp.transformer.utils import expert_utils + +import tensorflow.compat.v1 as tf + + +# TODO(lukaszkaiser): remove this function when not needed any more. +def layers(): + return common_layers.layers() + + +def transformer_prepare_encoder(inputs, target_space, hparams, features=None, + type_ids=None, num_types=None, + reuse_target_embedding=tf.AUTO_REUSE): + """Prepare one shard of the model for the encoder. + + Args: + inputs: a Tensor. + target_space: a Tensor. + hparams: run hyperparameters + features: optionally pass the entire features dictionary as well. + This is needed now for "packed" datasets. + type_ids: optional, an int64 Tensor of shape [batch, length] that allows + for adding type embeddings, similar to positional embeddings. + num_types: optional, an int that decides the number of types in type_ids. + reuse_target_embedding: option to reuse variable name in the case that + symbol modalities are reused between inputs/targets. + + Returns: + encoder_input: a Tensor, bottom of encoder stack + encoder_self_attention_bias: a bias tensor for use in encoder self-attention + encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder + attention + """ + ishape_static = inputs.shape.as_list() + encoder_input = inputs + if features and "inputs_segmentation" in features: + # Packed dataset. Keep the examples from seeing each other. + inputs_segmentation = features["inputs_segmentation"] + inputs_position = features["inputs_position"] + targets_segmentation = features["targets_segmentation"] + if (hasattr(hparams, "unidirectional_encoder") and + hparams.unidirectional_encoder): + tf.logging.info("Using unidirectional encoder") + encoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle( + common_layers.shape_list(inputs)[1])) + else: + encoder_self_attention_bias = ( + common_attention.attention_bias_same_segment( + inputs_segmentation, inputs_segmentation)) + encoder_decoder_attention_bias = ( + common_attention.attention_bias_same_segment(targets_segmentation, + inputs_segmentation)) + else: + encoder_padding = common_attention.embedding_to_padding(encoder_input) + ignore_padding = common_attention.attention_bias_ignore_padding( + encoder_padding) + if (hasattr(hparams, "unidirectional_encoder") and + hparams.unidirectional_encoder): + tf.logging.info("Using unidirectional encoder") + encoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle( + common_layers.shape_list(inputs)[1])) + else: + # Usual case - not a packed dataset. 
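+      # ignore_padding holds a large negative bias (about -1e9) at padded
+      # positions and zero elsewhere, so those positions receive effectively
+      # no weight after the softmax inside attention.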
+ encoder_self_attention_bias = ignore_padding + encoder_decoder_attention_bias = ignore_padding + inputs_position = None + if hparams.proximity_bias: + encoder_self_attention_bias += common_attention.attention_bias_proximal( + common_layers.shape_list(inputs)[1]) + if target_space is not None and hparams.get("use_target_space_embedding", + True): + # Append target_space_id embedding to inputs. + emb_target_space = common_layers.embedding( + target_space, + 32, + ishape_static[-1], + name="target_space_embedding", + dtype=hparams.get("activation_dtype", "float32"), + reuse=reuse_target_embedding) + emb_target_space = tf.reshape(emb_target_space, [1, 1, -1]) + encoder_input += emb_target_space + if hparams.pos == "timing": + if inputs_position is not None: + encoder_input = common_attention.add_timing_signal_1d_given_position( + encoder_input, inputs_position) + else: + encoder_input = common_attention.add_timing_signal_1d(encoder_input) + elif hparams.pos == "timing_from_features": + encoder_input = common_attention.add_timing_signals_from_features( + encoder_input, features, hparams.position_features) + elif hparams.pos == "emb": + encoder_input = common_attention.add_positional_embedding( + encoder_input, hparams.max_length, "inputs_positional_embedding", + inputs_position) + + # Add type embeddings + if type_ids is not None: + if not num_types: + raise ValueError("Need to set num_types as well.") + encoder_input = common_attention.add_positional_embedding( + encoder_input, num_types, "inputs_type_embedding", type_ids) + + encoder_self_attention_bias = common_layers.cast_like( + encoder_self_attention_bias, encoder_input) + encoder_decoder_attention_bias = common_layers.cast_like( + encoder_decoder_attention_bias, encoder_input) + return (encoder_input, encoder_self_attention_bias, + encoder_decoder_attention_bias) + + +def transformer_encoder(encoder_input, + encoder_self_attention_bias, + hparams, + name="encoder", + nonpadding=None, + save_weights_to=None, + make_image_summary=False, + losses=None, + attn_bias_for_padding=None): + """A stack of transformer layers. + + Args: + encoder_input: a Tensor + encoder_self_attention_bias: bias Tensor for self-attention + (see common_attention.attention_bias()) + hparams: hyperparameters for model + name: a string + nonpadding: optional Tensor with shape [batch_size, encoder_length] + indicating what positions are not padding. This must either be + passed in, which we do for "packed" datasets, or inferred from + encoder_self_attention_bias. The knowledge about padding is used + for pad_remover(efficiency) and to mask out padding in convolutional + layers. + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. + losses: optional list onto which to append extra training losses + attn_bias_for_padding: Padded attention bias in case a unidirectional + encoder is being used where future attention is masked. 
+ + Returns: + y: a Tensors + """ + x = encoder_input + attention_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "attention_dropout_broadcast_dims", ""))) + + with tf.variable_scope(name): + if nonpadding is not None: + padding = 1.0 - nonpadding + else: + attention_bias = encoder_self_attention_bias + if attn_bias_for_padding is not None: + attention_bias = attn_bias_for_padding + padding = common_attention.attention_bias_to_padding(attention_bias) + nonpadding = 1.0 - padding + pad_remover = None + if hparams.use_pad_remover and not common_layers.is_xla_compiled(): + pad_remover = expert_utils.PadRemover(padding) + for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): + with tf.variable_scope("layer_%d" % layer): + with tf.variable_scope("self_attention"): + if layer < hparams.get("num_area_layers", 0): + max_area_width = hparams.get("max_area_width", 1) + max_area_height = hparams.get("max_area_height", 1) + memory_height = hparams.get("memory_height", 1) + else: + max_area_width = 1 + max_area_height = 1 + memory_height = 1 + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x, hparams), + None, + encoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + max_relative_position=hparams.max_relative_position, + heads_share_relative_embedding=( + hparams.heads_share_relative_embedding), + add_relative_to_values=hparams.add_relative_to_values, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + max_length=hparams.get("max_length"), + vars_3d=hparams.get("attention_variables_3d"), + activation_dtype=hparams.get("activation_dtype", "float32"), + weight_dtype=hparams.get("weight_dtype", "float32"), + hard_attention_k=hparams.get("hard_attention_k", 0), + gumbel_noise_weight=hparams.get("gumbel_noise_weight", 0.0), + max_area_width=max_area_width, + max_area_height=max_area_height, + memory_height=memory_height, + area_key_mode=hparams.get("area_key_mode", "none"), + area_value_mode=hparams.get("area_value_mode", "none"), + training=(hparams.get("mode", tf.estimator.ModeKeys.TRAIN) + == tf.estimator.ModeKeys.TRAIN)) + x = common_layers.layer_postprocess(x, y, hparams) + with tf.variable_scope("ffn"): + y = transformer_ffn_layer( + common_layers.layer_preprocess(x, hparams), + hparams, + pad_remover, + conv_padding="SAME", + nonpadding_mask=nonpadding, + losses=losses) + x = common_layers.layer_postprocess(x, y, hparams) + # if normalization is done in layer_preprocess, then it should also be done + # on the output, since the output can grow very large, being the sum of + # a whole stack of unnormalized layer outputs. + return common_layers.layer_preprocess(x, hparams) + + +def transformer_ffn_layer(x, + hparams, + pad_remover=None, + conv_padding="LEFT", + nonpadding_mask=None, + losses=None, + cache=None, + decode_loop_step=None, + readout_filter_size=0, + layer_collection=None): + """Feed-forward layer in the transformer. + + Args: + x: a Tensor of shape [batch_size, length, hparams.hidden_size] + hparams: hyperparameters for model + pad_remover: an expert_utils.PadRemover object tracking the padding + positions. 
If provided, when using convolutional settings, the padding + is removed before applying the convolution, and restored afterward. This + can give a significant speedup. + conv_padding: a string - either "LEFT" or "SAME". + nonpadding_mask: an optional Tensor with shape [batch_size, length]. + needed for convolutional layers with "SAME" padding. + Contains 1.0 in positions corresponding to nonpadding. + losses: optional list onto which to append extra training losses + cache: dict, containing tensors which are the results of previous + attentions, used for fast decoding. + decode_loop_step: An integer, step number of the decoding loop. + Only used for inference on TPU. + readout_filter_size: if it's greater than 0, then it will be used instead of + filter_size + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the + KFAC optimizer. Default is None. + + + Returns: + a Tensor of shape [batch_size, length, hparams.hidden_size] + + Raises: + ValueError: If losses arg is None, but layer generates extra losses. + """ + ffn_layer = hparams.ffn_layer + relu_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "relu_dropout_broadcast_dims", ""))) + if ffn_layer == "conv_hidden_relu": + # Backwards compatibility + ffn_layer = "dense_relu_dense" + if ffn_layer == "dense_relu_dense": + if pad_remover: + original_shape = common_layers.shape_list(x) + # Collapse `x` across examples, and remove padding positions. + x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0)) + x = tf.expand_dims(pad_remover.remove(x), axis=0) + conv_output = common_layers.dense_relu_dense( + x, + hparams.filter_size, + hparams.hidden_size, + dropout=hparams.relu_dropout, + dropout_broadcast_dims=relu_dropout_broadcast_dims, + layer_collection=layer_collection) + if pad_remover: + # Restore `conv_output` to the original shape of `x`, including padding. 
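+      # remove() earlier packed all non-padding positions into a single flat
+      # pseudo-batch; restore() scatters them back to their original
+      # locations, filling the padding positions with zeros.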
+ conv_output = tf.reshape( + pad_remover.restore(tf.squeeze(conv_output, axis=0)), original_shape) + return conv_output + elif ffn_layer == "conv_relu_conv": + return common_layers.conv_relu_conv( + x, + readout_filter_size or hparams.filter_size, + hparams.hidden_size, + first_kernel_size=hparams.conv_first_kernel, + second_kernel_size=1, + padding=conv_padding, + nonpadding_mask=nonpadding_mask, + dropout=hparams.relu_dropout, + cache=cache, + decode_loop_step=decode_loop_step) + elif ffn_layer == "parameter_attention": + return common_attention.parameter_attention( + x, hparams.parameter_attention_key_channels or hparams.hidden_size, + hparams.parameter_attention_value_channels or hparams.hidden_size, + hparams.hidden_size, readout_filter_size or hparams.filter_size, + hparams.num_heads, + hparams.attention_dropout) + elif ffn_layer == "conv_hidden_relu_with_sepconv": + return common_layers.conv_hidden_relu( + x, + readout_filter_size or hparams.filter_size, + hparams.hidden_size, + kernel_size=(3, 1), + second_kernel_size=(31, 1), + padding="LEFT", + dropout=hparams.relu_dropout) + elif ffn_layer == "sru": + return common_layers.sru(x) + elif ffn_layer == "local_moe_tpu": + overhead = hparams.moe_overhead_eval + if hparams.mode == tf.estimator.ModeKeys.TRAIN: + overhead = hparams.moe_overhead_train + ret, loss = expert_utils.local_moe_tpu( + x, + hparams.filter_size // 2, + hparams.hidden_size, + hparams.moe_num_experts, + overhead=overhead, + loss_coef=hparams.moe_loss_coef) + elif ffn_layer == "local_moe": + overhead = hparams.moe_overhead_eval + if hparams.mode == tf.estimator.ModeKeys.TRAIN: + overhead = hparams.moe_overhead_train + ret, loss = expert_utils.local_moe( + x, + True, + expert_utils.ffn_expert_fn(hparams.hidden_size, [hparams.filter_size], + hparams.hidden_size), + hparams.moe_num_experts, + k=hparams.moe_k, + hparams=hparams) + losses.append(loss) + return ret + else: + assert ffn_layer == "none" + return x diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/transformer_memory.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/transformer_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..556c019df3876eb7b140acce72e507cb8f487888 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/transformer_memory.py @@ -0,0 +1,393 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The memory unit for Transformer.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from TensorFlow.nlp.transformer.layers import common_layers +import tensorflow.compat.v1 as tf + + +class RecurrentMemory(object): + """Base class for recurrent memory. + + This class defines the memory interface, but behaves like a no-op. 
+ """ + + def pre_attention(self, segment, query_antecedent, memory_antecedent, bias): + """Called prior to self-attention, to incorporate memory items. + + Args: + segment: an integer Tensor with shape [batch] + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: must be None. Attention normally allows this to be a + Tensor with shape [batch, length_m, channels], but we currently only + support memory for decoder-side self-attention. + bias: bias Tensor (see attention_bias()) + Returns: + (data, new_query_antecedent, new_memory_antecedent, new_bias) + """ + del segment + return None, query_antecedent, memory_antecedent, bias + + def post_attention(self, token, x): + """Called after self-attention. The memory can be updated here. + + Args: + token: Data returned by pre_attention, which can be used to carry over + state related to the current memory operation. + x: a Tensor of data after self-attention and feed-forward + Returns: + a (possibly modified) version of the input x + """ + assert token is None + return x + + +class RecentTokensMemory(RecurrentMemory): + """A memory module that caches features for recent tokens. + + When the number of tokens cached is equal to the chunk size, this is + equivalent to the memory used by Transformer-XL + (https://arxiv.org/abs/1901.02860) + """ + + def __init__(self, name, hparams): + hidden_size = hparams.hidden_size + self.chunk_length = hparams.split_targets_chunk_length + assert self.chunk_length > 0, "Chunking is required to use recurrent memory" + + if hasattr(hparams, "num_memory_items") and hparams.num_memory_items > 0: + self.tokens_to_cache = hparams.num_memory_items + else: + self.tokens_to_cache = self.chunk_length + + # TODO(kitaev): The implementation of the chunking code makes it somewhat + # convoluted to figure out how many actual sequences we can have per batch. + # The data pipeline should be revisited at some point. + if (hasattr(hparams, "recurrent_memory_batch_size") + and hparams.recurrent_memory_batch_size > 0): + batch_size_in_sequences = hparams.recurrent_memory_batch_size + else: + batch_size_in_sequences = hparams.batch_size / hparams.max_length + + memory_shape = [batch_size_in_sequences, self.tokens_to_cache, hidden_size] + bias_shape = [batch_size_in_sequences, 1, 1, self.tokens_to_cache] + + with tf.variable_scope(name): + self.previous_segment = tf.get_variable( + "memsegment", (batch_size_in_sequences,), + dtype=tf.int32, trainable=False, + collections=[tf.GraphKeys.LOCAL_VARIABLES], + initializer=tf.constant_initializer(0)) + + self.previous_vals = tf.get_variable( + "memvals", memory_shape, + dtype=tf.float32, trainable=False, + collections=[tf.GraphKeys.LOCAL_VARIABLES], + initializer=tf.constant_initializer(.0)) + + self.previous_bias = tf.get_variable( + "membias", bias_shape, + dtype=tf.float32, trainable=False, + collections=[tf.GraphKeys.LOCAL_VARIABLES], + initializer=tf.constant_initializer(-1e9)) + + def pre_attention(self, segment, query_antecedent, memory_antecedent, bias): + """Called prior to self-attention, to incorporate memory items. + + Args: + segment: an integer Tensor with shape [batch] + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: must be None. Attention normally allows this to be a + Tensor with shape [batch, length_m, channels], but we currently only + support memory for decoder-side self-attention. 
+ bias: bias Tensor (see attention_bias()) + Returns: + (data, new_query_antecedent, new_memory_antecedent, new_bias) + """ + assert memory_antecedent is None, "We only support language modeling" + + # In eval mode, batch size may be variable + memory_batch_size = tf.shape(self.previous_vals)[0] + current_batch_size = tf.shape(query_antecedent)[0] + amount_to_pad = memory_batch_size - current_batch_size + + # If segment id is zero, don't attend back to the memory + previous_bias = self.previous_bias[:current_batch_size, :, :, :] + tf.cast( + tf.equal(segment[:, None, None, None], 0), tf.float32) * -1e9 + + sliced_previous_vals = self.previous_vals[:current_batch_size, :, :] + + new_memory_antecedent = tf.concat( + [tf.stop_gradient(sliced_previous_vals), query_antecedent], 1) + new_bias = tf.concat([ + tf.tile(tf.stop_gradient(previous_bias), [1, 1, self.chunk_length, 1]), + tf.tile(bias, [current_batch_size, 1, 1, 1]), + ], -1) + + remember_segment = tf.pad(segment, [[0, amount_to_pad]]) + # TODO(kitaev): The code assumes that we always either increment the chunk + # number or reset it to zero. This assumption will not hold if we re-run the + # model for each token, e.g. for autoregressive greedy/beam/sampling decode. + remember_vals = tf.pad(query_antecedent, + [[0, amount_to_pad], [0, 0], [0, 0]]) + # Query position is on axis -2 for bias: as long as a token can be attended + # to from at least one query position (i.e. it's not padding), memorize it. + remember_bias = tf.tile( + tf.reduce_max(bias, -2, keepdims=True), [memory_batch_size, 1, 1, 1]) + # Assume that query_antecedent is always a full chunk (i.e. not truncated) + if self.chunk_length < self.tokens_to_cache: + remember_vals = tf.concat([self.previous_vals, remember_vals], 1) + remember_bias = tf.concat([ + self.previous_bias - 1e9 * tf.cast( + tf.equal( + tf.pad(segment, [[0, amount_to_pad]])[:, None, None, None], + 0), tf.float32), + remember_bias + ], -1) + if self.chunk_length != self.tokens_to_cache: + remember_vals = remember_vals[:, -self.tokens_to_cache:, :] + remember_bias = remember_bias[:, :, :, -self.tokens_to_cache:] + token = (remember_segment, remember_vals, remember_bias) + + return token, query_antecedent, new_memory_antecedent, new_bias + + def post_attention(self, token, x): + """Called after self-attention. The memory can be updated here. + + Args: + token: Data returned by pre_attention, which can be used to carry over + state related to the current memory operation. + x: a Tensor of data after self-attention and feed-forward + Returns: + a (possibly modified) version of the input x + """ + with tf.control_dependencies([ + self.previous_segment.assign(token[0]), + self.previous_vals.assign(token[1]), + self.previous_bias.assign(token[2]), + ]): + return tf.identity(x) + + +class TransformerMemory(object): + """Implements the Memory module. + + Based on Neural Turing Machines: arXiv:1410.5401 [cs.NE] + """ + + def __init__(self, batch_size, key_depth, val_depth, memory_size, + sharpen_factor=1., name="neural_memory"): + """Initialize the memory object. + + Args: + batch_size: the batch size. + key_depth: the depth of the memory keys. + val_depth: the depth of the memory values. + memory_size: the number of items in the memory. + sharpen_factor: the sharpen_factor for addressing the memory. + name: the optional variable scope. 
+ """ + self.name = name + self.batch_size = batch_size + self.key_depth = key_depth + self.val_depth = val_depth + self.memory_size = memory_size + self.sharpen_factor = sharpen_factor + with tf.variable_scope(name): + self.segment_number = tf.get_variable( + "segment_number", [self.batch_size], + dtype=tf.int32, trainable=False, + initializer=tf.constant_initializer(100000)) + self.mem_vals = tf.get_variable( + "memvals", [self.batch_size, self.memory_size, self.val_depth], + dtype=tf.float32, trainable=False, + initializer=tf.constant_initializer(.0)) + self.mean_logits = tf.get_variable( + "meanlogits", [self.batch_size, self.memory_size], + dtype=tf.float32, trainable=False, + initializer=tf.constant_initializer(.0)) + + def _norm(self, x): + """Compute the safe norm.""" + return tf.sqrt(tf.reduce_sum(tf.square(x), keepdims=True, axis=-1) + 1e-7) + + def _address_content(self, x): + """Address the memory based on content similarity. + + Args: + x: a tensor in the shape of [batch_size, length, depth]. + Returns: + the logits for each memory entry [batch_size, length, memory_size]. + """ + mem_keys = tf.layers.dense(self.mem_vals, self.key_depth, + bias_initializer=tf.constant_initializer(1.0), + name="mem_key") + mem_query = tf.layers.dense(x, self.key_depth, + bias_initializer=tf.constant_initializer(1.0), + name="mem_query") + norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys), + transpose_b=True) + dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True) + cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist") + access_logits = self.sharpen_factor * cos_dist + return access_logits + + def read(self, x): + """Read from the memory. + + An external component can use the results via a simple MLP, + e.g., fn(x W_x + retrieved_mem W_m). + + Args: + x: a tensor in the shape of [batch_size, length, depth]. + Returns: + access_logits: the logits for accessing the memory in shape of + [batch_size, length, memory_size]. + retrieved_mem: the retrieved results in the shape of + [batch_size, length, val_depth]. + """ + access_logits = self._address_content(x) + weights = tf.nn.softmax(access_logits) + retrieved_mem = tf.reduce_sum( + tf.multiply(tf.expand_dims(weights, 3), + tf.expand_dims(self.mem_vals, axis=1)), axis=2) + return access_logits, retrieved_mem + + def write(self, x, access_logits): + """Write to the memory based on a combination of similarity and least used. + + Based on arXiv:1607.00036v2 [cs.LG]. + + Args: + x: a tensor in the shape of [batch_size, length, depth]. + access_logits: the logits for accessing the memory. + Returns: + the update op. 
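+    Note: roughly, each position proposes
+    (1 - erase_gate * w) * old_value + w * candidate_value with
+    w = softmax(write_logits); the proposals are averaged over positions,
+    and mean_logits tracks an exponential moving average of write_logits.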
+ """ + gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name="gamma") + write_logits = access_logits - gamma * tf.expand_dims(self.mean_logits, 1) + candidate_value = tf.layers.dense(x, self.val_depth, + activation=tf.nn.relu, + name="candidate_value") + erase_gates = tf.layers.dense(x, self.memory_size, + activation=tf.nn.sigmoid, + name="erase") + write_weights = tf.nn.softmax(write_logits) + erase_weights = tf.expand_dims(1 - erase_gates * write_weights, 3) + erase = tf.multiply(erase_weights, + tf.expand_dims(self.mem_vals, 1)) + addition = tf.multiply( + tf.expand_dims(write_weights, 3), + tf.expand_dims(candidate_value, 2)) + update_value_op = self.mem_vals.assign( + tf.reduce_mean(erase + addition, axis=1)) + with tf.control_dependencies([update_value_op]): + write_op = self.mean_logits.assign( + self.mean_logits * 0.1 + tf.reduce_mean(write_logits * 0.9, axis=1)) + return write_op + + def set(self, mem_vals, mean_logits): + set_op = tf.group([ + self.mem_vals.assign(mem_vals), + self.mean_logits.assign(mean_logits)]) + return set_op + + def get(self): + return self.mem_vals, self.mean_logits + + def update_segment_number(self, segment_number): + return self.segment_number.assign(segment_number) + + def reset(self, entries_to_reset): + """Reset the entries in the memory. + + Args: + entries_to_reset: a 1D tensor. + Returns: + the reset op. + """ + num_updates = tf.size(entries_to_reset) + update_vals = tf.scatter_update( + self.mem_vals, entries_to_reset, + tf.tile(tf.expand_dims( + tf.fill([self.memory_size, self.val_depth], .0), 0), + [num_updates, 1, 1])) + update_logits = tf.scatter_update( + self.mean_logits, entries_to_reset, + tf.tile(tf.expand_dims( + tf.fill([self.memory_size], .0), 0), + [num_updates, 1])) + reset_op = tf.group([update_vals, update_logits]) + return reset_op + + def pre_attention(self, segment_number, query_antecedent, + memory_antecedent, bias): + """Called prior to self-attention, to incorporate memory items. + + Args: + segment_number: an integer Tensor with shape [batch] + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: must be None. Attention normally allows this to be a + Tensor with shape [batch, length_m, channels], but we currently only + support memory for decoder-side self-attention. + bias: bias Tensor (see attention_bias()) + Returns: + (data, new_query_antecedent, new_memory_antecedent, new_bias) + """ + with tf.variable_scope(self.name + "/pre_attention", reuse=tf.AUTO_REUSE): + assert memory_antecedent is None, "We only support language modeling" + with tf.control_dependencies([ + tf.assert_greater_equal(self.batch_size, tf.size(segment_number))]): + difference = self.batch_size - tf.size(segment_number) + segment_number = tf.pad(segment_number, [[0, difference]]) + reset_op = self.reset(tf.reshape(tf.where( + tf.less(segment_number, self.segment_number)), [-1])) + memory_results = {} + with tf.control_dependencies([reset_op]): + with tf.control_dependencies([ + self.update_segment_number(segment_number)]): + x = tf.pad(query_antecedent, [ + [0, difference], [0, 0], [0, 0]]) + access_logits, retrieved_mem = self.read(x) + memory_results["x"] = x + memory_results["access_logits"] = access_logits + memory_results["retrieved_mem"] = retrieved_mem + return memory_results, query_antecedent, memory_antecedent, bias + + def post_attention(self, token, x): + """Called after self-attention. The memory can be updated here. 
+ + Args: + token: Data returned by pre_attention, which can be used to carry over + state related to the current memory operation. + x: a Tensor of data after self-attention and feed-forward + Returns: + a (possibly modified) version of the input x + """ + with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE): + depth = common_layers.shape_list(x)[-1] + actual_batch_size = common_layers.shape_list(x)[0] + memory_output = tf.gather(token["retrieved_mem"], + tf.range(actual_batch_size)) + output = tf.add(tf.layers.dense(x, depth, use_bias=False), + tf.layers.dense(memory_output, depth)) + with tf.control_dependencies([output]): + with tf.control_dependencies([ + self.write(token["x"], token["access_logits"])]): + return tf.identity(output) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/vq_discrete.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/vq_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..6e324d28bb1d46c8522188435b4b38637c35fbf4 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/vq_discrete.py @@ -0,0 +1,310 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Clean discrete bottleneck as in https://arxiv.org/abs/1805.11063.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from functools import partial +from TensorFlow.nlp.transformer.layers import common_layers +import tensorflow.compat.v1 as tf +from tensorflow.python.training import moving_averages + + +class DiscreteBottleneck(object): + """Discrete bottleneck class.""" + + def __init__(self, hparams): + self.hparams = hparams + print ("self.hparams.z_size", self.hparams.z_size) + # Set the discretization bottleneck specific things here + self.hparams.z_size_per_residual = self.hparams.z_size // \ + self.hparams.num_residuals + print ("self.hparams.num_residuals", self.hparams.num_residuals) + self.hparams.block_dim = int( + self.hparams.hidden_size // self.hparams.num_blocks) + self.hparams.block_v_size = 2**( + self.hparams.z_size_per_residual / self.hparams.num_blocks) + self.hparams.block_v_size = int(self.hparams.block_v_size) + self.means = tf.get_variable( + name="means", + shape=[ + self.hparams.num_blocks, self.hparams.block_v_size, + self.hparams.block_dim + ], + initializer=tf.initializers.variance_scaling(distribution="uniform")) + + # Create the shadow variables if we are using EMA + if self.hparams.ema: + self.ema_count = tf.get_variable( + "ema_count", [self.hparams.num_blocks, self.hparams.block_v_size], + initializer=tf.constant_initializer(0), + trainable=False) + with tf.colocate_with(self.means): + self.ema_means = tf.get_variable( + "ema_means", + initializer=self.means.initialized_value(), + trainable=False) + + def slice_hidden(self, x): + """Slice encoder hidden state into block_dim. 
+ + Args: + x: Encoder hidden state of shape [-1, hidden_size]. + + Returns: + Sliced states of shape [-1, num_blocks, block_dim]. + """ + x_sliced = tf.reshape( + x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim]) + return x_sliced + + def nearest_neighbor(self, x, means): + """Find the nearest element in means to elements in x. + + Args: + x: Batch of encoder continuous latent states sliced/projected into + shape [-1, num_blocks, block_dim]. + means: Embedding means of shape. + + Returns: + Tensor with nearest element in mean encoded in one-hot notation. + """ + x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True) + means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True) + scalar_prod = tf.matmul( + tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1])) + scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2]) + dist = x_norm_sq + tf.transpose( + means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod + + if self.hparams.soft_em: + nearest_idx = tf.stack( + [ + tf.multinomial( + -dist[:, i, :], num_samples=self.hparams.num_samples) + for i in range(self.hparams.num_blocks) + ], + axis=1) + nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size) + nearest_hot = tf.reduce_mean(nearest_hot, axis=-2) + else: + if self.hparams.random_top_k > 1: + _, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k) + nearest_idx = tf.gather( + top_k_idx, + tf.random_uniform( + [1], + minval=0, + maxval=self.hparams.random_top_k - 1, + dtype=tf.int32), + axis=-1) + else: + if self.hparams.use_scales: + dist /= tf.reshape(self.hparams.scales, + [1, 1, self.hparams.moe_num_experts]) + nearest_idx = tf.argmax(-dist, axis=-1) + nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size) + return nearest_hot + + def embedding_lookup(self, x, means): + """Compute nearest neighbors and loss for training the embeddings. + + Args: + x: Batch of encoder continuous latent states sliced/projected into + shape + [-1, num_blocks, block_dim]. + means: Embedding means. + + Returns: + The nearest neighbor in one hot form, the nearest neighbor + itself, the + commitment loss, embedding training loss. + """ + x_means_hot = self.nearest_neighbor(x, means) + x_means_hot_flat = tf.reshape( + x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size]) + x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means) + x_means = tf.transpose(x_means, [1, 0, 2]) + q_loss = tf.reduce_mean( + tf.squared_difference(tf.stop_gradient(x), x_means)) + e_loss = tf.reduce_mean( + tf.squared_difference(x, tf.stop_gradient(x_means))) + return x_means_hot, x_means, q_loss, e_loss + + def bit_to_int(self, x_bit, num_bits, base=2): + """Turn x_bit representing numbers bitwise (lower-endian) to int tensor. + + Args: + x_bit: Tensor containing numbers in a particular base to be + converted to + int. + num_bits: Number of bits in the representation. + base: Base of the representation. + + Returns: + Integer representation of this number. + """ + x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits]))) + # pylint: disable=g-complex-comprehension + x_labels = [ + x_l[:, i] * tf.to_int32(base)**tf.to_int32(i) for i in range(num_bits)] + res = sum(x_labels) + return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1])) + + def int_to_bit(self, x_int, num_bits, base=2): + """Turn x_int representing numbers into a bitwise (lower-endian) tensor. + + Args: + x_int: Tensor containing integer to be converted into base + notation. 
+ num_bits: Number of bits in the representation. + base: Base of the representation. + + Returns: + Corresponding number expressed in base. + """ + x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1)) + # pylint: disable=g-complex-comprehension + x_labels = [ + tf.floormod( + tf.floordiv(tf.to_int32(x_l), + tf.to_int32(base)**i), tf.to_int32(base)) + for i in range(num_bits)] + res = tf.concat(x_labels, axis=-1) + return tf.cast(res, tf.float32) + + def embed(self, x): + """Embedding function that takes discrete latent and returns embedding. + + Args: + x: Input to the discretization bottleneck. + Returns: + Continuous embedding to be passed on to the decoder. + + Raises: + ValueError: For unknown or missing arguments. + """ + shape_x = common_layers.shape_list(x) + x_flat = tf.reshape(x, [-1, 1]) + c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2) + shape = common_layers.shape_list(c) + new_shape = shape + new_shape.append(self.hparams.num_blocks) + new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks)) + c = tf.to_int32(tf.reshape(c, shape=new_shape)) + h1_shape = shape_x + h1_shape.append(self.hparams.hidden_size) + h1 = tf.zeros(dtype=tf.float32, shape=h1_shape) + c_int = self.bit_to_int( + c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2) + c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1) + c_hot_flat = tf.reshape( + c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]) + h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means) + h1 = tf.transpose(h1, perm=[1, 0, 2]) + h1 = tf.reshape(h1, shape=h1_shape) + h1_shape[0] = self.hparams.batch_size + h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2") + res = tf.layers.dense( + tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin") + return res + + def discrete_bottleneck(self, x): + """Discretization bottleneck for latent variables. + + Args: + x: Input to the discretization bottleneck. + + Returns: + Embedding to pass to the decoder, discrete latent, loss, and the + embedding + function. + + Raises: + ValueError: If projection_tensors is None for reshape_method + project, or + ema_count or ema_means is None if we are using ema, or unknown + args. 
+ """ + x_reshaped = self.slice_hidden(x) + x_means_hot = [] + x_means = 0 + loss = 0 + x_means_hot, x_means, q_loss, e_loss = self.embedding_lookup( + x_reshaped, self.means) + + if self.hparams.ema: + tf.logging.info("Using EMA with beta = {}".format(self.hparams.beta)) + updated_ema_count = \ + moving_averages.assign_moving_average( + self.ema_count, + tf.reduce_sum( + tf.reshape( + x_means_hot, + shape=[-1, self.hparams.num_blocks, + self.hparams.block_v_size]), + axis=0), + self.hparams.decay, + zero_debias=False) + + dw = tf.matmul( + tf.transpose(x_means_hot, perm=[1, 2, 0]), + tf.transpose(x_reshaped, perm=[1, 0, 2])) + + updated_ema_means = \ + moving_averages.assign_moving_average( + self.ema_means, dw, self.hparams.decay, + zero_debias=False) + n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True) + updated_ema_count = ((updated_ema_count + self.hparams.epsilon) / ( + n + 2**self.hparams.z_size * self.hparams.epsilon) * n) + updated_ema_means = updated_ema_means / tf.expand_dims( + updated_ema_count, axis=-1) + + with tf.control_dependencies([e_loss]): + update_means = tf.assign(self.means, updated_ema_means) + with tf.control_dependencies([update_means]): + loss += self.hparams.beta * e_loss + else: + # Use a gradient based loss for learning the cluster centers + loss += q_loss + self.hparams.beta * e_loss + + # Get the discrete latent representation + x_means_idx = tf.argmax(x_means_hot, axis=-1) + + # Get the binary representation + num_bits = int(self.hparams.z_size // self.hparams.num_blocks) + x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2) + x_discrete = self.bit_to_int( + tf.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2) + + # Reshape x_discrete + shape_x = common_layers.shape_list(x) + shape_discrete = shape_x[:-1] + x_discrete = tf.reshape(x_discrete, shape_discrete) + x_means = tf.reshape(x_means, shape=shape_x) + h1 = x + tf.stop_gradient(x_means - x) + + h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2") + res = tf.layers.dense( + tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin") + embed_fn = partial(self.embed) + return { + "dense": res, + "discrete": x_discrete, + "loss": loss, + "embed": embed_fn + } diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/models/__init__.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f1809d5897fb0269794e110f6decce69fed3cfd --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/models/__init__.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. 
an Intel Company +############################################################################### +# Changes: +# - updated and removed unnecessary imports + +"""Models defined in T2T. Imports here force registration.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import six + +# pylint: disable=unused-import + +from TensorFlow.nlp.transformer.layers import modalities # pylint: disable=g-import-not-at-top +from TensorFlow.nlp.transformer.models import transformer +from TensorFlow.nlp.transformer.utils import contrib +from TensorFlow.nlp.transformer.utils import registry + + +def model(name): + return registry.model(name) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/models/transformer.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/models/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..0bd7571706e7b22def6e1521da935f1082dda9fa --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/models/transformer.py @@ -0,0 +1,2904 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021-2022 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - updated imports +# - removed mlperf logs +# - disabled image_summary in encoder_function +# - removed modalities.ModalityType.CLASS_LABEL +# - changed to_float to cast +# - cleaned hparams for transformer +# - added 'use_static_shapes' hparam +# - removed unused hparam sets +# - replaced tf.slice with tf.gather +# - added support for fast inference on HPU + +"""Transformer model from "Attention Is All You Need". + +The Transformer model consists of an encoder and a decoder. Both are stacks +of self-attention layers followed by feed-forward layers. This model yields +good results on a number of problems, especially in NLP and machine translation. + +See "Attention Is All You Need" (https://arxiv.org/abs/1706.03762) for the full +description of the model and the results obtained with its early version. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from six.moves import range # pylint: disable=redefined-builtin + +from TensorFlow.nlp.transformer.layers import common_attention +from TensorFlow.nlp.transformer.layers import common_layers +from TensorFlow.nlp.transformer.layers import modalities +from TensorFlow.nlp.transformer.layers import transformer_layers +from TensorFlow.nlp.transformer.layers import transformer_memory +from TensorFlow.nlp.transformer.utils import beam_search +from TensorFlow.nlp.transformer.utils import expert_utils +from TensorFlow.nlp.transformer.utils import hparam +from TensorFlow.nlp.transformer.utils import registry +from TensorFlow.nlp.transformer.utils import t2t_model + +import tensorflow.compat.v1 as tf + +# pylint: disable=g-direct-tensorflow-import +from tensorflow.python.ops import inplace_ops +from tensorflow.python.util import nest +# pylint: enable=g-direct-tensorflow-import + +# Alias some commonly reused layers, here and elsewhere. +transformer_prepare_encoder = transformer_layers.transformer_prepare_encoder +transformer_encoder = transformer_layers.transformer_encoder +transformer_ffn_layer = transformer_layers.transformer_ffn_layer + + +def transformer_encode(encoder_function, inputs, target_space, hparams, + attention_weights=None, features=None, losses=None, + prepare_encoder_fn=None, **kwargs): + """Encode transformer inputs. + + Args: + encoder_function: the encoder function + inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which + will be flattened along the two spatial dimensions. + target_space: scalar, target space ID. + hparams: hyperparameters for model. + attention_weights: weight to store attention to. + features: optionally pass the entire features dictionary as well. This is + needed now for "packed" datasets. + losses: optional list onto which to append extra training losses + prepare_encoder_fn: optional, alternative to transformer_prepare_encoder. + **kwargs: additional arguments to pass to encoder_function + + Returns: + Tuple of: + encoder_output: Encoder representation. + [batch_size, input_length, hidden_dim] + encoder_decoder_attention_bias: Bias and mask weights for + encoder-decoder attention. [batch_size, input_length] + """ + inputs = common_layers.flatten4d3d(inputs) + + if not prepare_encoder_fn: + prepare_encoder_fn = transformer_prepare_encoder + encoder_input, self_attention_bias, encoder_decoder_attention_bias = ( + prepare_encoder_fn( + inputs, target_space, hparams, features=features)) + + encoder_input = tf.nn.dropout(encoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + + attn_bias_for_padding = None + # Otherwise the encoder will just use encoder_self_attention_bias. 
+ if hparams.unidirectional_encoder: + attn_bias_for_padding = encoder_decoder_attention_bias + + encoder_output = encoder_function( + encoder_input, + self_attention_bias, + hparams, + nonpadding=features_to_nonpadding(features, "inputs"), + save_weights_to=attention_weights, + make_image_summary=False, + losses=losses, + attn_bias_for_padding=attn_bias_for_padding, + **kwargs) + + return encoder_output, encoder_decoder_attention_bias + + +def transformer_decode(decoder_function, + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + attention_weights=None, + cache=None, + decode_loop_step=None, + nonpadding=None, + losses=None, + **kwargs): + """Decode Transformer outputs from encoder representation. + + Args: + decoder_function: the decoder function + decoder_input: inputs to bottom of the model. [batch_size, decoder_length, + hidden_dim] + encoder_output: Encoder representation. [batch_size, input_length, + hidden_dim] + encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder + attention. [batch_size, input_length] + decoder_self_attention_bias: Bias and mask weights for decoder + self-attention. [batch_size, decoder_length] + hparams: hyperparameters for model. + attention_weights: weight to store attention to. + cache: dict, containing tensors which are the results of previous + attentions, used for fast decoding. + decode_loop_step: An integer, step number of the decoding loop. Only used + for inference on TPU. + nonpadding: optional Tensor with shape [batch_size, decoder_length] + losses: optional list onto which to append extra training losses + **kwargs: additional arguments to pass to decoder_function + + Returns: + Final decoder representation. [batch_size, decoder_length, hidden_dim] + """ + decoder_input = tf.nn.dropout(decoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + + decoder_output = decoder_function( + decoder_input, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + cache=cache, + decode_loop_step=decode_loop_step, + nonpadding=nonpadding, + save_weights_to=attention_weights, + losses=losses, + **kwargs) + + if (common_layers.is_xla_compiled() and + hparams.mode == tf.estimator.ModeKeys.TRAIN): + # TPU does not react kindly to extra dimensions. + # TODO(noam): remove this once TPU is more forgiving of extra dims. + return decoder_output + else: + # Expand since t2t expects 4d tensors. + return tf.expand_dims(decoder_output, axis=2) + + +@registry.register_model +class Transformer(t2t_model.T2TModel): + """Attention net. See file docstring.""" + + def __init__(self, *args, **kwargs): + super(Transformer, self).__init__(*args, **kwargs) + self.attention_weights = {} # For visualizing attention heads. 
+ self.recurrent_memory_by_layer = None # Override to enable recurrent memory + self._encoder_function = transformer_encoder + self._decoder_function = transformer_decoder + self._init_cache_fn = _init_transformer_cache + self._prepare_encoder_fn = transformer_prepare_encoder + self._prepare_decoder_fn = transformer_prepare_decoder + + def encode(self, inputs, target_space, hparams, features=None, losses=None): + """Encode transformer inputs, see transformer_encode.""" + return transformer_encode( + self._encoder_function, inputs, target_space, hparams, + attention_weights=self.attention_weights, + features=features, losses=losses, + prepare_encoder_fn=self._prepare_encoder_fn) + + def decode(self, + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + cache=None, + decode_loop_step=None, + nonpadding=None, + losses=None, + **kwargs): + """Decode Transformer outputs, see transformer_decode.""" + return transformer_decode( + self._decoder_function, decoder_input, encoder_output, + encoder_decoder_attention_bias, decoder_self_attention_bias, + hparams, attention_weights=self.attention_weights, cache=cache, + decode_loop_step=decode_loop_step, nonpadding=nonpadding, losses=losses, + **kwargs) + + def body(self, features): + """Transformer main model_fn. + + Args: + features: Map of features to the model. Should contain the following: + "inputs": Transformer inputs. [batch_size, input_length, 1, + hidden_dim]. + "targets": Target decoder outputs. [batch_size, decoder_length, 1, + hidden_dim] + "target_space_id": A scalar int from data_generators.problem.SpaceID. + + Returns: + Final decoder representation. [batch_size, decoder_length, hidden_dim] + """ + hparams = self._hparams + + losses = [] + + if self.has_input: + inputs = self._prepare_inputs_for_body(features) + target_space = features["target_space_id"] + encoder_output, encoder_decoder_attention_bias = self.encode( + inputs, target_space, hparams, features=features, losses=losses) + else: + encoder_output, encoder_decoder_attention_bias = (None, None) + + targets = features["targets"] + targets_shape = common_layers.shape_list(targets) + targets = common_layers.flatten4d3d(targets) + decoder_input, decoder_self_attention_bias = self._prepare_decoder_fn( + targets, hparams, features=features) + + # Not all subclasses of Transformer support keyword arguments related to + # recurrent memory, so only pass these arguments if memory is enabled. + decode_kwargs = {} + if self.recurrent_memory_by_layer is not None: + # TODO(kitaev): The chunk_number feature currently has the same shape as + # "targets", but this is only for the purposes of sharing sharding code. + # In fact every token within an example must have the same chunk number. 
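+      # Assuming chunk_number is shaped like targets ([batch, length, 1, 1]),
+      # the squeeze below drops the two trailing singleton axes, and taking
+      # column 0 keeps a single chunk id per example.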
+ chunk_number_each_token = tf.squeeze(features["chunk_number"], (-1, -2)) + chunk_number_each_example = chunk_number_each_token[:, 0] + # Uncomment the code below to verify that tokens within a batch share the + # same chunk number: + # with tf.control_dependencies([ + # tf.assert_equal(chunk_number_each_token, + # chunk_number_each_example[:, None]) + # ]): + # chunk_number_each_example = tf.identity(chunk_number_each_example) + decode_kwargs = dict( + recurrent_memory_by_layer=self.recurrent_memory_by_layer, + chunk_number=chunk_number_each_example, + ) + decoder_output = self.decode( + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + nonpadding=features_to_nonpadding(features, "targets"), + losses=losses, + **decode_kwargs + ) + expected_attentions = features.get("expected_attentions") + if expected_attentions is not None: + attention_loss = common_attention.encoder_decoder_attention_loss( + expected_attentions, self.attention_weights, + hparams.expected_attention_loss_type, + hparams.expected_attention_loss_multiplier) + return decoder_output, {"attention_loss": attention_loss} + + ret = tf.reshape(decoder_output, targets_shape) + if losses: + return ret, {"extra_loss": tf.add_n(losses)} + else: + return ret + + def _prepare_inputs_for_body(self, features): + """Prepare inputs for body. + + Args: + features: Map of string to model features. Should contain + "inputs": Transformer inputs. [batch_size, input_length, 1, + hidden_dim]. + + Returns: + Inputs which will be passed to the model. [batch_size, input_length, 1, + hidden_dim] + """ + return features["inputs"] + + def _greedy_infer(self, features, decode_length, use_tpu=False): + """Fast version of greedy decoding. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + use_tpu: A bool. Whether to build the inference graph for TPU. + + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + + Raises: + NotImplementedError: If there are multiple data shards. + """ + # For real-valued modalities use the slow decode path for now. + if (self._target_modality_is_real or + self._hparams.self_attention_type != "dot_product"): + return super(Transformer, self)._greedy_infer(features, decode_length) + with tf.variable_scope(self.name): + if use_tpu: + return self._fast_decode_tpu(features, decode_length) + return self._fast_decode(features, decode_length) + + def _beam_decode(self, + features, + decode_length, + beam_size, + top_beams, + alpha, + use_tpu=False): + """Beam search decoding. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + beam_size: number of beams. + top_beams: an integer. How many of the beams to return. + alpha: Float that controls the length penalty. larger the alpha, stronger + the preference for longer translations. + use_tpu: A bool, whether to do beam decode on TPU. 
+ + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + """ + if (self._hparams.self_attention_type not in [ + "dot_product", "dot_product_relative" + ]): + # Caching is not guaranteed to work with attention types other than + # dot_product and dot_product_relative. + return self._beam_decode_slow(features, decode_length, beam_size, + top_beams, alpha, use_tpu) + with tf.variable_scope(self.name): + if tf.flags.FLAGS.use_fast_inference or use_tpu: + return self._fast_decode_tpu(features, decode_length, beam_size, + top_beams, alpha) + return self._fast_decode(features, decode_length, beam_size, top_beams, + alpha) + + def _prepare_inputs_for_decode(self, features): + """Prepare inputs for decoding. + + Args: + features: A map of string to model features. + + Returns: + Inputs after fixing shape and applying modality. + """ + dp = self._data_parallelism + hparams = self._hparams + inputs = features["inputs"] + # TODO(llion): Clean up this reshaping logic. + inputs = tf.expand_dims(inputs, axis=1) + if len(inputs.shape) < 5: + inputs = tf.expand_dims(inputs, axis=4) + s = common_layers.shape_list(inputs) + inputs = tf.reshape(inputs, [s[0] * s[1], s[2], s[3], s[4]]) + # _shard_features called to ensure that the variable names match + inputs = self._shard_features({"inputs": inputs})["inputs"] + input_modality = self._problem_hparams.modality["inputs"] + input_vocab_size = self._problem_hparams.vocab_size["inputs"] + if input_vocab_size is not None and hasattr(hparams, "vocab_divisor"): + input_vocab_size += (-input_vocab_size) % hparams.vocab_divisor + modality_name = hparams.name.get("inputs", + modalities.get_name(input_modality))( + hparams, input_vocab_size) + with tf.variable_scope(modality_name): + bottom = hparams.bottom.get("inputs", + modalities.get_bottom(input_modality)) + inputs = dp(bottom, inputs, hparams, input_vocab_size) + return inputs + + def _fast_decode_tpu(self, + features, + decode_length, + beam_size=1, + top_beams=1, + alpha=1.0): + """Fast decoding. + + Implements both greedy and beam search decoding on TPU, uses beam search + iff beam_size > 1, otherwise beam search related arguments are ignored. + + Args: + features: A map of string to model features. + decode_length: An integer, how many additional timesteps to decode. + beam_size: An integer, number of beams. + top_beams: An integer, how many of the beams to return. + alpha: A float that controls the length penalty. Larger the alpha, + stronger the preference for longer translations. + + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + }. + + Raises: + NotImplementedError: If there are multiple data shards. 
+ """ + if self._num_datashards != 1: + raise NotImplementedError("Fast decoding only supports a single shard.") + if "targets_segmentation" in features: + raise NotImplementedError( + "Decoding not supported on packed datasets " + " If you want to decode from a dataset, use the non-packed version" + " of the dataset when decoding.") + dp = self._data_parallelism + hparams = self._hparams + target_modality = self._problem_hparams.modality["targets"] + target_vocab_size = self._problem_hparams.vocab_size["targets"] + if target_vocab_size is not None and hasattr(hparams, "vocab_divisor"): + target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor + + if self.has_input: + inputs_shape = common_layers.shape_list(features["inputs"]) + if (self._problem_hparams.get("regression_targets")): + decode_length = 1 + else: + decode_length = ( + inputs_shape[1] + features.get("decode_length", decode_length)) + batch_size = inputs_shape[0] + inputs = self._prepare_inputs_for_decode(features) + with tf.variable_scope("body"): + encoder_output, encoder_decoder_attention_bias = dp( + self.encode, + inputs, + features["target_space_id"], + hparams, + features=features) + encoder_output = encoder_output[0] + encoder_decoder_attention_bias = encoder_decoder_attention_bias[0] + partial_targets = None + else: + # The problem has no inputs. + encoder_output = None + encoder_decoder_attention_bias = None + + # Prepare partial targets. + # In either features["inputs"] or features["targets"]. + # We force the outputs to begin with these sequences. + partial_targets = features.get("inputs") + if partial_targets is None: + partial_targets = features["targets"] + assert partial_targets is not None + partial_targets = common_layers.expand_squeeze_to_nd(partial_targets, 2) + partial_targets = tf.to_int64(partial_targets) + partial_targets_shape = common_layers.shape_list(partial_targets) + partial_targets_length = partial_targets_shape[1] + decode_length = ( + partial_targets_length + features.get("decode_length", decode_length)) + batch_size = partial_targets_shape[0] + + if hparams.pos == "timing": + positional_encoding = common_attention.get_timing_signal_1d( + decode_length + 1, hparams.hidden_size) + elif hparams.pos == "timing_from_features": + positional_encoding = common_attention.add_timing_signals_from_features( + tf.zeros([1, decode_length + 1, hparams.hidden_size]), features, + hparams.position_features) + elif hparams.pos == "emb": + positional_encoding = common_attention.add_positional_embedding( + tf.zeros([1, decode_length + 1, hparams.hidden_size]), + hparams.max_length, "body/targets_positional_embedding", None) + else: + positional_encoding = None + + def preprocess_targets(targets, i): + """Performs preprocessing steps on the targets to prepare for the decoder. + + This includes: + - Embedding the ids. + - Flattening to 3D tensor. + - Optionally adding timing signals. + + Args: + targets: A tensor, inputs ids to the decoder. [batch_size, 1]. + i: An integer, Step number of the decoding loop. + + Returns: + A tensor, processed targets [batch_size, 1, hidden_dim]. 
+ """ + # _shard_features called to ensure that the variable names match + targets = self._shard_features({"targets": targets})["targets"] + modality_name = hparams.name.get( + "targets", + modalities.get_name(target_modality))(hparams, target_vocab_size) + with tf.variable_scope(modality_name): + bottom = hparams.bottom.get( + "targets", modalities.get_targets_bottom(target_modality)) + targets = dp(bottom, targets, hparams, target_vocab_size)[0] + targets = common_layers.flatten4d3d(targets) + + # GO embeddings are all zero, this is because transformer_prepare_decoder + # Shifts the targets along by one for the input which pads with zeros. + # If the modality already maps GO to the zero embeddings this is not + # needed. + targets = tf.cond( + tf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets) + + if positional_encoding is not None: + targets += tf.gather(positional_encoding, [i], axis=1) + return targets + + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle(decode_length)) + if hparams.proximity_bias: + decoder_self_attention_bias += common_attention.attention_bias_proximal( + decode_length) + + def symbols_to_logits_tpu_fn(ids, i, cache): + """Go from ids to logits for next symbol on TPU. + + Args: + ids: A tensor, symbol IDs. + i: An integer, step number of the decoding loop. Only used for inference + on TPU. + cache: A dict, containing tensors which are the results of previous + attentions, used for fast decoding. + + Returns: + ret: A tensor, computed logits. + cache: A dict, containing tensors which are the results of previous + attentions, used for fast decoding. + """ + ids = ids[:, -1:] + targets = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3) + targets = preprocess_targets(targets, i) + bias = tf.gather(decoder_self_attention_bias, [i], axis=2) + + with tf.variable_scope("body"): + body_outputs = dp( + self.decode, + targets, + cache.get("encoder_output"), + cache.get("encoder_decoder_attention_bias"), + bias, + hparams, + cache, + i, + nonpadding=features_to_nonpadding(features, "targets")) + modality_name = hparams.name.get( + "targets", + modalities.get_name(target_modality))(hparams, target_vocab_size) + with tf.variable_scope(modality_name): + top = hparams.top.get("targets", + modalities.get_top(target_modality)) + logits = dp(top, body_outputs, None, hparams, target_vocab_size)[0] + + ret = tf.squeeze(logits, axis=[1, 2, 3]) + if partial_targets is not None: + # If the position is within the given partial targets, we alter the + # logits to always return those values. + # A faster approach would be to process the partial targets in one + # iteration in order to fill the corresponding parts of the cache. + # This would require broader changes, though. 
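+        # Illustrative example: if the partial target at step i is id 7, the
+        # one-hot below yields logits of 0.0 at index 7 and -1e9 everywhere
+        # else, forcing the decoder to emit 7 at that step.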
+ vocab_size = tf.shape(ret)[1] + + def forced_logits(): + return tf.one_hot( + tf.tile( + tf.slice(partial_targets, [0, i], + [partial_targets.shape.as_list()[0], 1]), + [beam_size]), vocab_size, 0.0, -1e9) + + ret = tf.cond( + tf.less(i, partial_targets_length), forced_logits, lambda: ret) + return ret, cache + + eos_id = self.get_decode_end_id() or beam_search.EOS_ID + temperature = features.get("sampling_temp", + getattr(hparams, "sampling_temp", 0.0)) + top_k = features.get("sampling_keep_top_k", + getattr(hparams, "sampling_keep_top_k", -1)) + + ret = fast_decode_tpu( + encoder_output=encoder_output, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + symbols_to_logits_fn=symbols_to_logits_tpu_fn, + hparams=hparams, + decode_length=decode_length, + vocab_size=target_vocab_size, + init_cache_fn=self._init_cache_fn, + beam_size=beam_size, + top_beams=top_beams, + alpha=alpha, + batch_size=batch_size, + force_decode_length=self._decode_hparams.force_decode_length, + eos_id=eos_id, + sampling_temperature=temperature, + top_k=top_k, + use_top_k_with_unique=not tf.flags.FLAGS.use_fast_inference) + if partial_targets is not None: + if beam_size <= 1 or top_beams <= 1: + ret["outputs"] = ret["outputs"][:, partial_targets_length:] + else: + ret["outputs"] = ret["outputs"][:, :, partial_targets_length:] + return ret + + def get_decode_start_id(self): + """Returns the id of the first decoder input symbol. + + The default case maps None to a vector of 0's for transformer. This method + can be overridden to return a different id by a model wanting to use a + different decoder start symbol. The id returned by this method is used to + index the embedding matrix, and retrieve the vector that will be used as the + first input to the decoder + """ + return None + + def get_decode_end_id(self): + """Returns the id of the output symbol that terminates decoding. + + This method can be overridden by a different model. The id returned by this + method is used to check if the generation is complete during decoding. + """ + return None + + def _fast_decode(self, + features, + decode_length, + beam_size=1, + top_beams=1, + alpha=1.0, + preprocess_targets_method=None): + """Fast decoding. + + Implements both greedy and beam search decoding, uses beam search iff + beam_size > 1, otherwise beam search related arguments are ignored. + + Args: + features: a map of string to model features. + decode_length: an integer. How many additional timesteps to decode. + beam_size: number of beams. + top_beams: an integer. How many of the beams to return. + alpha: Float that controls the length penalty. larger the alpha, stronger + the preference for longer translations. + preprocess_targets_method: method used to preprocess targets. If None, + uses method "preprocess_targets" defined inside this method. + + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + + Raises: + NotImplementedError: If there are multiple data shards. 
+ """ + if self._num_datashards != 1: + raise NotImplementedError("Fast decoding only supports a single shard.") + dp = self._data_parallelism + hparams = self._hparams + target_modality = self._problem_hparams.modality["targets"] + target_vocab_size = self._problem_hparams.vocab_size["targets"] + if target_vocab_size is not None and hasattr(hparams, "vocab_divisor"): + target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor + if "targets_segmentation" in features: + raise NotImplementedError( + "Decoding not supported on packed datasets " + " If you want to decode from a dataset, use the non-packed version" + " of the dataset when decoding.") + if self.has_input: + inputs_shape = common_layers.shape_list(features["inputs"]) + if (self._problem_hparams.get("regression_targets")): + decode_length = 1 + else: + decode_length = ( + inputs_shape[1] + features.get("decode_length", decode_length)) + batch_size = inputs_shape[0] + inputs = self._prepare_inputs_for_decode(features) + with tf.variable_scope("body"): + encoder_output, encoder_decoder_attention_bias = dp( + self.encode, + inputs, + features["target_space_id"], + hparams, + features=features) + encoder_output = encoder_output[0] + encoder_decoder_attention_bias = encoder_decoder_attention_bias[0] + partial_targets = features.get("partial_targets") + else: + # The problem has no inputs. + encoder_output = None + encoder_decoder_attention_bias = None + + # Prepare partial targets. + # In either features["inputs"] or features["targets"]. + # We force the outputs to begin with these sequences. + partial_targets = features.get("inputs") + if partial_targets is None: + partial_targets = features["targets"] + assert partial_targets is not None + + if partial_targets is not None: + partial_targets = common_layers.expand_squeeze_to_nd(partial_targets, 2) + partial_targets = tf.to_int64(partial_targets) + partial_targets_shape = common_layers.shape_list(partial_targets) + partial_targets_length = partial_targets_shape[1] + decode_length = ( + partial_targets_length + features.get("decode_length", decode_length)) + batch_size = partial_targets_shape[0] + + if hparams.pos == "timing": + positional_encoding = common_attention.get_timing_signal_1d( + decode_length + 1, hparams.hidden_size) + elif hparams.pos == "timing_from_features": + positional_encoding = common_attention.add_timing_signals_from_features( + tf.zeros([1, decode_length, hparams.hidden_size]), features, + hparams.position_features) + elif hparams.pos == "emb": + positional_encoding = common_attention.add_positional_embedding( + tf.zeros([1, decode_length, hparams.hidden_size]), hparams.max_length, + "body/targets_positional_embedding", None) + else: + positional_encoding = None + + def preprocess_targets(targets, i): + """Performs preprocessing steps on the targets to prepare for the decoder. + + This includes: + - Embedding the ids. + - Flattening to 3D tensor. + - Optionally adding timing signals. + + Args: + targets: inputs ids to the decoder. [batch_size, 1] + i: scalar, Step number of the decoding loop. 
+ + Returns: + Processed targets [batch_size, 1, hidden_dim] + """ + # _shard_features called to ensure that the variable names match + targets = self._shard_features({"targets": targets})["targets"] + modality_name = hparams.name.get( + "targets", + modalities.get_name(target_modality))(hparams, target_vocab_size) + with tf.variable_scope(modality_name): + bottom = hparams.bottom.get( + "targets", modalities.get_targets_bottom(target_modality)) + targets = dp(bottom, targets, hparams, target_vocab_size)[0] + targets = common_layers.flatten4d3d(targets) + + # GO embeddings are all zero, this is because transformer_prepare_decoder + # Shifts the targets along by one for the input which pads with zeros. + # If the modality already maps GO to the zero embeddings this is not + # needed. + if not self.get_decode_start_id(): + targets = tf.cond( + tf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets) + + if positional_encoding is not None: + targets += positional_encoding[:, i:i + 1] + return targets + + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle(decode_length)) + if hparams.proximity_bias: + decoder_self_attention_bias += common_attention.attention_bias_proximal( + decode_length) + + # Create tensors for encoder-decoder attention history + att_cache = {"attention_history": {}} + num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers + if encoder_output is not None: + att_batch_size, enc_seq_length = common_layers.shape_list( + encoder_output)[0:2] + for layer in range(num_layers): + att_cache["attention_history"]["layer_%d" % layer] = tf.zeros( + [att_batch_size, hparams.num_heads, 0, enc_seq_length]) + + def update_decoder_attention_history(cache): + """Save attention weights in cache, e.g., for vizualization.""" + for k in [x for x in self.attention_weights + if "decoder" in x and "self" not in x and "logits" not in x]: + idx = k.find("layer_") + if idx < 0: + continue + # Get layer number from the string name. + layer_nbr = k[idx + 6:] + idx = 0 + while idx + 1 < len(layer_nbr) and layer_nbr[:idx + 1].isdigit(): + idx += 1 + layer_nbr = "layer_%d" % int(layer_nbr[:idx]) + if layer_nbr in cache["attention_history"]: + cache["attention_history"][layer_nbr] = tf.concat( + [cache["attention_history"][layer_nbr], + self.attention_weights[k]], + axis=2) + if not preprocess_targets_method: + preprocess_targets_method = preprocess_targets + + def symbols_to_logits_fn(ids, i, cache): + """Go from ids to logits for next symbol.""" + ids = ids[:, -1:] + targets = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3) + targets = preprocess_targets_method(targets, i) + + bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1] + with tf.variable_scope("body"): + body_outputs = dp( + self.decode, + targets, + cache.get("encoder_output"), + cache.get("encoder_decoder_attention_bias"), + bias, + hparams, + cache, + nonpadding=features_to_nonpadding(features, "targets")) + + update_decoder_attention_history(cache) + + modality_name = hparams.name.get( + "targets", + modalities.get_name(target_modality))(hparams, target_vocab_size) + with tf.variable_scope(modality_name): + top = hparams.top.get("targets", modalities.get_top(target_modality)) + logits = dp(top, body_outputs, None, hparams, target_vocab_size)[0] + + ret = tf.squeeze(logits, axis=[1, 2, 3]) + if partial_targets is not None: + # If the position is within the given partial targets, we alter the + # logits to always return those values. 
+ # A faster approach would be to process the partial targets in one + # iteration in order to fill the corresponding parts of the cache. + # This would require broader changes, though. + vocab_size = tf.shape(ret)[1] + + def forced_logits(): + return tf.one_hot( + tf.tile(partial_targets[:, i], [beam_size]), vocab_size, 0.0, + -1e9) + + ret = tf.cond( + tf.less(i, partial_targets_length), forced_logits, lambda: ret) + return ret, cache + + sos_id = self.get_decode_start_id() or 0 + eos_id = self.get_decode_end_id() or beam_search.EOS_ID + temperature = features.get("sampling_temp", + getattr(hparams, "sampling_temp", 0.0)) + top_k = features.get("sampling_keep_top_k", + getattr(hparams, "sampling_keep_top_k", -1)) + + ret = fast_decode( + encoder_output=encoder_output, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + symbols_to_logits_fn=symbols_to_logits_fn, + hparams=hparams, + decode_length=decode_length, + vocab_size=target_vocab_size, + init_cache_fn=self._init_cache_fn, + beam_size=beam_size, + top_beams=top_beams, + alpha=alpha, + batch_size=batch_size, + force_decode_length=self._decode_hparams.force_decode_length, + sos_id=sos_id, + eos_id=eos_id, + sampling_temperature=temperature, + top_k=top_k, + cache=att_cache) + if partial_targets is not None: + if beam_size <= 1 or top_beams <= 1: + ret["outputs"] = ret["outputs"][:, partial_targets_length:] + else: + ret["outputs"] = ret["outputs"][:, :, partial_targets_length:] + return ret + + +def _init_transformer_cache(cache, hparams, batch_size, attention_init_length, + encoder_output, encoder_decoder_attention_bias, + scope_prefix): + """Create the initial cache for Transformer fast decoding.""" + key_channels = hparams.attention_key_channels or hparams.hidden_size + value_channels = hparams.attention_value_channels or hparams.hidden_size + num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers + vars_3d_num_heads = ( + hparams.num_heads if hparams.get("attention_variables_3d") else 0) + + if cache is None: + cache = {} + cache.update({ + "layer_%d" % layer: { # pylint: disable=g-complex-comprehension + "k": + common_attention.split_heads( + tf.zeros([batch_size, + attention_init_length, + key_channels]), hparams.num_heads), + "v": + common_attention.split_heads( + tf.zeros([batch_size, + attention_init_length, + value_channels]), hparams.num_heads), + } for layer in range(num_layers) + }) + + # If `ffn_layer` is in `["dense_relu_dense" or "conv_hidden_relu"]`, then the + # cache key "f" won't be used, which means that the` shape of cache["f"]` + # won't be changed to + # `[beamsize*batch_size, decode_length, hparams.hidden_size]` and may cause + # error when applying `nest.map reshape function` on it. 
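+  # Sketch of the cache layout built here (shapes follow
+  # common_attention.split_heads):
+  #   cache["layer_<i>"]["k"] / ["v"]:
+  #       [batch, num_heads, attention_init_length, depth // num_heads]
+  #   cache["layer_<i>"]["f"] (only for the ffn layers handled below):
+  #       [batch, 0, hidden_size]
+  #   cache["layer_<i>"]["k_encdec"] / ["v_encdec"]: projected encoder output
+  #   cache["encoder_output"], cache["encoder_decoder_attention_bias"]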
+ if hparams.ffn_layer not in ["dense_relu_dense", "conv_hidden_relu"]: + for layer in range(num_layers): + cache["layer_%d" % layer]["f"] = tf.zeros( + [batch_size, 0, hparams.hidden_size]) + + if encoder_output is not None: + for layer in range(num_layers): + layer_name = "layer_%d" % layer + with tf.variable_scope( + "%sdecoder/%s/encdec_attention/multihead_attention" % + (scope_prefix, layer_name)): + k_encdec = common_attention.compute_attention_component( + encoder_output, + key_channels, + name="k", + vars_3d_num_heads=vars_3d_num_heads) + k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads) + v_encdec = common_attention.compute_attention_component( + encoder_output, + value_channels, + name="v", + vars_3d_num_heads=vars_3d_num_heads) + v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads) + cache[layer_name]["k_encdec"] = k_encdec + cache[layer_name]["v_encdec"] = v_encdec + + cache["encoder_output"] = encoder_output + cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias + return cache + + +def fast_decode_tpu(encoder_output, + encoder_decoder_attention_bias, + symbols_to_logits_fn, + hparams, + decode_length, + vocab_size, + init_cache_fn=_init_transformer_cache, + beam_size=1, + top_beams=1, + alpha=1.0, + sos_id=0, + eos_id=beam_search.EOS_ID, + batch_size=None, + force_decode_length=False, + scope_prefix="body/", + use_top_k_with_unique=True, + sampling_temperature=0.0, + top_k=-1): + """Given encoder output and a symbols to logits function, does fast decoding. + + Implements both greedy and beam search decoding for TPU, uses beam search iff + beam_size > 1, otherwise beam search related arguments are ignored. + + Args: + encoder_output: A tensor, output from encoder. + encoder_decoder_attention_bias: A tensor, bias for use in encoder-decoder + attention. + symbols_to_logits_fn: Incremental decoding, function mapping triple `(ids, + step, cache)` to symbol logits. + hparams: Run hyperparameters. + decode_length: An integer, how many additional timesteps to decode. + vocab_size: Output vocabulary size. + init_cache_fn: Function that returns the initial cache dict. + beam_size: An integer, number of beams. + top_beams: An integer, how many of the beams to return. + alpha: A float that controls the length penalty. Larger the alpha, stronger + the preference for longer translations. + sos_id: Start-of-sequence symbol. + eos_id: End-of-sequence symbol. + batch_size: An integer, must be passed if there is no input. + force_decode_length: A bool, whether to force the full decode length, or if + False, stop when all beams hit eos_id. + scope_prefix: str, prefix for decoder layer variable scopes. + use_top_k_with_unique: bool, whether to use a fast (but decreased precision) + top_k during beam search. + sampling_temperature: scalar, temperature with which to sample. + top_k: scalar, sample only top k. + + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if top_beams == 1 or + [batch_size, top_beams, <= decode_length] otherwise + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + }. + + Raises: + NotImplementedError: If beam size > 1 with partial targets. 
+ """ + if encoder_output is not None: + batch_size = common_layers.shape_list(encoder_output)[0] + + cache = init_cache_fn(None, hparams, batch_size, decode_length, + encoder_output, encoder_decoder_attention_bias, + scope_prefix) + + if beam_size > 1: # Beam Search + initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32) + decoded_ids, scores, _ = beam_search.beam_search( + symbols_to_logits_fn, + initial_ids, + beam_size, + decode_length, + vocab_size, + alpha, + states=cache, + eos_id=eos_id, + stop_early=(top_beams == 1), + use_tpu=True, + use_top_k_with_unique=use_top_k_with_unique) + + if top_beams == 1: + decoded_ids = decoded_ids[:, 0, 1:] + scores = scores[:, 0] + else: + decoded_ids = decoded_ids[:, :top_beams, 1:] + scores = scores[:, :top_beams] + else: # Greedy + + def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob): + """One step of greedy decoding.""" + logits, cache = symbols_to_logits_fn(next_id, i, cache) + log_probs = common_layers.log_prob_from_logits(logits) + temperature = sampling_temperature + if hparams.sampling_method == "random_per_example": + next_id = common_layers.sample_temperature_per_example( + logits, temperature, top_k) + else: + if hparams.sampling_method == "argmax": + temperature = 0.0 + next_id = common_layers.sample_with_temperature(logits, temperature, + top_k) + + log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id], + axis=1) + log_prob += tf.gather_nd( + log_probs, log_prob_indices) * (1 - tf.cast(hit_eos, tf.float32)) + # Note(thangluong): we purposely update hit_eos after aggregating log_prob + # There is a subtle detail here that we want to include log_probs up to + # (and inclusive of) the first eos generated, but not subsequent tokens. + hit_eos |= tf.equal(next_id, eos_id) + + next_id = tf.expand_dims(next_id, axis=1) + decoded_ids = tf.transpose(decoded_ids) + decoded_ids = inplace_ops.alias_inplace_update( + decoded_ids, i, tf.squeeze(next_id, axis=1)) + decoded_ids = tf.transpose(decoded_ids) + return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob + + def is_not_finished(i, hit_eos, *_): + finished = i >= decode_length + if not force_decode_length: + finished |= tf.reduce_all(hit_eos) + return tf.logical_not(finished) + + decoded_ids = tf.zeros([batch_size, decode_length], dtype=tf.int64) + hit_eos = tf.fill([batch_size], False) + next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64) + initial_log_prob = tf.zeros([batch_size], dtype=tf.float32) + + def compute_cache_shape_invariants(tensor): + return tf.TensorShape(tensor.shape.as_list()) + + _, _, _, decoded_ids, _, log_prob = tf.while_loop( + is_not_finished, + inner_loop, [ + tf.constant(0), hit_eos, next_id, decoded_ids, cache, + initial_log_prob + ], + shape_invariants=[ + tf.TensorShape([]), + tf.TensorShape([batch_size]), + tf.TensorShape([batch_size, 1]), + tf.TensorShape([batch_size, decode_length]), + nest.map_structure(compute_cache_shape_invariants, cache), + tf.TensorShape([batch_size]), + ]) + scores = log_prob + + return {"outputs": decoded_ids, "scores": scores} + + +def fast_decode(encoder_output, + encoder_decoder_attention_bias, + symbols_to_logits_fn, + hparams, + decode_length, + vocab_size, + init_cache_fn=_init_transformer_cache, + beam_size=1, + top_beams=1, + alpha=1.0, + sos_id=0, + eos_id=beam_search.EOS_ID, + batch_size=None, + force_decode_length=False, + scope_prefix="body/", + sampling_temperature=0.0, + top_k=-1, + cache=None): + """Given encoder output and a symbols to logits function, does fast 
decoding.
+
+  Implements both greedy and beam search decoding, uses beam search iff
+  beam_size > 1, otherwise beam search related arguments are ignored.
+
+  Args:
+    encoder_output: Output from encoder.
+    encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
+      attention
+    symbols_to_logits_fn: Incremental decoding; function mapping triple `(ids,
+      step, cache)` to symbol logits.
+    hparams: run hyperparameters
+    decode_length: an integer. How many additional timesteps to decode.
+    vocab_size: Output vocabulary size.
+    init_cache_fn: Function that returns the initial cache dict.
+    beam_size: number of beams.
+    top_beams: an integer. How many of the beams to return.
+    alpha: Float that controls the length penalty. larger the alpha, stronger
+      the preference for longer translations.
+    sos_id: Start-of-sequence symbol in beam search.
+    eos_id: End-of-sequence symbol in beam search.
+    batch_size: an integer scalar - must be passed if there is no input
+    force_decode_length: bool, whether to force the full decode length, or if
+      False, stop when all beams hit eos_id.
+    scope_prefix: str, prefix for decoder layer variable scopes.
+    sampling_temperature: scalar, temperature with which to sample.
+    top_k: scalar, sample only top k.
+    cache: cache dictionary for additional predictions.
+
+  Returns:
+    A dict of decoding results {
+        "outputs": integer `Tensor` of decoded ids of shape
+            [batch_size, <= decode_length] if top_beams == 1 or
+            [batch_size, top_beams, <= decode_length] otherwise
+        "scores": decoding log probs from the beam search,
+            None if using greedy decoding (beam_size=1)
+    }
+  """
+  if encoder_output is not None:
+    batch_size = common_layers.shape_list(encoder_output)[0]
+
+  cache = init_cache_fn(
+      cache=cache,
+      hparams=hparams,
+      batch_size=batch_size,
+      attention_init_length=0,
+      encoder_output=encoder_output,
+      encoder_decoder_attention_bias=encoder_decoder_attention_bias,
+      scope_prefix=scope_prefix)
+
+  if beam_size > 1:  # Beam Search
+    initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32)
+    decoded_ids, scores, cache = beam_search.beam_search(
+        symbols_to_logits_fn,
+        initial_ids,
+        beam_size,
+        decode_length,
+        vocab_size,
+        alpha,
+        states=cache,
+        eos_id=eos_id,
+        stop_early=(top_beams == 1))
+
+    if top_beams == 1:
+      decoded_ids = decoded_ids[:, 0, 1:]
+      scores = scores[:, 0]
+    else:
+      decoded_ids = decoded_ids[:, :top_beams, 1:]
+      scores = scores[:, :top_beams]
+  else:  # Greedy
+
+    def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob):
+      """One step of greedy decoding."""
+      logits, cache = symbols_to_logits_fn(next_id, i, cache)
+      log_probs = common_layers.log_prob_from_logits(logits)
+      temperature = sampling_temperature
+      if hparams.sampling_method == "random_per_example":
+        next_id = common_layers.sample_temperature_per_example(
+            logits, temperature, top_k)
+      else:
+        if hparams.sampling_method == "argmax":
+          temperature = 0.0
+        next_id = common_layers.sample_with_temperature(logits, temperature,
+                                                        top_k)
+
+      log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id],
+                                  axis=1)
+      log_prob += tf.gather_nd(
+          log_probs, log_prob_indices) * (1 - tf.cast(hit_eos, tf.float32))
+      # Note(thangluong): we purposely update hit_eos after aggregating log_prob
+      # There is a subtle detail here that we want to include log_probs up to
+      # (and inclusive of) the first eos generated, but not subsequent tokens.
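+      # Concretely: once hit_eos is True for an example, the
+      # (1 - tf.cast(hit_eos, tf.float32)) factor above zeroes its
+      # contribution, so its score stays frozen at the log-prob accumulated
+      # through its first EOS token.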
+ hit_eos |= tf.equal(next_id, eos_id) + + next_id = tf.expand_dims(next_id, axis=1) + decoded_ids = tf.concat([decoded_ids, next_id], axis=1) + + return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob + + def is_not_finished(i, hit_eos, *_): + finished = i >= decode_length + if not force_decode_length: + finished |= tf.reduce_all(hit_eos) + return tf.logical_not(finished) + + decoded_ids = tf.zeros([batch_size, 0], dtype=tf.int64) + hit_eos = tf.fill([batch_size], False) + next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64) + initial_log_prob = tf.zeros([batch_size], dtype=tf.float32) + _, _, _, decoded_ids, cache, log_prob = tf.while_loop( + is_not_finished, + inner_loop, [ + tf.constant(0), hit_eos, next_id, decoded_ids, cache, + initial_log_prob + ], + shape_invariants=[ + tf.TensorShape([]), + tf.TensorShape([None]), + tf.TensorShape([None, None]), + tf.TensorShape([None, None]), + nest.map_structure(beam_search.get_state_shape_invariants, cache), + tf.TensorShape([None]), + ]) + scores = log_prob + + return {"outputs": decoded_ids, "scores": scores, "cache": cache} + + +@registry.register_model +class TransformerScorer(Transformer): + """Transformer model, but only scores in PREDICT mode. + + Checkpoints between Transformer and TransformerScorer are interchangeable. + """ + + def __init__(self, *args, **kwargs): + super(TransformerScorer, self).__init__(*args, **kwargs) + self._name = "transformer" + self._base_name = "transformer" + + def infer(self, + features=None, + decode_length=50, + beam_size=1, + top_beams=1, + alpha=0.0, + use_tpu=False): + """Returns the targets and their log probabilities.""" + del decode_length, beam_size, top_beams, alpha, use_tpu + assert features is not None + + # Run the model + self.hparams.force_full_predict = True + with tf.variable_scope(self.name): + logits, _ = self.model_fn(features) + assert len(logits.shape) == 5 # [batch, time, 1, 1, vocab] + logits = tf.squeeze(logits, [2, 3]) + + # Compute the log probabilities + log_probs = common_layers.log_prob_from_logits(logits) + + targets = features["targets"] + assert len(targets.shape) == 4 # [batch, time, 1, 1] + targets = tf.squeeze(targets, [2, 3]) + + # Slice out the log_probs of the targets + log_probs = common_layers.index_last_dim_with_indices(log_probs, targets) + + # Sum over time to get the log_prob of the sequence + scores = tf.reduce_sum(log_probs, axis=1) + + return {"outputs": targets, "scores": scores} + + +@registry.register_model +class TransformerEncoder(t2t_model.T2TModel): + """Transformer, encoder only.""" + + def body(self, features): + hparams = self._hparams + inputs = features["inputs"] + target_space = features["target_space_id"] + + inputs = common_layers.flatten4d3d(inputs) + + (encoder_input, encoder_self_attention_bias, _) = ( + transformer_prepare_encoder(inputs, target_space, hparams)) + + encoder_input = tf.nn.dropout(encoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + encoder_output = transformer_encoder( + encoder_input, + encoder_self_attention_bias, + hparams, + nonpadding=features_to_nonpadding(features, "inputs")) + encoder_output = tf.expand_dims(encoder_output, 2) + + return encoder_output + + +@registry.register_model +class TransformerRegressor(TransformerEncoder): + """Transformer inheriting from Encoder, for the regression problem. + + Final result is a tensor that has a shape of (?, 1, 1, 1). 
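+
+  The top() below mean-pools the encoder output over the time and width axes
+  and applies a single dense unit, yielding one scalar per example.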
+ """ + + def top(self, body_output, features): + """Computes single scalar value from body_output.""" + + with tf.variable_scope("reg_top_ffn"): + x = body_output + x = tf.reduce_mean(x, axis=[1, 2], keepdims=True) + res = tf.layers.dense(x, 1, name="model_top") + return res + + +def features_to_nonpadding(features, inputs_or_targets="inputs"): + key = inputs_or_targets + "_segmentation" + if features and key in features: + return tf.minimum(tf.cast(features[key], tf.float32), 1.0) + return None + + +def transformer_prepare_decoder(targets, hparams, features=None, pad=None): + """Prepare one shard of the model for the decoder. + + Args: + targets: a Tensor. + hparams: run hyperparameters + features: optionally pass the entire features dictionary as well. This is + needed now for "packed" datasets. + pad: vector to use for padding when shifting targets right + + Returns: + decoder_input: a Tensor, bottom of decoder stack + decoder_self_attention_bias: a bias tensor for use in decoder self-attention + """ + if hparams.causal_decoder_self_attention: + # Causal attention. + if hparams.prepend_mode == "prepend_inputs_full_attention": + decoder_self_attention_bias = ( + common_attention.attention_bias_prepend_inputs_full_attention( + common_attention.embedding_to_padding(targets))) + else: + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle( + common_layers.shape_list(targets)[1])) + else: + # Full attention. + decoder_padding = common_attention.embedding_to_padding(targets) + decoder_self_attention_bias = ( + common_attention.attention_bias_ignore_padding(decoder_padding)) + + if features and "targets_segmentation" in features: + # "Packed" dataset - keep the examples from seeing each other. + targets_segmentation = features["targets_segmentation"] + targets_position = features["targets_position"] + decoder_self_attention_bias += common_attention.attention_bias_same_segment( + targets_segmentation, targets_segmentation) + else: + targets_position = None + if hparams.proximity_bias: + decoder_self_attention_bias += common_attention.attention_bias_proximal( + common_layers.shape_list(targets)[1]) + decoder_input = common_layers.shift_right_3d(targets, pad) + if hparams.pos == "timing": + if targets_position is not None: + decoder_input = common_attention.add_timing_signal_1d_given_position( + decoder_input, targets_position) + else: + decoder_input = common_attention.add_timing_signal_1d(decoder_input) + elif hparams.pos == "timing_from_features": + decoder_input = common_attention.add_timing_signals_from_features( + decoder_input, features, hparams.position_features) + elif hparams.pos == "emb": + decoder_input = common_attention.add_positional_embedding( + decoder_input, hparams.max_length, "targets_positional_embedding", + targets_position) + + if hparams.activation_dtype == "bfloat16": + decoder_self_attention_bias = tf.cast(decoder_self_attention_bias, + tf.bfloat16) + return (decoder_input, decoder_self_attention_bias) + + +def transformer_self_attention_layer(decoder_input, + decoder_self_attention_bias, + layer_idx, + hparams, + encoder_output=None, + encoder_decoder_attention_bias=None, + cache=None, + decode_loop_step=None, + save_weights_to=None, + make_image_summary=False, + layer_collection=None, + recurrent_memory_by_layer=None, + chunk_number=None): + """A single transformer self-attention layer.""" + x = decoder_input + layer = layer_idx + layer_name = "layer_%d" % layer + layer_cache = cache[layer_name] if cache is not None else None + + 
attention_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "attention_dropout_broadcast_dims", ""))) + + if recurrent_memory_by_layer is not None: + recurrent_memory = recurrent_memory_by_layer[layer_name] + else: + recurrent_memory = None + + if layer < hparams.get("num_area_layers", 0): + max_area_width = hparams.get("max_area_width", 1) + max_area_height = hparams.get("max_area_height", 1) + memory_height = hparams.get("max_area_height", 1) + else: + max_area_width = 1 + max_area_height = 1 + memory_height = 1 + with tf.variable_scope(layer_name): + with tf.variable_scope("self_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess( + x, hparams, layer_collection=layer_collection), + None, + decoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + max_relative_position=hparams.max_relative_position, + heads_share_relative_embedding=( + hparams.heads_share_relative_embedding), + add_relative_to_values=hparams.add_relative_to_values, + save_weights_to=save_weights_to, + cache=layer_cache, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + max_length=hparams.get("max_length"), + decode_loop_step=decode_loop_step, + vars_3d=hparams.get("attention_variables_3d"), + activation_dtype=hparams.get("activation_dtype", "float32"), + weight_dtype=hparams.get("weight_dtype", "float32"), + layer_collection=layer_collection, + recurrent_memory=recurrent_memory, + chunk_number=chunk_number, + hard_attention_k=hparams.get("hard_attention_k", 0), + gumbel_noise_weight=hparams.get("gumbel_noise_weight", 0.0), + max_area_width=max_area_width, + max_area_height=max_area_height, + memory_height=memory_height, + area_key_mode=hparams.get("area_key_mode", "none"), + area_value_mode=hparams.get("area_value_mode", "none"), + training=(hparams.get( + "mode", + tf.estimator.ModeKeys.TRAIN) == tf.estimator.ModeKeys.TRAIN)) + x = common_layers.layer_postprocess(x, y, hparams) + if encoder_output is not None: + if not isinstance(encoder_output, (list,)): + encoder_output = [encoder_output] + with tf.variable_scope("encdec_attention"): + for enc_output in encoder_output: + y = common_attention.multihead_attention( + common_layers.layer_preprocess( + x, hparams, layer_collection=layer_collection), + enc_output, + encoder_decoder_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + max_relative_position=hparams.max_relative_position, + heads_share_relative_embedding=( + hparams.heads_share_relative_embedding), + add_relative_to_values=hparams.add_relative_to_values, + save_weights_to=save_weights_to, + cache=layer_cache, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + max_length=hparams.get("max_length"), + vars_3d=hparams.get("attention_variables_3d"), + activation_dtype=hparams.get("activation_dtype", "float32"), + weight_dtype=hparams.get("weight_dtype", "float32"), + layer_collection=layer_collection, + hard_attention_k=hparams.get("hard_attention_k", 0), + gumbel_noise_weight=hparams.get("gumbel_noise_weight", 0.0), + max_area_width=max_area_width, + 
max_area_height=max_area_height, + memory_height=memory_height, + area_key_mode=hparams.get("area_key_mode", "none"), + area_value_mode=hparams.get("area_value_mode", "none"), + training=(hparams.get( + "mode", + tf.estimator.ModeKeys.TRAIN) == tf.estimator.ModeKeys.TRAIN)) + x = common_layers.layer_postprocess(x, y, hparams) + return x, layer_cache + + +def transformer_decoder_layer(decoder_input, + decoder_self_attention_bias, + layer_idx, + hparams, + encoder_output=None, + encoder_decoder_attention_bias=None, + cache=None, + decode_loop_step=None, + nonpadding=None, + save_weights_to=None, + make_image_summary=False, + losses=None, + layer_collection=None, + recurrent_memory_by_layer=None, + chunk_number=None): + """A single transformer decoder layer.""" + x, layer_cache = transformer_self_attention_layer( + decoder_input=decoder_input, + decoder_self_attention_bias=decoder_self_attention_bias, + layer_idx=layer_idx, + hparams=hparams, + encoder_output=encoder_output, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + cache=cache, + decode_loop_step=decode_loop_step, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + layer_collection=layer_collection, + recurrent_memory_by_layer=recurrent_memory_by_layer, + chunk_number=chunk_number) + + layer = layer_idx + layer_name = "layer_%d" % layer + with tf.variable_scope(layer_name): + with tf.variable_scope("ffn"): + y = transformer_ffn_layer( + common_layers.layer_preprocess( + x, hparams, layer_collection=layer_collection), + hparams, + conv_padding="LEFT", + nonpadding_mask=nonpadding, + losses=losses, + cache=layer_cache, + decode_loop_step=decode_loop_step, + layer_collection=layer_collection) + x = common_layers.layer_postprocess(x, y, hparams) + return x + + +def transformer_decoder(decoder_input, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + cache=None, + decode_loop_step=None, + name="decoder", + nonpadding=None, + save_weights_to=None, + make_image_summary=False, + losses=None, + layer_collection=None, + recurrent_memory_by_layer=None, + chunk_number=None): + """A stack of transformer layers. + + Args: + decoder_input: a Tensor + encoder_output: a Tensor + decoder_self_attention_bias: bias Tensor for self-attention (see + common_attention.attention_bias()) + encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention + (see common_attention.attention_bias()) + hparams: hyperparameters for model + cache: dict, containing tensors which are the results of previous + attentions, used for fast decoding. + decode_loop_step: An integer, step number of the decoding loop. Only used + for inference on TPU. + name: a string + nonpadding: optional Tensor with shape [batch_size, encoder_length] + indicating what positions are not padding. This is used to mask out + padding in convolutional layers. We generally only need this mask for + "packed" datasets, because for ordinary datasets, no padding is ever + followed by nonpadding. + save_weights_to: an optional dictionary to capture attention weights for + visualization; the weights tensor will be appended there under a string + key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. + losses: optional list onto which to append extra training losses + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC + optimizer. Default is None. 
+ recurrent_memory_by_layer: Optional dict, mapping layer names to instances + of transformer_memory.RecurrentMemory. Default is None. + chunk_number: an optional integer Tensor with shape [batch] used to operate + the recurrent_memory. + + Returns: + y: a Tensors + """ + x = decoder_input + + with tf.variable_scope(name): + for layer_idx in range(hparams.num_decoder_layers or + hparams.num_hidden_layers): + x = transformer_decoder_layer( + x, + decoder_self_attention_bias, + layer_idx, + hparams, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + encoder_output=encoder_output, + cache=cache, + decode_loop_step=decode_loop_step, + nonpadding=nonpadding, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + losses=losses, + layer_collection=layer_collection, + recurrent_memory_by_layer=recurrent_memory_by_layer, + chunk_number=chunk_number + ) + + # if normalization is done in layer_preprocess, then it should also be done + # on the output, since the output can grow very large, being the sum of + # a whole stack of unnormalized layer outputs. + return common_layers.layer_preprocess( + x, hparams, layer_collection=layer_collection) + + +@registry.register_model +class TransformerMemory(Transformer): + """Transformer language model with memory across chunks.""" + + # TODO(kitaev): consider overriding set_mode to swap out recurrent memory when + # switching between training and evaluation. + + def __init__(self, *args, **kwargs): + super(TransformerMemory, self).__init__(*args, **kwargs) + + hparams = self._hparams + self.recurrent_memory_by_layer = {} + for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers): + layer_name = "layer_%d" % layer + if hparams.memory_type == "neural_memory": + memory = transformer_memory.TransformerMemory( + batch_size=int(hparams.batch_size / hparams.max_length), + key_depth=hparams.hidden_size, + val_depth=hparams.hidden_size, + memory_size=hparams.split_targets_chunk_length, + sharpen_factor=1., + name=layer_name + "/recurrent_memory") + elif hparams.memory_type == "transformer_xl": + memory = transformer_memory.RecentTokensMemory( + layer_name + "/recurrent_memory", hparams) + else: + raise ValueError("Unsupported memory type: %s" % hparams.memory_type) + self.recurrent_memory_by_layer[layer_name] = memory + + @property + def has_input(self): + if hasattr(self._hparams, "unconditional") and self._hparams.unconditional: + return False + return super(TransformerMemory, self).has_input + + def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha, + use_tpu=False): + """Overriding beam search because for now only the slow version works with + memory + """ + return self._beam_decode_slow(features, decode_length, beam_size, + top_beams, alpha, use_tpu) + + +@registry.register_hparams +def transformer_base_v1(): + """Set of hyperparameters.""" + hparams = hparam.HParams( + hidden_size=512, + batch_size=4096, + max_length=256, + clip_grad_norm = 0., # i.e. 
no gradient clipping + optimizer="adam", + optimizer_adam_epsilon=1e-9, + optimizer_adam_beta1=0.9, + optimizer_adam_beta2=0.98, + learning_rate_schedule="legacy", + learning_rate_decay_scheme="noam", + learning_rate=0.1, + learning_rate_warmup_steps=4000, + initializer_gain=1.0, + initializer="uniform_unit_scaling", + num_hidden_layers=6, + weight_decay=0.0, + label_smoothing=0.1, + learning_rate_constant=1.0, + + # decay_steps and decay_staircase for learning_rate_decay_scheme=="exp" + learning_rate_decay_steps=5000, + learning_rate_decay_staircase=False, + learning_rate_minimum=None, + learning_rate_decay_rate=1.0, + learning_rate_cosine_cycle_steps=250000, + + batch_shuffle_size=512, + # If True, then if the features are of variable length, the batch_size is + # used as the actual batch size (and not tokens per batch). + use_fixed_batch_size=False, + kernel_height=3, + kernel_width=1, + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. + dropout=0.2, + grad_noise_scale=0.0, + optimizer_momentum_momentum=0.9, + optimizer_momentum_nesterov=False, + # Number of accumulating steps for multi step optimizers. + optimizer_multistep_accumulate_steps=0, + # Loss scaling used. + # Generally only necessary with mixed precision training. + # Mixed precision training only supports exponential scaling currently + # To disable the scaler, see to 0/False + mixed_precision_optimizer_loss_scaler="exponential", + # Determines the initial loss scaling value for mixed precision + mixed_precision_optimizer_init_loss_scale=2**15, + # Whether to zero gradients that were not computed, so that the + # appropriate slots are created. Useful for sharing checkpoints between + # models with different sets of heads. + optimizer_zero_grads=False, + weight_noise=0.0, + + sampling_method="argmax", # "argmax" or "random" + sampling_temp=1.0, # temperature for sampling + sampling_keep_top_k=-1, # If >0, ignore all but the top k logits + # expand the logits a piece at a time - saves memory. + factored_logits=False, + multiply_embedding_mode="sqrt_depth", + # Parameters related to mixtures of experts. + moe_hidden_sizes="2048", # hidden layer sizes (comma-separated) + moe_num_experts=64, # number of experts per layer + moe_k=2, # how many experts to use for each batch element + moe_loss_coef=1e-2, + # Sequences of operations to perform on layer input and layer output. + # Used by common_layers.layer_preprocess, common_layers.layer_postprocess + # Each character represents an operation: + # none: no preprocessing + # d: apply dropout + # n: apply normalization (see norm_type and norm_epsilon) + # a: add layer input (residual connection - only during postprocess) + # The special string "none" is used instead of the empty string + # to indicate no pre/postprocessing, since the empty string causes + # trouble for hyperparameter tuning. + # TODO(noam): The current settings ("", "dan") are the published version + # of the transformer. ("n", "da") seems better for harder-to-learn + # models, so it should probably be the default. + layer_preprocess_sequence="none", + layer_postprocess_sequence="dan", + # dropout rate to use during layer_preprocess and layer_postprocess + layer_prepostprocess_dropout=0.1, + # broadcast dimensions for layer_prepostprocess_dropout + # a comma-separated list of integers. + # see common_layers.dropout_with_broadcast_dims() + # Change this to "1" to save memory. 
+ layer_prepostprocess_dropout_broadcast_dims="", + # dropout some symbols (set them to 0) before embedding. + symbol_dropout=0.0, + # What type of normalization to use + norm_type="layer", # "batch", layer", "noam", "none". + # epsilon parameter to normalization function + norm_epsilon=1e-6, + # pad vocabularies so that this value divides the vocabulary size. + vocab_divisor=1, + # During training, we drop sequences whose inputs and targets are shorter + # than min_length + min_length=0, + # Pack examples on the fly. + pack_dataset=False, + # Use custom ops not included in standard tensorflow. + use_custom_ops=True, + # Split targets on the first axis into chunks of this length. + split_targets_chunk_length=0, + split_targets_max_chunks=100, + split_targets_strided_training=False, + # Maximum length in the smallest length bucket. Setting this + # flag too high will result in wasteful padding of short + # sequences. Due to some (hopefully) temporary hacks in the + # data reading and batching code, setting this flag too low + # results in a very long batch-shuffling queue. + # TODO(noam): change this once the Datasets API changes. + min_length_bucket=8, + # This flag controls the number of length buckets in the data + # reader. The buckets have maximum lengths from + # min_bucket_length to (max_length or batch_size), increasing + # (approximately) by factors of length_bucket_step. + length_bucket_step=1.1, + # If True, run the model autoregressively instead of teacher-forcing + # during eval + eval_run_autoregressive=False, + # (For features with symbol modality) If True, share all of the + # input embeddings, target embeddings, and softmax weights. + shared_embedding_and_softmax_weights=True, + # (For features with symbol modality) If True, share the input embeddings + # and target embeddings. + shared_embedding=False, + # (For features with symbol modality) Number to shard embeddings by. + symbol_modality_num_shards=16, + # Feature transformations are optional dictionaries comprising key-value + # pairs of a feature name (str) and its transformation (function). If not + # specified, T2TModel applies a default transformation according to the + # feature's modality. Bottom is applicable to all features; loss, top, and + # weights_fn are only applicable to target features. + # TODO(trandustin): `name` is an optional hparam for legacy reasons, + # defining variable scope names. Remove this hparam in the future. + bottom={}, + loss={}, + name={}, + top={}, + weights_fn={}, + # The maximum length of "input" sequence. + # Sequences longer than this value will be truncated. 0 or negative values + # mean there is no maximum or truncation. + # You can change this behavior by overriding preprocess_example() method + # in your problem class. + max_input_seq_length=0, + # The maximum length of "target" sequence. + # Sequences longer than this value will be truncated. 0 or negative values + # mean there is no maximum or truncation. + # You can change this behavior by overriding preprocess_example() method + # in your problem class. + max_target_seq_length=0, + # if nonzero, we split the target sequences on example read. + # This is for use with language modeling problems with fixed length + # examples. e.g. The examples may be written with length 65536, but we + # want to split each example into 64 examples of length 1024. + split_to_length=0, + # This flag allows us to optionally treat a seq-to-seq problem + # as a language model. 
+      # Legal values are:
+      #
+      #   "none" - Do not prepend the inputs to the targets.
+      #   "prepend_inputs_masked_attention"
+      #       replace "targets" in preprocessing with
+      #       tf.concat([inputs, [0], targets], axis=1)
+      #       i.e. we prepend the inputs to the targets with a single
+      #       padding token in between. Use masked self-attention on the
+      #       entire resulting sequence. During training, we compute losses on
+      #       the combined sequence. During eval, we compute the metrics
+      #       on only the targets portion.
+      #   "prepend_inputs_full_attention"
+      #       similar to the previous option except that each
+      #       position in the inputs portion can see the
+      #       entire inputs portion. This removes the challenge of
+      #       autoregressively predicting the inputs portion.
+      prepend_mode="none",
+      # Scheduled sampling is interesting for auto-regressive models.
+      # It runs an additional step using the generated output as autoregressive
+      # targets, which can improve the model's inference results later. The
+      # parameter scheduled_sampling_prob determines the probability with which
+      # this additional step is run. It's turned off (0.0) by default.
+      # This probability will exponentially warm up for the number of
+      # steps determined by scheduled_sampling_warmup_steps.
+      # The tensor used for the n-th pass will consist of outputs from
+      # the (n-1)-th pass mixed with gold truth, with the proportion of gold
+      # determined by scheduled_sampling_gold_mixin_prob. Control the number
+      # of passes with scheduled_sampling_num_passes.
+      scheduled_sampling_prob=0.0,
+      scheduled_sampling_method="parallel",  # parallel or sequential.
+      scheduled_sampling_warmup_steps=50000,
+      scheduled_sampling_gold_mixin_prob=0.5,
+      scheduled_sampling_num_passes=1,
+      scheduled_sampling_warmup_schedule="exp",  # exp, linear, or sigmoid.
+
+      # This setting controls whether to copy variables around in a daisy chain
+      # (if true) or leave their placement to TensorFlow. It only affects
+      # multi-device training and mostly should be turned on for performance.
+      # One exception is recurrent models: with dynamic loops it must be off.
+      daisy_chain_variables=True,
+      # If True in PREDICT mode, then last-position-only optimizations are not
+      # used.
+      force_full_predict=False,
+      # Set this for pure model parallelism. There is only one data shard.
+      no_data_parallelism=False,
+      # dtype used for activations: "float32" or "bfloat16".
+      # activation_dtype="bfloat16" currently only works on TPU.
+      # It lowers activation-memory usage
+      # and does not appear to affect quality.
+      # You can train on TPU with activation_dtype="bfloat16" and evaluate
+      # on CPU/GPU with activation_dtype="float32".
+      activation_dtype="float32",
+      # dtype used for parameters: "float32" or "bfloat16".
+      # bfloat16 currently only works with optimizer="adafactor".
+      # The savings in memory allow for training larger models.
+      # Weights are encoded as (w*128)^8, using pseudostochastic
+      # roundoff. Initial experiments show that model quality is similar
+      # to baseline for about 3M training steps, but worse thereafter.
+      weight_dtype="float32",
+      # Hyperparameters for relative attention.
+      # The maximum relative positional distance to learn an embedding for.
+      max_relative_position=0,
+      # If heads share the same relative embedding.
+      heads_share_relative_embedding=False,
+      # If relative embedding terms are added to values too.
+      add_relative_to_values=False,
+      # Pad batch dim of inputs to nearest multiple of batch multiple.
+ pad_batch=False, + # When true, do not evaluate on the language model data when running the + # multiproblem since it can take a while. If False, set eval_steps to + # something large like 6000 or 10000. + multiproblem_target_eval_only=False, + # Max out the vocab size to a power of 2 for efficiency and to reserve + # extra space in the vocabulary for new task ids and label classes. + multiproblem_vocab_size=-1, + # When using multiproblem with generation tasks, need to truncate the + # inputs and targets manually before concatenating them. + multiproblem_max_input_length=-1, + multiproblem_max_target_length=-1, + # If positive, makes training targets fixed-length in MultiProblem. + multiproblem_fixed_train_length=-1, + # Load weights from a second model. For instance, when using + # pre-trained weights, you might want to initialize the encoder + # and decoder by loading different models. + warm_start_from_second="", + # Area attention hyper parameters + area_value_mode="none", + area_key_mode="none", + # Using area attention for the number of layers from the bottom + num_area_layers=0, + max_area_width=1, + max_area_height=1, + memory_height=1, + # Whether to use GPU automatic mixed precision (via graph rewrite) + gpu_automatic_mixed_precision=False, + ) + + # Add new ones like this. + hparams.add_hparam("filter_size", 2048) + # Layer-related flags. If zero, these fall back on hparams.num_hidden_layers. + hparams.add_hparam("num_encoder_layers", 0) + hparams.add_hparam("num_decoder_layers", 0) + # Attention-related flags. + hparams.add_hparam("num_heads", 8) + hparams.add_hparam("attention_key_channels", 0) + hparams.add_hparam("attention_value_channels", 0) + hparams.add_hparam("ffn_layer", "dense_relu_dense") + hparams.add_hparam("parameter_attention_key_channels", 0) + hparams.add_hparam("parameter_attention_value_channels", 0) + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. + hparams.add_hparam("attention_dropout", 0.0) + hparams.add_hparam("attention_dropout_broadcast_dims", "") + hparams.add_hparam("relu_dropout", 0.0) + hparams.add_hparam("relu_dropout_broadcast_dims", "") + hparams.add_hparam("pos", "timing") # timing, none + hparams.add_hparam("position_features", "") + hparams.add_hparam("nbr_decoder_problems", 1) + hparams.add_hparam("proximity_bias", False) + hparams.add_hparam("causal_decoder_self_attention", True) + hparams.add_hparam("use_pad_remover", True) + hparams.add_hparam("use_static_shapes", False) + hparams.add_hparam("self_attention_type", "dot_product") + hparams.add_hparam("conv_first_kernel", 3) + hparams.add_hparam("attention_variables_3d", False) + hparams.add_hparam("use_target_space_embedding", True) + # These parameters are only used when ffn_layer=="local_moe_tpu" + hparams.add_hparam("moe_overhead_train", 1.0) + hparams.add_hparam("moe_overhead_eval", 2.0) + hparams.moe_num_experts = 16 + hparams.moe_loss_coef = 1e-3 + # If specified, use this value instead of problem name in metrics.py. + # This is useful for programs that can automatically compare experiments side + # by side based on the same metric names. + hparams.add_hparam("overload_eval_metric_name", "") + # For making a transformer encoder unidirectional by using masked + # attention. + hparams.add_hparam("unidirectional_encoder", False) + # For hard attention. 
+ hparams.add_hparam("hard_attention_k", 0) + hparams.add_hparam("gumbel_noise_weight", 0.0) + return hparams + + +@registry.register_hparams +def transformer_base_v2(): + """Set of hyperparameters.""" + hparams = transformer_base_v1() + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.layer_prepostprocess_dropout = 0.1 + hparams.attention_dropout = 0.1 + hparams.relu_dropout = 0.1 + hparams.learning_rate_warmup_steps = 8000 + hparams.learning_rate = 0.2 + return hparams + + +@registry.register_hparams +def transformer_base_vq_ada_32ex_packed(): + """Set of hyperparameters for lm1b packed following tpu params.""" + hparams = transformer_base_v2() + expert_utils.update_hparams_for_vq_gating(hparams) + hparams.moe_num_experts = 32 + hparams.gating_type = "vq" + # this gives us a batch size of 16 because each seq is len 256 + hparams.batch_size = 5072 + hparams.ffn_layer = "local_moe" + hparams.shared_embedding_and_softmax_weights = False + hparams.learning_rate_warmup_steps = 10000 + # one epoch for languagemodel_lm1b32k_packed = 27200 steps w/ bsize 128 + hparams.learning_rate_decay_steps = 27200 + hparams.num_heads = 4 + hparams.num_blocks = 1 + hparams.moe_k = 1 + hparams.num_decoder_layers = 6 + hparams.label_smoothing = 0. + hparams.layer_prepostprocess_dropout = 0.1 + hparams.layer_postprocess_sequence = "dan" + hparams.layer_preprocess_sequence = "none" + hparams.weight_decay = 1e-06 + hparams.attention_dropout = 0.1 + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay" + hparams.activation_dtype = "float32" + hparams.learning_rate = 0.1 + hparams.learning_rate_constant = 1.0 + return hparams + + +@registry.register_hparams +def transformer_topk_16_packed(): + hparams = transformer_base_vq_ada_32ex_packed() + hparams.gating_type = "topk" + hparams.moe_num_experts = 16 + hparams.moe_k = 2 + return hparams + + +@registry.register_hparams +def transformer_base_vq1_16_nb1_packed_nda_b01_scales(): + """Set of hyperparameters.""" + hparams = transformer_base_vq_ada_32ex_packed() + hparams.use_scales = int(True) + hparams.moe_num_experts = 16 + hparams.moe_k = 1 + hparams.beta = 0.1 + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.ema = False + return hparams + + +@registry.register_hparams +def transformer_base_vq1_16_nb1_packed_dan_b01_scales(): + """Set of hyperparameters.""" + hparams = transformer_base_vq_ada_32ex_packed() + hparams.use_scales = int(True) + hparams.moe_num_experts = 16 + hparams.moe_k = 1 + hparams.beta = 0.1 + hparams.ema = False + return hparams + + +@registry.register_hparams +def transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog(): + """Set of hyperparameters.""" + hparams = transformer_base_vq1_16_nb1_packed_nda_b01_scales() + hparams.batch_size = 2048 + hparams.max_length = 1024 + hparams.filter_size = 3072 + return hparams + + +@registry.register_hparams +def transformer_ada_lmpackedbase(): + """Set of hyperparameters.""" + hparams = transformer_base_vq_ada_32ex_packed() + hparams.ffn_layer = "dense_relu_dense" + return hparams + + +@registry.register_hparams +def transformer_ada_lmpackedbase_dialog(): + """Set of hyperparameters.""" + hparams = transformer_base_vq_ada_32ex_packed() + hparams.max_length = 1024 + hparams.ffn_layer = "dense_relu_dense" + hparams.batch_size = 4096 + return hparams + + +@registry.register_hparams +def transformer_ada_lmpackedbase_relative(): + """Set of hyperparameters.""" + 
hparams = transformer_base_vq_ada_32ex_packed() + hparams.ffn_layer = "dense_relu_dense" + return hparams + + +@registry.register_hparams +def transformer_base_v3(): + """Base parameters for Transformer model.""" + # Update parameters here, then occasionally cut a versioned set, e.g. + # transformer_base_v2. + hparams = transformer_base_v2() + hparams.optimizer_adam_beta2 = 0.997 + # New way of specifying learning rate schedule. + # Equivalent to previous version. + hparams.learning_rate_schedule = ( + "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size") + hparams.learning_rate_constant = 2.0 + return hparams + +@registry.register_hparams +def transformer_base_hpu(): + """Base parameters for Transformer model running on HPU.""" + hparams = transformer_base_v3() + hparams.batch_size = 4096 + hparams.max_length = 256 + hparams.optimizer = 'true_adam' + hparams.use_pad_remover = False + hparams.use_static_shapes = True + return hparams + +@registry.register_hparams +def transformer_base(): + """Base parameters for Transformer model.""" + hparams = transformer_base_hpu() + return hparams + + +@registry.register_hparams +def transformer_big(): + """HParams for transformer big model on WMT.""" + hparams = transformer_base() + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + # Reduce batch size to 2048 from 4096 to be able to train the model on a GPU + # with 12 GB memory. For example, NVIDIA TITAN V GPU. + hparams.batch_size = 2048 + hparams.num_heads = 16 + hparams.layer_prepostprocess_dropout = 0.3 + return hparams + + +@registry.register_hparams +def transformer_big_single_gpu(): + """HParams for transformer big model for single GPU.""" + hparams = transformer_big() + hparams.layer_prepostprocess_dropout = 0.1 + hparams.learning_rate_warmup_steps = 16000 + return hparams + + +@registry.register_hparams +def transformer_base_single_gpu(): + """HParams for transformer base model for single GPU.""" + hparams = transformer_base() + hparams.batch_size = 1024 + hparams.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay" + hparams.learning_rate_constant = 0.1 + hparams.learning_rate_warmup_steps = 16000 + return hparams + + +@registry.register_hparams +def transformer_base_multistep8(): + """HParams for simulating 8 GPUs with MultistepAdam optimizer.""" + hparams = transformer_base() + hparams.optimizer = "multistep_adam" + hparams.optimizer_multistep_accumulate_steps = 8 + return hparams + + +@registry.register_hparams +def transformer_cubbitt(): + """Transformer hyperparameters used in CUBBITT experiments.""" + hparams = transformer_big_single_gpu() + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.batch_size = 2900 + hparams.learning_rate_warmup_steps = 8000 + hparams.max_length = 150 + hparams.layer_prepostprocess_dropout = 0 + hparams.optimizer = "Adafactor" + return hparams + + +@registry.register_hparams +def transformer_parsing_base(): + """HParams for parsing on WSJ only.""" + hparams = transformer_base() + hparams.attention_dropout = 0.2 + hparams.layer_prepostprocess_dropout = 0.2 + hparams.max_length = 512 + hparams.learning_rate_warmup_steps = 16000 + hparams.hidden_size = 1024 + hparams.learning_rate = 0.05 + hparams.shared_embedding_and_softmax_weights = False + return hparams + + +@registry.register_hparams +def transformer_parsing_big(): + """HParams for parsing on WSJ semi-supervised.""" + hparams = transformer_big() + hparams.max_length = 512 + hparams.shared_source_target_embedding = False + hparams.learning_rate_warmup_steps = 4000 + 
hparams.layer_prepostprocess_dropout = 0.1 + hparams.batch_size = 2048 + hparams.learning_rate = 0.05 + return hparams + + +@registry.register_hparams +def transformer_parsing_ice(): + """HParams for parsing and tagging Icelandic text.""" + hparams = transformer_base_single_gpu() + hparams.batch_size = 4096 + hparams.shared_embedding_and_softmax_weights = False + return hparams + + +@registry.register_hparams +def transformer_tiny(): + hparams = transformer_base() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 128 + hparams.filter_size = 512 + hparams.num_heads = 4 + return hparams + + +@registry.register_hparams +def transformer_test(): + hparams = transformer_base() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 16 + hparams.filter_size = 8 + hparams.num_heads = 2 + return hparams + + +@registry.register_hparams +def transformer_small(): + hparams = transformer_base() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 256 + hparams.filter_size = 1024 + hparams.num_heads = 4 + return hparams + + +@registry.register_hparams +def transformer_l2(): + hparams = transformer_base() + hparams.num_hidden_layers = 2 + return hparams + + +@registry.register_hparams +def transformer_l4(): + hparams = transformer_base() + hparams.num_hidden_layers = 4 + return hparams + + +@registry.register_hparams +def transformer_l8(): + hparams = transformer_base() + hparams.num_hidden_layers = 8 + return hparams + + +@registry.register_hparams +def transformer_l10(): + hparams = transformer_base() + hparams.num_hidden_layers = 10 + return hparams + + +@registry.register_hparams +def transformer_h1(): + hparams = transformer_base() + hparams.num_heads = 1 + return hparams + + +@registry.register_hparams +def transformer_h4(): + hparams = transformer_base() + hparams.num_heads = 4 + return hparams + + +@registry.register_hparams +def transformer_h16(): + hparams = transformer_base() + hparams.num_heads = 16 + return hparams + + +@registry.register_hparams +def transformer_h32(): + hparams = transformer_base() + hparams.num_heads = 32 + return hparams + + +@registry.register_hparams +def transformer_k128(): + hparams = transformer_base() + hparams.attention_key_channels = 128 + return hparams + + +@registry.register_hparams +def transformer_k256(): + hparams = transformer_base() + hparams.attention_key_channels = 256 + return hparams + + +@registry.register_hparams +def transformer_ff1024(): + hparams = transformer_base() + hparams.filter_size = 1024 + return hparams + + +@registry.register_hparams +def transformer_ff4096(): + hparams = transformer_base() + hparams.filter_size = 4096 + return hparams + + +@registry.register_hparams +def transformer_dr0(): + hparams = transformer_base() + hparams.layer_prepostprocess_dropout = 0.0 + return hparams + + +@registry.register_hparams +def transformer_dr2(): + hparams = transformer_base() + hparams.layer_prepostprocess_dropout = 0.2 + return hparams + + +@registry.register_hparams +def transformer_ls0(): + hparams = transformer_base() + hparams.label_smoothing = 0.0 + return hparams + + +@registry.register_hparams +def transformer_ls2(): + hparams = transformer_base() + hparams.label_smoothing = 0.2 + return hparams + + +@registry.register_hparams +def transformer_hs256(): + hparams = transformer_base() + hparams.hidden_size = 256 + return hparams + + +@registry.register_hparams +def transformer_hs1024(): + hparams = transformer_base() + hparams.hidden_size = 1024 + return hparams + + +@registry.register_hparams +def transformer_big_dr1(): + hparams 
= transformer_base() + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + hparams.num_heads = 16 + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def transformer_big_enfr(): + hparams = transformer_big_dr1() + hparams.shared_embedding_and_softmax_weights = False + hparams.filter_size = 8192 + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def transformer_big_enfr_tpu(): + hparams = transformer_big_enfr() + # For performance, use fewer heads so that matrix dimensions are at least 128 + hparams.num_heads = 8 + update_hparams_for_tpu(hparams) + return hparams + + +@registry.register_hparams +def transformer_big_dr2(): + hparams = transformer_big_dr1() + hparams.layer_prepostprocess_dropout = 0.2 + return hparams + + +@registry.register_hparams +def transformer_parameter_attention_a(): + hparams = transformer_base() + hparams.ffn_layer = "parameter_attention" + hparams.filter_size = 1536 + return hparams + + +@registry.register_hparams +def transformer_parameter_attention_b(): + hparams = transformer_base() + hparams.ffn_layer = "parameter_attention" + hparams.filter_size = 512 + hparams.parameter_attention_key_channels = 1024 + hparams.parameter_attention_value_channels = 1024 + hparams.num_heads = 16 + return hparams + + +@registry.register_hparams +def transformer_prepend_v2(): + hparams = transformer_base_v2() + hparams.prepend_mode = "prepend_inputs_masked_attention" + hparams.max_length = 0 + return hparams + + +@registry.register_hparams +def transformer_prepend_v1(): + hparams = transformer_base_v1() + hparams.prepend_mode = "prepend_inputs_masked_attention" + hparams.max_length = 0 + return hparams + + +@registry.register_hparams +def transformer_prepend(): + return transformer_prepend_v2() + + +@registry.register_ranged_hparams +def transformer_base_range(rhp): + """Small range of hyperparameters.""" + # After starting from base, set intervals for some parameters. + rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) + rhp.set_discrete("learning_rate_warmup_steps", + [1000, 2000, 4000, 8000, 16000]) + rhp.set_float("initializer_gain", 0.5, 2.0) + rhp.set_float("optimizer_adam_beta1", 0.85, 0.95) + rhp.set_float("optimizer_adam_beta2", 0.97, 0.99) + rhp.set_float("weight_decay", 0.0, 1e-4) + + +@registry.register_hparams +def transformer_relative(): + """Use relative position embeddings instead of absolute position encodings.""" + hparams = transformer_base() + hparams.pos = None + hparams.self_attention_type = "dot_product_relative" + hparams.max_relative_position = 20 + return hparams + + +@registry.register_hparams +def transformer_relative_tiny(): + hparams = transformer_relative() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 128 + hparams.filter_size = 512 + hparams.num_heads = 4 + return hparams + + +@registry.register_hparams +def transformer_relative_big(): + hparams = transformer_big() + hparams.pos = None + hparams.self_attention_type = "dot_product_relative" + hparams.max_relative_position = 20 + return hparams + + +@registry.register_hparams +def transformer_timeseries(): + hparams = transformer_small() + hparams.batch_size = 256 + hparams.learning_rate_warmup_steps = 2000 + return hparams + + +def update_hparams_for_tpu(hparams): + """Change hparams to be compatible with TPU training.""" + + # Adafactor uses less memory than Adam. + # switch to Adafactor with its recommended learning rate scheme. 
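+  # (Adafactor stores factored second-moment estimates, needing roughly n + m
+  # values per n x m weight matrix instead of the full n * m slot that Adam
+  # keeps.)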
+ hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 10000 + + # Avoid an expensive concat on TPU. + # >1 shards helps with faster parameter distribution on multi-GPU machines + hparams.symbol_modality_num_shards = 1 + + # Adaptive batch sizes and sequence lengths are not supported on TPU. + # Instead, every batch has the same sequence length and the same batch size. + # Longer sequences are dropped and shorter ones are padded. + # + # It is therefore suggested to use a problem where examples have been combined + # to a longer length, e.g. the "_packed" problems. + # + # For problems with variable sequence lengths, this parameter controls the + # maximum sequence length. Longer sequences are dropped and shorter ones + # are padded. + # + # For problems with fixed sequence lengths - e.g. the "_packed" problems, + # this hyperparameter is ignored. + hparams.max_length = 64 + + # TPUs have less memory than GPUs, so decrease the batch size if it's too high + if hparams.batch_size > 2048: + hparams.batch_size = 2048 + + # Using noise broadcast in the dropout layers saves memory during training. + hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads + hparams.relu_dropout_broadcast_dims = "1" # length + hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length + return hparams + + +@registry.register_hparams +def transformer_tpu(): + """HParams for Transformer model on TPU.""" + hparams = transformer_base() + update_hparams_for_tpu(hparams) + return hparams + + +@registry.register_hparams +def transformer_timeseries_tpu(): + """HParams for running Transformer model on timeseries on TPU.""" + hparams = transformer_timeseries() + update_hparams_for_tpu(hparams) + hparams.batch_size = 256 # revert to value set in transformer_timeseries + return hparams + + +@registry.register_hparams +def transformer_tpu_bf16_activation(): + """HParams for Transformer model with BF16 activation on TPU.""" + hparams = transformer_tpu() + hparams.activation_dtype = "bfloat16" + return hparams + + +@registry.register_hparams +def transformer_fairseq_fp16_activation_big(): + """Hparams intended to mirror those used in arxiv.org/pdf/1806.00187.pdf.""" + hparams = transformer_big() + hparams.activation_dtype = "float16" + hparams.batch_size = 3584 + return hparams + + +@registry.register_hparams +def transformer_packed_tpu(): + """Deprecated alias for transformer_tpu().""" + return transformer_tpu() + + +@registry.register_hparams +def transformer_big_tpu(): + hparams = transformer_big() + update_hparams_for_tpu(hparams) + return hparams + + +@registry.register_hparams +def transformer_tiny_tpu(): + hparams = transformer_tiny() + update_hparams_for_tpu(hparams) + return hparams + + +@registry.register_ranged_hparams +def transformer_tiny_tpu_range(rhp): + """Small range of hyperparameters.""" + rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) + rhp.set_float("weight_decay", 0.0, 2.0) + + +@registry.register_ranged_hparams +def transformer_tpu_range(rhp): + """Small range of hyperparameters.""" + # After starting from base, set intervals for some parameters. 
+ rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) + rhp.set_discrete("learning_rate_warmup_steps", + [1000, 2000, 4000, 8000, 16000]) + rhp.set_float("initializer_gain", 0.5, 2.0) + rhp.set_float("optimizer_adam_beta1", 0.85, 0.95) + rhp.set_float("optimizer_adam_beta2", 0.97, 0.99) + rhp.set_float("weight_decay", 0.0, 2.0) + + +@registry.register_hparams +def transformer_small_tpu(): + """TPU-friendly version of transformer_small. + + Returns: + an hparams object. + """ + hparams = transformer_small() + update_hparams_for_tpu(hparams) + return hparams + + +@registry.register_hparams +def transformer_clean(): + """No dropout, label smoothing, max_length.""" + hparams = transformer_base_v2() + hparams.label_smoothing = 0.0 + hparams.layer_prepostprocess_dropout = 0.0 + hparams.attention_dropout = 0.0 + hparams.relu_dropout = 0.0 + hparams.max_length = 0 + return hparams + + +@registry.register_hparams +def transformer_clean_big(): + hparams = transformer_clean() + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + return hparams + + +@registry.register_hparams +def transformer_clean_big_tpu(): + hparams = transformer_clean_big() + update_hparams_for_tpu(hparams) + return hparams + + +@registry.register_hparams +def transformer_tpu_with_conv(): + """Cut down on the number of heads, and use convs instead.""" + hparams = transformer_tpu() + hparams.num_heads = 4 # Heads are expensive on TPUs. + hparams.ffn_layer = "conv_relu_conv" + return hparams + + +@registry.register_hparams +def transformer_lm_tpu_0(): + """HParams for training languagemodel_lm1b8k on tpu. 92M Params.""" + hparams = transformer_clean_big() + update_hparams_for_tpu(hparams) + hparams.num_heads = 4 # Heads are expensive on TPUs. + hparams.batch_size = 4096 + hparams.shared_embedding_and_softmax_weights = False + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def transformer_lm_tpu_1(): + """HParams for training languagemodel_lm1b8k on tpu. 335M Params.""" + hparams = transformer_lm_tpu_0() + hparams.hidden_size = 2048 + hparams.filter_size = 8192 + return hparams + + +@registry.register_hparams +def transformer_supervised_attention(): + """HParams for supervised attention problems.""" + hparams = transformer_base() + # Attention loss type (KL-divergence or MSE). + hparams.add_hparam("expected_attention_loss_type", "kl_divergence") + # Multiplier to the encoder-decoder expected attention loss. + hparams.add_hparam("expected_attention_loss_multiplier", 1.0) + return hparams + + +@registry.register_hparams +def transformer_tpu_1b(): + """Hparams for machine translation with ~1.1B parameters.""" + hparams = transformer_tpu() + hparams.hidden_size = 2048 + hparams.filter_size = 8192 + hparams.num_hidden_layers = 8 + # smaller batch size to avoid OOM + hparams.batch_size = 1024 + hparams.activation_dtype = "bfloat16" + hparams.weight_dtype = "bfloat16" + # maximize number of parameters relative to computation by not sharing. + hparams.shared_embedding_and_softmax_weights = False + return hparams + + +@registry.register_hparams +def transformer_wikitext103_l4k_v0(): + """HParams for training languagemodel_wikitext103_l4k.""" + hparams = transformer_big() + + # Adafactor uses less memory than Adam. + # switch to Adafactor with its recommended learning rate scheme. 
+ hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 10000 + + hparams.num_heads = 4 + hparams.max_length = 4096 + hparams.batch_size = 4096 + hparams.shared_embedding_and_softmax_weights = False + + hparams.num_hidden_layers = 8 + hparams.attention_dropout = 0.1 + hparams.layer_prepostprocess_dropout = 0.2 + hparams.relu_dropout = 0.1 + hparams.label_smoothing = 0.0 + + # Using noise broadcast in the dropout layers saves memory during training. + hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads + hparams.relu_dropout_broadcast_dims = "1" # length + hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length + + # Avoid an expensive concat on TPU. + # >1 shards helps with faster parameter distribution on multi-GPU machines + hparams.symbol_modality_num_shards = 1 + + return hparams + + +@registry.register_hparams +def transformer_wikitext103_l4k_memory_v0(): + """HParams for training languagemodel_wikitext103_l4k with memory.""" + hparams = transformer_wikitext103_l4k_v0() + + hparams.split_targets_chunk_length = 64 + hparams.split_targets_max_chunks = 64 + hparams.split_targets_strided_training = True + hparams.add_hparam("memory_type", "transformer_xl") + + # The hparams specify batch size *before* chunking, but we want to have a + # consistent 4K batch size *after* chunking to fully utilize the hardware. + target_tokens_per_batch = 4096 + hparams.batch_size = int(target_tokens_per_batch * ( + hparams.max_length / hparams.split_targets_chunk_length)) # 262144 + + hparams.pos = None + hparams.self_attention_type = "dot_product_relative" + hparams.max_relative_position = 2 * hparams.split_targets_chunk_length + + hparams.add_hparam("unconditional", True) + hparams.add_hparam("recurrent_memory_batch_size", 0) # 0 = try to guess + # By default, cache one chunk only (like Transformer-XL) + hparams.add_hparam("num_memory_items", hparams.split_targets_chunk_length) + + return hparams + + +@registry.register_hparams +def transformer_wikitext103_l16k_memory_v0(): + """HParams for training languagemodel_wikitext103_l16k with memory.""" + hparams = transformer_wikitext103_l4k_memory_v0() + + hparams.max_length = 16384 + hparams.split_targets_chunk_length = 64 + hparams.split_targets_max_chunks = int( + hparams.max_length / hparams.split_targets_chunk_length) + + # The hparams specify batch size *before* chunking, but we want to have a + # consistent 4K batch size *after* chunking to fully utilize the hardware. 
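+  # For this config that works out to 4096 * (16384 / 64) = 1048576 tokens per
+  # (pre-chunking) batch.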
+ target_tokens_per_batch = 4096 + hparams.batch_size = int(target_tokens_per_batch * ( + hparams.max_length / hparams.split_targets_chunk_length)) + + hparams.max_relative_position = 2 * hparams.split_targets_chunk_length + + return hparams + + +@registry.register_hparams +def transformer_cifar10_memory_v0(): + """HParams for training image_cifar10_plain_gen_flat_rev with memory.""" + hparams = transformer_wikitext103_l4k_memory_v0() + + hparams.num_hidden_layers = 6 + + hparams.max_length = 32 * 32 * 3 + hparams.split_targets_chunk_length = 64 * 3 + hparams.split_targets_max_chunks = int( + hparams.max_length / hparams.split_targets_chunk_length) + hparams.num_memory_items = 128 * 3 + + # Since this is an image problem, batch size refers to examples (not tokens) + target_images_per_batch = 4 + hparams.batch_size = int(target_images_per_batch * ( + hparams.max_length / hparams.split_targets_chunk_length)) + + # The recurrent memory needs to know the actual batch size (in sequences) + hparams.recurrent_memory_batch_size = hparams.batch_size + + hparams.max_relative_position = ( + hparams.num_memory_items + hparams.split_targets_chunk_length) + + return hparams + + +@registry.register_hparams +def transformer_imagenet64_memory_v0(): + """HParams for training image_imagenet64_gen_flat_rev with memory.""" + hparams = transformer_cifar10_memory_v0() + + hparams.max_length = 64 * 64 * 3 + hparams.split_targets_chunk_length = 64 * 3 + hparams.split_targets_max_chunks = int( + hparams.max_length / hparams.split_targets_chunk_length) + hparams.num_memory_items = 128 * 3 + + # Since this is an image problem, batch size refers to examples (not tokens) + target_images_per_batch = 2 + hparams.batch_size = int(target_images_per_batch * ( + hparams.max_length / hparams.split_targets_chunk_length)) + + # The recurrent memory needs to know the actual batch size (in sequences) + hparams.recurrent_memory_batch_size = hparams.batch_size + + hparams.max_relative_position = 3072 + + return hparams diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/requirements.txt b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..65ada27ecbabb38041cad3eab1ee999949d79b31 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/requirements.txt @@ -0,0 +1,12 @@ +matplotlib==3.6.2 +tensorflow_probability==0.22.1 +six==1.15.0 +git+https://github.com/google-research/tf-slim.git@dbe1d0b0e9d82ecf6307b670f23b45c29b4cb193#egg=tf_slim +absl_py==1.0.0 +requests==2.25.1 +PyYaml==6.0.0 +sacrebleu==1.4.14 +sacremoses==0.0.46 +click==8.0.4 +mpi4py==3.1.3 +numpy==1.23.5 diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/trainer.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..3487e6367cd226b3da7da4dba1c1bacb2a771843 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/trainer.py @@ -0,0 +1,499 @@ +#!/usr/bin/env python3 + +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021-2022 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - renamed t2t_trainer.py to trainer.py +# - updated imports +# - changed default ExportSavedModelApiVersion to V2 +# - removed unused flags +# - removed TPU related code +# - added no_checkpoints, deterministic_dataset, save_summary_steps, use_horovod, +# use_hpu, use_bf16, bf16_config_path flags +# - removed mtf mode handling +# - added support for horovod +# - added disable_v2_behavior and enable_resource_variables calls +# - removed mlperf log +# - removed call to tf.logging.set_verbosity +# - added support for running on GPU through horovod +# - disabled dynamic shapes by default +# - added support for recipe cache +# - added support for fast inference on HPU +# - changed the default value of the log_step_count_steps flag +# - added line tf.get_logger().propagate = False +# - added profile_steps flag +# - turned off Horovod fusion buffer for Gaudi2 +# - enabled Signaling from Graph feature + +"""Train and evaluate.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import os +import sys +import shutil +from TensorFlow.nlp.transformer import models # pylint: disable=unused-import +from TensorFlow.nlp.transformer.utils import problems as problems_lib # pylint: disable=unused-import +from TensorFlow.nlp.transformer.data_generators import problem # pylint: disable=unused-import + +from TensorFlow.nlp.transformer.utils import contrib +from TensorFlow.nlp.transformer.utils import decoding +from TensorFlow.nlp.transformer.utils import flags as t2t_flags # pylint: disable=unused-import +from TensorFlow.nlp.transformer.utils import hparams_lib +from TensorFlow.nlp.transformer.utils import registry +from TensorFlow.nlp.transformer.utils import trainer_lib +from TensorFlow.nlp.transformer.utils import usr_dir +from TensorFlow.nlp.transformer.utils.mpi import MPI_barrier, MPI_is_distributed, MPI_world_rank +import tensorflow.compat.v1 as tf + +tf.get_logger().propagate = False + +flags = tf.flags +FLAGS = flags.FLAGS + +# See utils/flags.py for additional command-line flags. +flags.DEFINE_string("t2t_usr_dir", None, + "Path to a Python module that will be imported. The " + "__init__.py file should include the necessary imports. " + "The imported files should contain registrations, " + "e.g. @registry.register_model calls, that will then be " + "available to the t2t-trainer.") +flags.DEFINE_integer("random_seed", None, "Random seed.") +flags.DEFINE_integer("tpu_num_shards", 8, "Number of tpu shards.") +flags.DEFINE_string("tpu_job_name", None, + "TPU job name. 
TPUEstimator can auto-infer this but if the " + "configuration is esoteric it should be provided here.") +flags.DEFINE_integer("iterations_per_loop", 100, + "Number of iterations in a TPU training loop.") +flags.DEFINE_bool("use_tpu", False, "Whether to use TPU.") +flags.DEFINE_bool("use_tpu_estimator", False, "Whether to use TPUEstimator. " + "This is always enabled when use_tpu is True.") +flags.DEFINE_integer("export_saved_model_api_version", 2, + "ExportSavedModelApiVersion, 1 V1 or 2 (V2, default). " + "Default V2 uses model_fn_inference_on_tpu for rewrite." + "Flag use_guarantee_const is only enabled in V2.") +flags.DEFINE_bool("use_guarantee_const_getter", False, + "Whether to use GuaranteeConst Ops to mark all weights as " + "constant. It may improve TPU inference performance and " + "reduce HBM arguments usage. Only available when " + "export_saved_model_api_version=2 and use_tpu=True.") +flags.DEFINE_bool("xla_compile", False, + "Whether to use XLA to compile model_fn.") +flags.DEFINE_integer("xla_jit_level", -1, + "GlobalJitLevel to use while compiling the full graph.") +flags.DEFINE_integer("tpu_infeed_sleep_secs", None, + "How long to sleep the infeed thread.") +flags.DEFINE_bool("generate_data", False, "Generate data before training?") +flags.DEFINE_string("tmp_dir", "/tmp/t2t_datagen", + "Temporary storage directory, used if --generate_data.") +flags.DEFINE_bool("profile", False, "Profile performance?") +flags.DEFINE_string("profile_steps", None, "When to start and stop profiling") +flags.DEFINE_integer("inter_op_parallelism_threads", 0, + "Number of inter_op_parallelism_threads to use for CPU. " + "See TensorFlow config.proto for details.") +flags.DEFINE_integer("intra_op_parallelism_threads", 0, + "Number of intra_op_parallelism_threads to use for CPU. " + "See TensorFlow config.proto for details.") +# TODO(lukaszkaiser): resolve memory and variable assign issues and set to True. +flags.DEFINE_bool( + "optionally_use_dist_strat", False, + "Whether to use TensorFlow DistributionStrategy instead of explicitly " + "replicating the model. DistributionStrategy is used only if the " + "model replication configuration is supported by the DistributionStrategy.") +# To maintain compatibility with some internal libs, we guard against these flag +# definitions possibly erroring. Apologies for the ugliness. +try: + flags.DEFINE_string("master", "", "Address of TensorFlow master.") + flags.DEFINE_string("output_dir", "", "Base output directory for run.") + flags.DEFINE_string("schedule", "continuous_train_and_eval", + "Method of Experiment to run.") + flags.DEFINE_integer("eval_steps", 100, + "Number of steps in evaluation. By default, eval will " + "stop after eval_steps or when it runs through the eval " + "dataset once in full, whichever comes first, so this " + "can be a very large number.") +except: # pylint: disable=bare-except + pass + +flags.DEFINE_string("std_server_protocol", "grpc", + "Protocol for tf.train.Server.") + +# Hyperparameter tuning on Cloud ML Engine +# Pass an --hparams_range to enable +flags.DEFINE_string("autotune_objective", None, + "TensorBoard metric name to optimize.") +flags.DEFINE_bool("autotune_maximize", True, + "Whether to maximize (vs. 
minimize) autotune_objective.")
+flags.DEFINE_integer("autotune_max_trials", 10,
+                     "Maximum number of tuning experiments to run.")
+flags.DEFINE_integer("autotune_parallel_trials", 1,
+                     "How many trials to run in parallel (will spin up this "
+                     "many jobs).")
+# Note that in open-source TensorFlow, the dash gets converted to an underscore,
+# so access is FLAGS.job_dir.
+flags.DEFINE_string("job-dir", None,
+                    "DO NOT USE. Exists only for Cloud ML Engine to pass in "
+                    "during hyperparameter tuning. Overrides --output_dir.")
+flags.DEFINE_integer("log_step_count_steps", 50,
+                     "Number of local steps after which progress is printed "
+                     "out.")
+flags.DEFINE_bool("gpu_automatic_mixed_precision", False,
+                  "Whether to employ GPU automatic mixed precision training "
+                  "(via graph rewrite and dynamic loss scaling).")
+
+flags.DEFINE_bool("no_checkpoints", False, "If True, checkpoints will not be saved.")
+flags.DEFINE_bool("deterministic_dataset", False, "If True, the dataset will be deterministic.")
+flags.DEFINE_integer("save_summary_steps", 100, "How often to save summaries to TensorBoard.")
+flags.DEFINE_bool("use_horovod", False, "Use Horovod for training.")
+flags.DEFINE_bool("use_hpu", False, "Use HPU for training.")
+flags.DEFINE_bool("use_bf16", False, "Use automatic bfloat16 conversion (HPU only).")
+
+default_bf16_config_path = os.path.normpath(
+    os.path.join(os.path.realpath(__file__), '..',
+                 'bf16_config', 'transformer.json'))
+flags.DEFINE_string("bf16_config_path", default_bf16_config_path,
+                    "Path to custom mixed precision config (in JSON format).")
+
+flags.DEFINE_string('recipe_cache',
+                    default='/tmp/transformer_recipe_cache/',
+                    help='Path to recipe cache directory. Set to empty to disable recipe cache. Externally set \'TF_RECIPE_CACHE_PATH\' will override this setting.'
+                    )
+
+def set_hparams_from_args(args):
+  """Set hparams overrides from unparsed args list."""
+  if not args:
+    return
+
+  hp_prefix = "--hp_"
+  tf.logging.info("Found unparsed command-line arguments.
Checking if any " + "start with %s and interpreting those as hparams " + "settings.", hp_prefix) + + pairs = [] + i = 0 + while i < len(args): + arg = args[i] + if arg.startswith(hp_prefix): + pairs.append((arg[len(hp_prefix):], args[i+1])) + i += 2 + else: + tf.logging.warn("Found unknown flag: %s", arg) + i += 1 + + as_hparams = ",".join(["%s=%s" % (key, val) for key, val in pairs]) + if FLAGS.hparams: + as_hparams = "," + as_hparams + FLAGS.hparams += as_hparams + + +def create_hparams(): + """Create hparams.""" + hparams_path = os.path.join(FLAGS.output_dir, "hparams.json") + print(FLAGS.hparams) + return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams, + hparams_path=hparams_path) + + +def create_experiment_fn(): + return trainer_lib.create_experiment_fn( + model_name=FLAGS.model, + problem_name=FLAGS.problem, + data_dir=os.path.expanduser(FLAGS.data_dir), + train_steps=FLAGS.train_steps, + eval_steps=FLAGS.eval_steps, + min_eval_frequency=FLAGS.local_eval_frequency, + schedule=FLAGS.schedule, + eval_throttle_seconds=FLAGS.eval_throttle_seconds, + export=FLAGS.export_saved_model, + decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams), + use_tfdbg=FLAGS.tfdbg, + use_dbgprofile=FLAGS.dbgprofile, + eval_early_stopping_steps=FLAGS.eval_early_stopping_steps, + eval_early_stopping_metric=FLAGS.eval_early_stopping_metric, + eval_early_stopping_metric_delta=FLAGS.eval_early_stopping_metric_delta, + eval_early_stopping_metric_minimize=FLAGS + .eval_early_stopping_metric_minimize, + eval_timeout_mins=FLAGS.eval_timeout_mins, + eval_use_test_set=FLAGS.eval_use_test_set, + use_tpu=FLAGS.use_tpu, + use_tpu_estimator=FLAGS.use_tpu_estimator, + use_xla=FLAGS.xla_compile, + export_saved_model_api_version=FLAGS.export_saved_model_api_version, + use_guarantee_const_getter=FLAGS.use_guarantee_const_getter, + warm_start_from=FLAGS.warm_start_from, + decode_from_file=FLAGS.decode_from_file, + decode_to_file=FLAGS.decode_to_file, + decode_reference=FLAGS.decode_reference, + std_server_protocol=FLAGS.std_server_protocol, + use_horovod=FLAGS.use_horovod, + use_hpu=FLAGS.use_hpu) + + +def create_run_config(hp, output_dir=None): + """Create a run config. + + Args: + hp: model hyperparameters + output_dir: model's output directory, defaults to output_dir flag. + + Returns: + a run config + """ + save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency) + save_ckpt_secs = FLAGS.save_checkpoints_secs or None + if save_ckpt_secs: + save_ckpt_steps = None + assert FLAGS.output_dir + tpu_config_extra_kwargs = {} + if FLAGS.tpu_job_name is not None: + tpu_config_extra_kwargs["tpu_job_name"] = FLAGS.tpu_job_name + + model_dir = output_dir or os.path.expanduser(FLAGS.output_dir) + if FLAGS.use_horovod and model_dir: + model_dir = os.path.join(model_dir, f'worker_{hp.hvd_worker_id}') + + save_checkpoints = save_ckpt_steps + if FLAGS.no_checkpoints or (FLAGS.use_horovod and hp.hvd_worker_id != 0): + save_checkpoints = None + + # the various custom getters we have written do not play well together yet. + # TODO(noam): ask rsepassi for help here. 
+ daisy_chain_variables = ( + hp.daisy_chain_variables and + hp.activation_dtype == "float32" and + hp.weight_dtype == "float32") + return trainer_lib.create_run_config( + model_name=FLAGS.model, + model_dir=model_dir, + master=FLAGS.master, + iterations_per_loop=FLAGS.iterations_per_loop, + num_shards=FLAGS.tpu_num_shards, + log_device_placement=FLAGS.log_device_placement, + save_checkpoints_steps=save_checkpoints, + save_checkpoints_secs=save_ckpt_secs, + keep_checkpoint_max=FLAGS.keep_checkpoint_max, + keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours, + num_gpus=FLAGS.worker_gpu, + gpu_order=FLAGS.gpu_order, + num_async_replicas=FLAGS.worker_replicas, + gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction, + enable_graph_rewriter=FLAGS.enable_graph_rewriter, + use_tpu=FLAGS.use_tpu, + use_tpu_estimator=FLAGS.use_tpu_estimator, + xla_jit_level=FLAGS.xla_jit_level, + schedule=FLAGS.schedule, + no_data_parallelism=hp.no_data_parallelism, + optionally_use_dist_strat=FLAGS.optionally_use_dist_strat, + daisy_chain_variables=daisy_chain_variables, + ps_replicas=FLAGS.ps_replicas, + ps_job=FLAGS.ps_job, + ps_gpu=FLAGS.ps_gpu, + sync=FLAGS.sync, + worker_id=FLAGS.worker_id, + worker_job=FLAGS.worker_job, + random_seed=FLAGS.random_seed, + tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs, + inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads, + log_step_count_steps=FLAGS.log_step_count_steps, + intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads, + save_summary_steps=FLAGS.save_summary_steps, + use_hpu=FLAGS.use_hpu) + + +def generate_data(): + # Generate data if requested. + data_dir = os.path.expanduser(FLAGS.data_dir) + tmp_dir = os.path.expanduser(FLAGS.tmp_dir) + tf.gfile.MakeDirs(data_dir) + tf.gfile.MakeDirs(tmp_dir) + + problem_name = FLAGS.problem + tf.logging.info("Generating data for %s" % problem_name) + registry.problem(problem_name).generate_data(data_dir, tmp_dir) + + +@contextlib.contextmanager +def profile_context(): + if FLAGS.profile: + with contrib.tfprof().ProfileContext( + "t2tprof", trace_steps=range(100), dump_steps=range(100)) as pctx: + opts = tf.profiler.ProfileOptionBuilder.time_and_memory() + pctx.add_auto_profiling("op", opts, range(100)) + yield + else: + yield + + +def maybe_log_registry_and_exit(): + if FLAGS.registry_help: + tf.logging.info(registry.help_string()) + sys.exit(0) + + +def is_chief(): + schedules = ["train", "train_and_evaluate", "continuous_train_and_eval"] + return FLAGS.worker_id == 0 and FLAGS.schedule in schedules + + +def save_metadata(hparams): + """Saves FLAGS and hparams to output_dir.""" + output_dir = os.path.expanduser(FLAGS.output_dir) + if not tf.gfile.Exists(output_dir): + tf.gfile.MakeDirs(output_dir) + + # Save FLAGS in txt file + if hasattr(FLAGS, "flags_into_string"): + flags_str = FLAGS.flags_into_string() + t2t_flags_str = "\n".join([ + "--%s=%s" % (f.name, f.value) + for f in FLAGS.flags_by_module_dict()["TensorFlow.nlp.transformer.utils.flags"] + ]) + else: + flags_dict = FLAGS.__dict__["__flags"] + flags_str = "\n".join( + ["--%s=%s" % (name, str(f)) for (name, f) in flags_dict.items()]) + t2t_flags_str = None + + flags_txt = os.path.join(output_dir, "flags.txt") + with tf.gfile.Open(flags_txt, "w") as f: + f.write(flags_str) + + if t2t_flags_str: + t2t_flags_txt = os.path.join(output_dir, "flags_t2t.txt") + with tf.gfile.Open(t2t_flags_txt, "w") as f: + f.write(t2t_flags_str) + + # Save hparams as hparams.json + new_hparams = hparams_lib.copy_hparams(hparams) + # Modality class is not 
JSON serializable so remove. + new_hparams.del_hparam("modality") + + hparams_fname = os.path.join(output_dir, "hparams.json") + with tf.gfile.Open(hparams_fname, "w") as f: + f.write(new_hparams.to_json(indent=0, sort_keys=True)) + + +def execute_schedule(exp): + if not hasattr(exp, FLAGS.schedule): + raise ValueError( + "Experiment has no method %s, from --schedule" % FLAGS.schedule) + schedule = FLAGS.schedule + if schedule == 'continuous_train_and_eval' and \ + FLAGS.use_horovod and exp._hparams.hvd_worker_id != 0: + schedule = 'train' + with profile_context(): + getattr(exp, schedule)() + + +def run_std_server(): + exp = trainer_lib.T2TExperiment(*([None] * 5)) + exp.run_std_server() + +def prepare_recipe_cache(): + # Handle recipe cache. Skip if externally set or empty. + recipe_cache = FLAGS.recipe_cache + if 'TF_RECIPE_CACHE_PATH' not in os.environ.keys() and recipe_cache: + os.environ['TF_RECIPE_CACHE_PATH'] = recipe_cache + + if not MPI_is_distributed() or MPI_world_rank() == 0: + # Clear previous recipe cache. + if os.path.exists(recipe_cache) and os.path.isdir(recipe_cache): + shutil.rmtree(recipe_cache) + # Other ranks should wait for recipe cache to be removed. + MPI_barrier() + +def init_multinode(): + if FLAGS.use_horovod: + if FLAGS.use_hpu: + from habana_frameworks.tensorflow.habana_device import get_type + if get_type() == 'GAUDI2': + os.environ['HOROVOD_FUSION_THRESHOLD'] = "0" + + import horovod.tensorflow as hvd + hvd.init() + assert hvd.is_initialized() + else: + import horovod.tensorflow as hvd + hvd.init() + assert hvd.size() > 1 + os.environ['CUDA_VISIBLE_DEVICES'] = str(hvd.local_rank()) + return hvd + return None + +def main(argv): + tf.disable_v2_behavior() + tf.enable_resource_variables() + + if FLAGS.use_hpu: + from habana_frameworks.tensorflow import load_habana_module # noqa + load_habana_module() + + hvd = init_multinode() + + if FLAGS.use_hpu: + if FLAGS.recipe_cache: + prepare_recipe_cache() + if FLAGS.use_bf16: + os.environ['TF_BF16_CONVERSION'] = FLAGS.bf16_config_path + dyn_shapes_flag = 'TF_ENABLE_DYNAMIC_SHAPES' + if dyn_shapes_flag not in os.environ: + os.environ[dyn_shapes_flag] = 'false' + os.environ["TF_CLUSTER_VARIABLES"] = "1" + + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + + # If we just have to print the registry, do that and exit early. + maybe_log_registry_and_exit() + + # Create HParams. 
+ if argv: + set_hparams_from_args(argv[1:]) + if FLAGS.schedule != "run_std_server": + hparams = create_hparams() + if FLAGS.gpu_automatic_mixed_precision: + setattr(hparams, "gpu_automatic_mixed_precision", True) + if FLAGS.deterministic_dataset: + hparams.add_hparam("deterministic_dataset", True) + + hparams.add_hparam("use_horovod", FLAGS.use_horovod) + hparams.add_hparam("use_hpu", FLAGS.use_hpu) + hparams.add_hparam("profile_steps", FLAGS.profile_steps) + if FLAGS.use_horovod: + hparams.add_hparam("hvd_worker_id", hvd.rank()) + hparams.add_hparam("hvd_size", hvd.size()) + + if FLAGS.schedule == "run_std_server": + run_std_server() + trainer_lib.set_random_seed(FLAGS.random_seed) + + if FLAGS.generate_data: + generate_data() + + exp_fn = create_experiment_fn() + exp = exp_fn(create_run_config(hparams), hparams) + if is_chief(): + save_metadata(hparams) + + from TensorFlow.common.debug import dump_callback + with dump_callback(): + execute_schedule(exp) + +if __name__ == "__main__": + tf.app.run() diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/__init__.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b0a2368a3e35eb4db097267915c30a0dc569953 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/beam_search.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/beam_search.py new file mode 100644 index 0000000000000000000000000000000000000000..e689f1ec29ac8bae3b0c7b588c734227af29d600 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/beam_search.py @@ -0,0 +1,827 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021-2022 Habana Labs, Ltd. 
an Intel Company +############################################################################### +# Changes: +# - updated imports +# - added 'to_float' helper +# - added support for fast inference on HPU +# - replaced tf.slice with tf.gather +# - replaced tf.python.ops.alias_inplace_update with tf.add + tf.scatter_nd + +"""Implementation of beam search with penalties.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import numpy as np + +from TensorFlow.nlp.transformer.layers import common_layers +import tensorflow.compat.v1 as tf + +from tensorflow.python.ops import inplace_ops +from tensorflow.python.util import nest + +# Assuming EOS_ID is 1 +EOS_ID = 1 +# Default value for INF +INF = 1. * 1e7 + +def to_float(x): + """Cast x to float; created because tf.to_float is deprecated.""" + return tf.cast(x, tf.float32) + +def _merge_beam_dim(tensor): + """Reshapes first two dimensions in to single dimension. + + Args: + tensor: Tensor to reshape of shape [A, B, ...] + + Returns: + Reshaped tensor of shape [A*B, ...] + """ + shape = common_layers.shape_list(tensor) + shape[0] *= shape[1] # batch -> batch * beam_size + shape.pop(1) # Remove beam dim + return tf.reshape(tensor, shape) + + +def _unmerge_beam_dim(tensor, batch_size, beam_size): + """Reshapes first dimension back to [batch_size, beam_size]. + + Args: + tensor: Tensor to reshape of shape [batch_size*beam_size, ...] + batch_size: Tensor, original batch size. + beam_size: int, original beam size. + + Returns: + Reshaped tensor of shape [batch_size, beam_size, ...] + """ + shape = common_layers.shape_list(tensor) + new_shape = [batch_size] + [beam_size] + shape[1:] + return tf.reshape(tensor, new_shape) + + +def _expand_to_beam_size(tensor, beam_size): + """Tiles a given tensor by beam_size. + + Args: + tensor: tensor to tile [batch_size, ...] + beam_size: How much to tile the tensor by. + + Returns: + Tiled tensor [batch_size, beam_size, ...] + """ + tensor = tf.expand_dims(tensor, axis=1) + tile_dims = [1] * tensor.shape.ndims + tile_dims[1] = beam_size + + return tf.tile(tensor, tile_dims) + + +def get_state_shape_invariants(tensor): + """Returns the shape of the tensor but sets middle dims to None.""" + shape = tensor.shape.as_list() + for i in range(1, len(shape) - 1): + shape[i] = None + return tf.TensorShape(shape) + + +def compute_batch_indices(batch_size, beam_size): + """Computes the i'th coordinate that contains the batch index for gathers. + + Batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which + batch the beam item is in. This will create the i of the i,j coordinate + needed for the gather. + + Args: + batch_size: Batch size + beam_size: Size of the beam. + Returns: + batch_pos: [batch_size, beam_size] tensor of ids + """ + batch_pos = tf.range(batch_size * beam_size) // beam_size + batch_pos = tf.reshape(batch_pos, [batch_size, beam_size]) + return batch_pos + + +def fast_tpu_gather(params, indices, name=None): + """Fast gather implementation for models running on TPU. + + This function use one_hot and batch matmul to do gather, which is faster + than gather_nd on TPU. For params that have dtype of int32 (sequences to + gather from), batch_gather is used to keep accuracy. + + Args: + params: A tensor from which to gather values. + [batch_size, original_size, ...] + indices: A tensor used as the index to gather values. + [batch_size, selected_size]. + name: A string, name of the operation (optional). 
+ + Returns: + gather_result: A tensor that has the same rank as params. + [batch_size, selected_size, ...] + """ + with tf.name_scope(name): + dtype = params.dtype + + def _gather(params, indices): + """Fast gather using one_hot and batch matmul.""" + if dtype != tf.float32: + params = to_float(params) + shape = common_layers.shape_list(params) + indices_shape = common_layers.shape_list(indices) + ndims = params.shape.ndims + # Adjust the shape of params to match one-hot indices, which is the + # requirement of Batch MatMul. + if ndims == 2: + params = tf.expand_dims(params, axis=-1) + if ndims > 3: + params = tf.reshape(params, [shape[0], shape[1], -1]) + gather_result = tf.matmul( + tf.one_hot(indices, shape[1], dtype=params.dtype), params) + if ndims == 2: + gather_result = tf.squeeze(gather_result, axis=-1) + if ndims > 3: + shape[1] = indices_shape[1] + gather_result = tf.reshape(gather_result, shape) + if dtype != tf.float32: + gather_result = tf.cast(gather_result, dtype) + return gather_result + + # If the dtype is int, use the gather instead of one_hot matmul to avoid + # precision loss. The max int value can be represented by bfloat16 in MXU is + # 256, which is smaller than the possible id values. Encoding/decoding can + # potentially used to make it work, but the benenfit is small right now. + if tf.flags.FLAGS.use_fast_inference or dtype.is_integer: + gather_result = tf.batch_gather(params, indices) + else: + gather_result = _gather(params, indices) + + return gather_result + + +def _create_make_unique(inputs): + """Replaces the lower bits of each element with iota. + + The iota is used to derive the index, and also serves the purpose to + make each element unique to break ties. + + Args: + inputs: A tensor with rank of 2 and dtype of tf.float32. + [batch_size, original_size]. + + Returns: + A tensor after element wise transformation, with dtype the same as inputs. + [batch_size, original_size]. + + Raises: + ValueError: If the rank of the input tensor does not equal 2. + """ + if inputs.shape.ndims != 2: + raise ValueError("Input of top_k_with_unique must be rank-2 " + "but got: %s" % inputs.shape) + + height = inputs.shape[0] + width = inputs.shape[1] + zeros = tf.zeros([height, width], dtype=tf.int32) + + # Count_mask is used to mask away the low order bits to ensure that every + # element is distinct. + log2_ceiling = int(math.ceil(math.log(int(width), 2))) + next_power_of_two = 1 << log2_ceiling + count_mask = ~(next_power_of_two - 1) + count_mask_r0 = tf.constant(count_mask) + count_mask_r2 = tf.fill([height, width], count_mask_r0) + + # Smallest_normal is the bit representation of the smallest positive normal + # floating point number. The sign is zero, exponent is one, and the fraction + # is zero. + smallest_normal = 1 << 23 + smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32) + smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0) + + # Low_bit_mask is used to mask away the sign bit when computing the absolute + # value. + low_bit_mask = ~(1 << 31) + low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32) + low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0) + + iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0), + [height, 1]) + + # Compare the absolute value with positive zero to handle negative zero. 
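+  # (Worked example with hypothetical sizes: for width = 1000, log2_ceiling = 10
+  # and next_power_of_two = 1024, so count_mask = ~1023 clears the 10 low-order
+  # bits of each score's bit pattern; OR-ing in iota, the column index 0..999,
+  # then encodes the index and breaks ties between equal scores, at the cost of
+  # a tiny perturbation of the values.)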
+ input_r2 = tf.bitcast(inputs, tf.int32) + abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2) + if_zero_r2 = tf.equal(abs_r2, zeros) + smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or( + input_r2, smallest_normal_r2) + input_no_zeros_r2 = tf.where( + if_zero_r2, smallest_normal_preserving_sign_r2, input_r2) + + # Discard the low-order bits and replace with iota. + and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2) + or_r2 = tf.bitwise.bitwise_or(and_r2, iota) + return tf.bitcast(or_r2, tf.float32) + + +def _create_topk_unique(inputs, k): + """Creates the top k values in sorted order with indices. + + Args: + inputs: A tensor with rank of 2. [batch_size, original_size]. + k: An integer, number of top elements to select. + + Returns: + topk_r2: A tensor, the k largest elements. [batch_size, k]. + topk_indices_r2: A tensor, indices of the top k values. [batch_size, k]. + """ + height = inputs.shape[0] + width = inputs.shape[1] + neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32) + ones = tf.ones([height, width], dtype=tf.float32) + neg_inf_r2 = ones * neg_inf_r0 + inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs) + + # Select the current largest value k times and keep them in topk_r2. The + # selected largest values are marked as the smallest value to avoid being + # selected again. + tmp = inputs + topk_r2 = tf.zeros([height, k], dtype=tf.float32) + for i in range(k): + kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True) + k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0), + [height, 1]) + topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2) + ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width])) + tmp = tf.where(ge_r2, neg_inf_r2, inputs) + + log2_ceiling = int(math.ceil(math.log(float(int(width)), 2))) + next_power_of_two = 1 << log2_ceiling + count_mask = next_power_of_two - 1 + mask_r0 = tf.constant(count_mask) + mask_r2 = tf.fill([height, k], mask_r0) + topk_r2_s32 = tf.bitcast(topk_r2, tf.int32) + topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2) + return topk_r2, topk_indices_r2 + + +def top_k_with_unique(inputs, k): + """Finds the values and indices of the k largests entries. + + Instead of doing sort like tf.nn.top_k, this function finds the max value + k times. The running time is proportional to k, which is be faster when k + is small. The current implementation supports only inputs of rank 2. + In addition, iota is used to replace the lower bits of each element, this + makes the selection more stable when there are equal elements. The + overhead is that output values are approximated. + + Args: + inputs: A tensor with rank of 2. [batch_size, original_size]. + k: An integer, number of top elements to select. + + Returns: + top_values: A tensor, the k largest elements in sorted order. + [batch_size, k]. + indices: A tensor, indices of the top_values. [batch_size, k]. + """ + unique_inputs = _create_make_unique(tf.cast(inputs, tf.float32)) + top_values, indices = _create_topk_unique(unique_inputs, k) + top_values = tf.cast(top_values, inputs.dtype) + return top_values, indices + + +def compute_topk_scores_and_seq(sequences, + scores, + scores_to_gather, + flags, + beam_size, + batch_size, + prefix="default", + states_to_gather=None, + use_tpu=False, + use_top_k_with_unique=True): + """Given sequences and scores, will gather the top k=beam size sequences. + + This function is used to grow alive, and finished. 
It takes sequences, + scores, and flags, and returns the top k from sequences, scores_to_gather, + and flags based on the values in scores. + + This method permits easy introspection using tfdbg. It adds three named ops + that are prefixed by `prefix`: + - _topk_seq: the tensor for topk_seq returned by this method. + - _topk_flags: the tensor for topk_finished_flags returned by this method. + - _topk_scores: the tensor for tokp_gathered_scores returned by this method. + + Args: + sequences: Tensor of sequences that we need to gather from. + [batch_size, beam_size, seq_length] + scores: Tensor of scores for each sequence in sequences. + [batch_size, beam_size]. We will use these to compute the topk. + scores_to_gather: Tensor of scores for each sequence in sequences. + [batch_size, beam_size]. We will return the gathered scores from here. + Scores to gather is different from scores because for grow_alive, we will + need to return log_probs, while for grow_finished, we will need to return + the length penalized scores. + flags: Tensor of bools for sequences that say whether a sequence has reached + EOS or not + beam_size: int + batch_size: int + prefix: string that will prefix unique names for the ops run. + states_to_gather: dict (possibly nested) of decoding states. + use_tpu: A bool, whether to compute topk scores and sequences on TPU. + use_top_k_with_unique: bool, whether to use a fast (but decreased precision) + top_k during TPU beam search. + + Returns: + Tuple of + (topk_seq [batch_size, beam_size, decode_length], + topk_gathered_scores [batch_size, beam_size], + topk_finished_flags[batch_size, beam_size]) + """ + if not tf.flags.FLAGS.use_fast_inference and not use_tpu: + _, topk_indexes = tf.nn.top_k(scores, k=beam_size) + # The next three steps are to create coordinates for tf.gather_nd to pull + # out the topk sequences from sequences based on scores. + # batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which + # batch the beam item is in. This will create the i of the i,j coordinate + # needed for the gather + batch_pos = compute_batch_indices(batch_size, beam_size) + + # top coordinates will give us the actual coordinates to do the gather. + # stacking will create a tensor of dimension batch * beam * 2, where the + # last dimension contains the i,j gathering coordinates. + top_coordinates = tf.stack([batch_pos, topk_indexes], axis=2) + + # Gather up the highest scoring sequences. For each operation added, give + # it a concrete name to simplify observing these operations with tfdbg. + # Clients can capture these tensors by watching these node names. + def gather(tensor, name): + return tf.gather_nd(tensor, top_coordinates, name=(prefix + name)) + topk_seq = gather(sequences, "_topk_seq") + topk_flags = gather(flags, "_topk_flags") + topk_gathered_scores = gather(scores_to_gather, "_topk_scores") + if states_to_gather: + topk_gathered_states = nest.map_structure( + lambda state: gather(state, "_topk_states"), states_to_gather) + else: + topk_gathered_states = states_to_gather + else: + if use_top_k_with_unique: + _, topk_indexes = top_k_with_unique(scores, k=beam_size) + else: + _, topk_indexes = tf.nn.top_k(scores, k=beam_size) + # Gather up the highest scoring sequences. For each operation added, give + # it a concrete name to simplify observing these operations with tfdbg. + # Clients can capture these tensors by watching these node names. 
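+    # (For example, the default prefix yields nodes named "default_topk_seq",
+    # "default_topk_flags" and "default_topk_scores"; the grow_alive and
+    # grow_finished callers pass their own prefixes instead.)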
+ topk_seq = fast_tpu_gather(sequences, topk_indexes, prefix + "_topk_seq") + topk_flags = fast_tpu_gather(flags, topk_indexes, prefix + "_topk_flags") + topk_gathered_scores = fast_tpu_gather(scores_to_gather, topk_indexes, + prefix + "_topk_scores") + if states_to_gather: + topk_gathered_states = nest.map_structure( + # pylint: disable=g-long-lambda + lambda state: fast_tpu_gather(state, topk_indexes, + prefix + "_topk_states"), + states_to_gather) + else: + topk_gathered_states = states_to_gather + return topk_seq, topk_gathered_scores, topk_flags, topk_gathered_states + + +def beam_search(symbols_to_logits_fn, + initial_ids, + beam_size, + decode_length, + vocab_size, + alpha, + states=None, + eos_id=EOS_ID, + stop_early=True, + use_tpu=False, + use_top_k_with_unique=True): + """Beam search with length penalties. + + Requires a function that can take the currently decoded symbols and return + the logits for the next symbol. The implementation is inspired by + https://arxiv.org/abs/1609.08144. + + When running, the beam search steps can be visualized by using tfdbg to watch + the operations generating the output ids for each beam step. These operations + have the pattern: + (alive|finished)_topk_(seq,scores) + + Operations marked `alive` represent the new beam sequences that will be + processed in the next step. Operations marked `finished` represent the + completed beam sequences, which may be padded with 0s if no beams finished. + + Operations marked `seq` store the full beam sequence for the time step. + Operations marked `scores` store the sequence's final log scores. + + The beam search steps will be processed sequentially in order, so when + capturing observed from these operations, tensors, clients can make + assumptions about which step is being recorded. + + WARNING: Assumes 2nd dimension of tensors in `states` and not invariant, this + means that the shape of the 2nd dimension of these tensors will not be + available (i.e. set to None) inside symbols_to_logits_fn. + + Args: + symbols_to_logits_fn: Interface to the model, to provide logits. + Shoud take [batch_size, decoded_ids] and return [batch_size, vocab_size] + initial_ids: Ids to start off the decoding, this will be the first thing + handed to symbols_to_logits_fn (after expanding to beam size) + [batch_size] + beam_size: Size of the beam. + decode_length: Number of steps to decode for. + vocab_size: Size of the vocab, must equal the size of the logits returned by + symbols_to_logits_fn + alpha: alpha for length penalty. + states: dict (possibly nested) of decoding states. + eos_id: ID for end of sentence. + stop_early: a boolean - stop once best sequence is provably determined. + use_tpu: A bool, whether to do beam search on TPU. + use_top_k_with_unique: bool, whether to use a fast (but decreased precision) + top_k during TPU beam search. + + Returns: + Tuple of + (decoded beams [batch_size, beam_size, decode_length] + decoding probabilities [batch_size, beam_size]) + """ + batch_size = common_layers.shape_list(initial_ids)[0] + + # Assume initial_ids are prob 1.0 + initial_log_probs = tf.constant([[0.] 
+ [-INF] * (beam_size - 1)]) + # Expand to beam_size (batch_size, beam_size) + alive_log_probs = tf.tile(initial_log_probs, [batch_size, 1]) + + # Expand each batch and state to beam_size + alive_seq = _expand_to_beam_size(initial_ids, beam_size) + alive_seq = tf.expand_dims(alive_seq, axis=2) # (batch_size, beam_size, 1) + if tf.flags.FLAGS.use_fast_inference or use_tpu: + alive_seq = tf.tile(alive_seq, [1, 1, decode_length + 1]) + if states: + states = nest.map_structure( + lambda state: _expand_to_beam_size(state, beam_size), states) + else: + states = {} + + # Finished will keep track of all the sequences that have finished so far + # Finished log probs will be negative infinity in the beginning + # finished_flags will keep track of booleans + finished_seq = tf.zeros(common_layers.shape_list(alive_seq), tf.int32) + # Setting the scores of the initial to negative infinity. + finished_scores = tf.ones([batch_size, beam_size]) * -INF + finished_flags = tf.zeros([batch_size, beam_size], tf.bool) + + def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq, + curr_scores, curr_finished): + """Given sequences and scores, will gather the top k=beam size sequences. + + Args: + finished_seq: Current finished sequences. + [batch_size, beam_size, current_decoded_length] + finished_scores: scores for each of these sequences. + [batch_size, beam_size] + finished_flags: finished bools for each of these sequences. + [batch_size, beam_size] + curr_seq: current topk sequence that has been grown by one position. + [batch_size, beam_size, current_decoded_length] + curr_scores: scores for each of these sequences. [batch_size, beam_size] + curr_finished: Finished flags for each of these sequences. + [batch_size, beam_size] + Returns: + Tuple of + (Topk sequences based on scores, + log probs of these sequences, + Finished flags of these sequences) + """ + if not tf.flags.FLAGS.use_fast_inference and not use_tpu: + # First append a column of 0'ids to finished to make the same length with + # finished scores + finished_seq = tf.concat( + [finished_seq, + tf.zeros([batch_size, beam_size, 1], tf.int32)], axis=2) + + # Set the scores of the unfinished seq in curr_seq to large negative + # values + curr_scores += (1. - to_float(curr_finished)) * -INF + # concatenating the sequences and scores along beam axis + curr_finished_seq = tf.concat([finished_seq, curr_seq], axis=1) + curr_finished_scores = tf.concat([finished_scores, curr_scores], axis=1) + curr_finished_flags = tf.concat([finished_flags, curr_finished], axis=1) + return compute_topk_scores_and_seq( + curr_finished_seq, + curr_finished_scores, + curr_finished_scores, + curr_finished_flags, + beam_size, + batch_size, + "grow_finished", + use_tpu=use_tpu, + use_top_k_with_unique=use_top_k_with_unique) + + def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished, states): + """Given sequences and scores, will gather the top k=beam size sequences. + + Args: + curr_seq: current topk sequence that has been grown by one position. + [batch_size, beam_size, i+1] + curr_scores: scores for each of these sequences. [batch_size, beam_size] + curr_log_probs: log probs for each of these sequences. + [batch_size, beam_size] + curr_finished: Finished flags for each of these sequences. + [batch_size, beam_size] + states: dict (possibly nested) of decoding states. 
+ Returns: + Tuple of + (Topk sequences based on scores, + log probs of these sequences, + Finished flags of these sequences) + """ + # Set the scores of the finished seq in curr_seq to large negative + # values + curr_scores += to_float(curr_finished) * -INF + return compute_topk_scores_and_seq(curr_seq, curr_scores, curr_log_probs, + curr_finished, beam_size, batch_size, + "grow_alive", states, use_tpu=use_tpu, use_top_k_with_unique=use_top_k_with_unique) + + def grow_topk(i, alive_seq, alive_log_probs, states): + r"""Inner beam search loop. + + This function takes the current alive sequences, and grows them to topk + sequences where k = 2*beam. We use 2*beam because, we could have beam_size + number of sequences that might hit and there will be no alive + sequences to continue. With 2*beam_size, this will not happen. This relies + on the assumption the vocab size is > beam size. If this is true, we'll + have at least beam_size non extensions if we extract the next top + 2*beam words. + Length penalty is given by = (5+len(decode)/6) ^ -\alpha. Pls refer to + https://arxiv.org/abs/1609.08144. + + Args: + i: loop index + alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1] + alive_log_probs: probabilities of these sequences. [batch_size, beam_size] + states: dict (possibly nested) of decoding states. + Returns: + Tuple of + (Topk sequences extended by the next word, + The log probs of these sequences, + The scores with length penalty of these sequences, + Flags indicating which of these sequences have finished decoding, + dict of transformed decoding states) + """ + # Get the logits for all the possible next symbols + if (tf.flags.FLAGS.use_fast_inference or use_tpu) and states: + flat_ids = tf.reshape( + tf.gather(alive_seq, [i], axis=2), [batch_size * beam_size, -1]) + else: + flat_ids = tf.reshape(alive_seq, [batch_size * beam_size, -1]) + + # (batch_size * beam_size, decoded_length) + if states: + flat_states = nest.map_structure(_merge_beam_dim, states) + flat_logits, flat_states = symbols_to_logits_fn(flat_ids, i, flat_states) + states = nest.map_structure( + lambda t: _unmerge_beam_dim(t, batch_size, beam_size), flat_states) + elif tf.flags.FLAGS.use_fast_inference or use_tpu: + flat_logits = symbols_to_logits_fn(flat_ids, i) + else: + flat_logits = symbols_to_logits_fn(flat_ids) + + logits = tf.reshape(flat_logits, [batch_size, beam_size, -1]) + + # Convert logits to normalized log probs + candidate_log_probs = common_layers.log_prob_from_logits(logits) + + # Multiply the probabilities by the current probabilities of the beam. + # (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1) + log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2) + + length_penalty = tf.pow(((5. + to_float(i + 1)) / 6.), alpha) + + curr_scores = log_probs / length_penalty + # Flatten out (beam_size, vocab_size) probs in to a list of possibilities + flat_curr_scores = tf.reshape(curr_scores, [-1, beam_size * vocab_size]) + + if use_tpu and use_top_k_with_unique: + topk_scores, topk_ids = top_k_with_unique( + flat_curr_scores, k=beam_size * 2) + else: + topk_scores, topk_ids = tf.nn.top_k(flat_curr_scores, k=beam_size * 2) + + # Recovering the log probs because we will need to send them back + topk_log_probs = topk_scores * length_penalty + + # Work out what beam the top probs are in. 
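+    # (Toy example with made-up sizes: if beam_size = 4 and vocab_size = 8, a
+    # flattened id of 13 decodes to beam 13 // 8 = 1 and token 13 % 8 = 5.)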
+ topk_beam_index = topk_ids // vocab_size + topk_ids %= vocab_size # Unflatten the ids + + if not tf.flags.FLAGS.use_fast_inference and not use_tpu: + # The next three steps are to create coordinates for tf.gather_nd to pull + # out the correct sequences from id's that we need to grow. + # We will also use the coordinates to gather the booleans of the beam + # items that survived. + batch_pos = compute_batch_indices(batch_size, beam_size * 2) + + # top beams will give us the actual coordinates to do the gather. + # stacking will create a tensor of dimension batch * beam * 2, where the + # last dimension contains the i,j gathering coordinates. + topk_coordinates = tf.stack([batch_pos, topk_beam_index], axis=2) + + # Gather up the most probable 2*beams both for the ids and + # finished_in_alive bools + topk_seq = tf.gather_nd(alive_seq, topk_coordinates) + if states: + states = nest.map_structure( + lambda state: tf.gather_nd(state, topk_coordinates), states) + + # Append the most probable alive + topk_seq = tf.concat([topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2) + else: + # Gather up the most probable 2*beams both for the ids and + # finished_in_alive bools + topk_seq = fast_tpu_gather(alive_seq, topk_beam_index) + + if states: + states = nest.map_structure( + lambda state: fast_tpu_gather(state, topk_beam_index), states) + + # Update the most probable alive + topk_seq = tf.transpose(topk_seq, perm=[2, 0, 1]) + # While TPU originally uses alias_inplace_update it's unavailable on HPU. + # Due to a bug in TensorScatterUpdate we're currently using add + scatter_nd + topk_seq = tf.add(topk_seq, tf.scatter_nd([[i + 1]], tf.expand_dims(topk_ids, 0), topk_seq.shape)) + topk_seq = tf.transpose(topk_seq, perm=[1, 2, 0]) + + topk_finished = tf.equal(topk_ids, eos_id) + + return topk_seq, topk_log_probs, topk_scores, topk_finished, states + + def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores, + finished_flags, states): + """Inner beam search loop. + + There are three groups of tensors, alive, finished, and topk. + The alive group contains information about the current alive sequences + The topk group contains information about alive + topk current decoded words + the finished group contains information about finished sentences, that is, + the ones that have decoded to . These are what we return. + The general beam search algorithm is as follows: + While we haven't terminated (pls look at termination condition) + 1. Grow the current alive to get beam*2 topk sequences + 2. Among the topk, keep the top beam_size ones that haven't reached EOS + into alive + 3. Among the topk, keep the top beam_size ones have reached EOS into + finished + Repeat + To make things simple with using fixed size tensors, we will end + up inserting unfinished sequences into finished in the beginning. To stop + that we add -ve INF to the score of the unfinished sequence so that when a + true finished sequence does appear, it will have a higher score than all the + unfinished ones. + + Args: + i: loop index + alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1] + alive_log_probs: probabilities of the beams. [batch_size, beam_size] + finished_seq: Current finished sequences. + [batch_size, beam_size, i+1] + finished_scores: scores for each of these sequences. + [batch_size, beam_size] + finished_flags: finished bools for each of these sequences. + [batch_size, beam_size] + states: dict (possibly nested) of decoding states. 
+ + Returns: + Tuple of + (Incremented loop index + New alive sequences, + Log probs of the alive sequences, + New finished sequences, + Scores of the new finished sequences, + Flags indicating which sequence in finished as reached EOS, + dict of final decoding states) + """ + + # Each inner loop, we carry out three steps: + # 1. Get the current topk items. + # 2. Extract the ones that have finished and haven't finished + # 3. Recompute the contents of finished based on scores. + topk_seq, topk_log_probs, topk_scores, topk_finished, states = grow_topk( + i, alive_seq, alive_log_probs, states) + alive_seq, alive_log_probs, _, states = grow_alive( + topk_seq, topk_scores, topk_log_probs, topk_finished, states) + finished_seq, finished_scores, finished_flags, _ = grow_finished( + finished_seq, finished_scores, finished_flags, topk_seq, topk_scores, + topk_finished) + + return (i + 1, alive_seq, alive_log_probs, finished_seq, finished_scores, + finished_flags, states) + + def _is_not_finished(i, unused_alive_seq, alive_log_probs, + unused_finished_seq, finished_scores, + unused_finished_in_finished, unused_states): + """Checking termination condition. + + We terminate when we decoded up to decode_length or the lowest scoring item + in finished has a greater score that the highest prob item in alive divided + by the max length penalty + + Args: + i: loop index + alive_log_probs: probabilities of the beams. [batch_size, beam_size] + finished_scores: scores for each of these sequences. + [batch_size, beam_size] + + Returns: + Bool. + """ + max_length_penalty = tf.pow(((5. + to_float(decode_length)) / 6.), alpha) + # The best possible score of the most likely alive sequence. + lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty + + if not stop_early: + # by considering the min score (in the top N beams) we ensure that + # the decoder will keep decoding until there is at least one beam + # (in the top N) that can be improved (w.r.t. the alive beams). + # any unfinished beam will have score -INF - thus the min + # will always be -INF if there is at least one unfinished beam - + # which means the bound_is_met condition cannot be true in this case. + lowest_score_of_finished_in_finished = tf.reduce_min(finished_scores) + else: + # by taking the max score we only care about the first beam; + # as soon as this first beam cannot be beaten from the alive beams + # the beam decoder can stop. + # similarly to the above, if the top beam is not completed, its + # finished_score is -INF, thus it will not activate the + # bound_is_met condition. (i.e., decoder will keep going on). 
+ # note we need to find the max for every sequence eparately - so, we need + # to keep the batch dimension (see axis=1) + lowest_score_of_finished_in_finished = tf.reduce_max(finished_scores, + axis=1) + + bound_is_met = tf.reduce_all( + tf.greater(lowest_score_of_finished_in_finished, + lower_bound_alive_scores)) + + return tf.logical_and( + tf.less(i, decode_length), tf.logical_not(bound_is_met)) + + inner_shape = tf.TensorShape([None, None, None]) + if tf.flags.FLAGS.use_fast_inference or use_tpu: + inner_shape = tf.TensorShape([batch_size, beam_size, decode_length + 1]) + if tf.flags.FLAGS.use_fast_inference or use_tpu: + state_struc = nest.map_structure(lambda state: state.get_shape(), states) + else: + state_struc = nest.map_structure(get_state_shape_invariants, states) + (_, alive_seq, alive_log_probs, finished_seq, finished_scores, + finished_flags, states) = tf.while_loop( + _is_not_finished, + inner_loop, [ + tf.constant(0), alive_seq, alive_log_probs, finished_seq, + finished_scores, finished_flags, states + ], + shape_invariants=[ + tf.TensorShape([]), + inner_shape, + alive_log_probs.get_shape(), + inner_shape, + finished_scores.get_shape(), + finished_flags.get_shape(), + state_struc + ], + parallel_iterations=1, + back_prop=False) + + alive_seq.set_shape((None, beam_size, None)) + finished_seq.set_shape((None, beam_size, None)) + + # Accounting for corner case: It's possible that no sequence in alive for a + # particular batch item ever reached EOS. In that case, we should just copy + # the contents of alive for that batch item. tf.reduce_any(finished_flags, 1) + # if 0, means that no sequence for that batch index had reached EOS. We need + # to do the same for the scores as well. + finished_seq = tf.where( + tf.reduce_any(finished_flags, 1), finished_seq, alive_seq) + finished_scores = tf.where( + tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs) + return finished_seq, finished_scores, states diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/data_reader.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/data_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..8edad79028d7661d45bf72d94139a85171ccddb5 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/data_reader.py @@ -0,0 +1,594 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. 
an Intel Company +############################################################################### +# Changes: +# - updated imports +# - changed to_int32 to cast +# - removed '_summarize_features' +# - remove mlperf logs +# - removed tpu code +# - added dataset padding +# - added support for deterministic_dataset hparam + +"""Data reader module.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import multiprocessing +import random + +import six +from six.moves import range # pylint: disable=redefined-builtin + +from TensorFlow.nlp.transformer.utils import contrib + +import tensorflow.compat.v1 as tf + + +def cast_ints_to_int32(features): + f = {} + for k, v in sorted(six.iteritems(features)): + if v.dtype in [tf.int64, tf.uint8]: + v = tf.cast(v, tf.int32) + f[k] = v + return f + + +def example_length(example): + length = 0 + # Length of the example is the maximum length of the feature lengths + for _, v in sorted(six.iteritems(example)): + # For images the sequence length is the size of the spatial dimensions. + feature_length = tf.shape(v)[0] + if len(v.get_shape()) > 2: + feature_length = tf.shape(v)[0] * tf.shape(v)[1] + length = tf.maximum(length, feature_length) + return length + + +def example_valid_size(example, min_length, max_length): + length = example_length(example) + return tf.logical_and( + length >= min_length, + length <= max_length, + ) + + +def padded_batch(dataset, batch_size, padded_shapes=None): + padded_shapes = padded_shapes or dict( + [(name, [None] * len(shape)) + for name, shape in dataset.output_shapes.items()]) + return dataset.padded_batch(batch_size, padded_shapes) + + +def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1): + """A default set of length-bucket boundaries.""" + assert length_bucket_step > 1.0 + x = min_length + boundaries = [] + while x < max_length: + boundaries.append(x) + x = max(x + 1, int(x * length_bucket_step)) + return boundaries + + +def batching_scheme(batch_size, + max_length, + min_length_bucket, + length_bucket_step, + drop_long_sequences=False, + shard_multiplier=1, + length_multiplier=1, + min_length=0): + """A batching scheme based on model hyperparameters. + + Every batch contains a number of sequences divisible by `shard_multiplier`. + + Args: + batch_size: int, total number of tokens in a batch. + max_length: int, sequences longer than this will be skipped. Defaults to + batch_size. + min_length_bucket: int + length_bucket_step: float greater than 1.0 + drop_long_sequences: bool, if True, then sequences longer than + `max_length` are dropped. This prevents generating batches with + more than the usual number of tokens, which can cause out-of-memory + errors. + shard_multiplier: an integer increasing the batch_size to suit splitting + across datashards. + length_multiplier: an integer multiplier that is used to increase the + batch sizes and sequence length tolerance. + min_length: int, sequences shorter than this will be skipped. 
+ + Returns: + A dictionary with parameters that can be passed to input_pipeline: + * boundaries: list of bucket boundaries + * batch_sizes: list of batch sizes for each length bucket + * max_length: int, maximum length of an example + + Raises: + ValueError: If min_length > max_length + """ + max_length = max_length or batch_size + if max_length < min_length: + raise ValueError("max_length must be greater or equal to min_length") + + boundaries = _bucket_boundaries(max_length, min_length_bucket, + length_bucket_step) + boundaries = [boundary * length_multiplier for boundary in boundaries] + max_length *= length_multiplier + + batch_sizes = [ + max(1, batch_size // length) for length in boundaries + [max_length] + ] + max_batch_size = max(batch_sizes) + # Since the Datasets API only allows a single constant for window_size, + # and it needs divide all bucket_batch_sizes, we pick a highly-composite + # window size and then round down all batch sizes to divisors of that window + # size, so that a window can always be divided evenly into batches. + # TODO(noam): remove this when Dataset API improves. + highly_composite_numbers = [ + 1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680, + 2520, 5040, 7560, 10080, 15120, 20160, 25200, 27720, 45360, 50400, 55440, + 83160, 110880, 166320, 221760, 277200, 332640, 498960, 554400, 665280, + 720720, 1081080, 1441440, 2162160, 2882880, 3603600, 4324320, 6486480, + 7207200, 8648640, 10810800, 14414400, 17297280, 21621600, 32432400, + 36756720, 43243200, 61261200, 73513440, 110270160 + ] + window_size = max( + [i for i in highly_composite_numbers if i <= 3 * max_batch_size]) + divisors = [i for i in range(1, window_size + 1) if window_size % i == 0] + batch_sizes = [max([d for d in divisors if d <= bs]) for bs in batch_sizes] + window_size *= shard_multiplier + batch_sizes = [bs * shard_multiplier for bs in batch_sizes] + # The Datasets API splits one window into multiple batches, which + # produces runs of many consecutive batches of the same size. This + # is bad for training. To solve this, we will shuffle the batches + # using a queue which must be several times as large as the maximum + # number of batches per window. 
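+  # (Rough illustration with hypothetical numbers: if max_batch_size were 512,
+  # the largest highly composite number <= 3 * 512 = 1536 is 1260, so window_size
+  # becomes 1260 and every bucket batch size is rounded down to a divisor of it;
+  # with min(batch_sizes) = 4 this gives 1260 // 4 = 315 batches per window and a
+  # shuffle queue of 3 * 315 = 945 batches.)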
+ max_batches_per_window = window_size // min(batch_sizes) + shuffle_queue_size = max_batches_per_window * 3 + + ret = { + "boundaries": boundaries, + "batch_sizes": batch_sizes, + "min_length": min_length, + "max_length": (max_length if drop_long_sequences else 10**9), + "shuffle_queue_size": shuffle_queue_size, + } + return ret + + +def hparams_to_batching_scheme(hparams, + drop_long_sequences=False, + shard_multiplier=1, + length_multiplier=1): + """Wrapper around _batching_scheme with hparams.""" + return batching_scheme( + batch_size=hparams.batch_size, + min_length=hparams.min_length, + max_length=hparams.max_length, + min_length_bucket=hparams.min_length_bucket, + length_bucket_step=hparams.length_bucket_step, + drop_long_sequences=drop_long_sequences, + shard_multiplier=shard_multiplier, + length_multiplier=length_multiplier) + + +class DummyQueueRunner(object): + """Can stand-in for a QueueRunner but does nothing.""" + + def __init__(self): + pass + + def create_threads(self, sess, coord=None, daemon=False, start=False): + del sess, coord, daemon, start + return [] + + +def pad_for_tpu(shapes_dict, hparams, max_length): + """Pads unknown features' dimensions for TPU.""" + padded_shapes = {} + + def get_filler(specified_max_length): + if not specified_max_length: + return max_length + return min(specified_max_length, max_length) + + inputs_none_filler = get_filler(hparams.max_input_seq_length) + targets_none_filler = get_filler(hparams.max_target_seq_length) + + def pad_one_shape(shape, none_filler): + return [ + (dim if dim is not None else none_filler) for dim in shape.as_list() + ] + + for key, shape in six.iteritems(shapes_dict): + if key == "inputs": + padded_shapes[key] = pad_one_shape(shape, inputs_none_filler) + elif key == "targets": + padded_shapes[key] = pad_one_shape(shape, targets_none_filler) + else: + padded_shapes[key] = pad_one_shape(shape, max_length) + return padded_shapes + + +def cpu_count(): + """Return the number of available cores.""" + num_available_cores = multiprocessing.cpu_count() + return num_available_cores + + +def standardize_shapes(features, batch_size=None): + """Set the right shapes for the features.""" + for fname in ["inputs", "targets"]: + if fname not in features: + continue + f = features[fname] + while len(f.get_shape()) < 4: + f = tf.expand_dims(f, axis=-1) + features[fname] = f + + if batch_size: + # Ensure batch size is set on all features + for _, t in six.iteritems(features): + shape = t.get_shape().as_list() + shape[0] = batch_size + t.set_shape(t.get_shape().merge_with(shape)) + # Assert shapes are fully known + t.get_shape().assert_is_fully_defined() + + return features + + +def _are_shapes_fully_defined(shapes_dict): + for shape in shapes_dict.values(): + if not shape.is_fully_defined(): + return False + return True + + +def _file_num_records_cached(filename): + """Return the number of TFRecords in a file.""" + # Cache the result, as this is expensive to compute + if filename in _file_num_records_cache: + return _file_num_records_cache[filename] + ret = 0 + for _ in tf.python_io.tf_record_iterator(filename): + ret += 1 + _file_num_records_cache[filename] = ret + return ret + + +_file_num_records_cache = {} + + +def skip_random_fraction(dataset, data_file): + # Skip a random fraction at the beginning of the stream. The skip is + # essential for synchronous highly-parallel training to avoid multiple + # replicas reading the same data in lock-step. 
+ num_skip = random.randint(0, _file_num_records_cached(data_file)) + return dataset.skip(num_skip) + + +def pad_batch(features, batch_multiple): + """Pad batch dim of features to nearest multiple of batch_multiple.""" + feature = list(features.items())[0][1] + batch_size = tf.shape(feature)[0] + mod = batch_size % batch_multiple + has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32) + batch_padding = batch_multiple * has_mod - mod + + padded_features = {} + for k, feature in features.items(): + rank = len(feature.shape) + paddings = [[0, 0] for _ in range(rank)] + paddings[0][1] = batch_padding + padded_feature = tf.pad(feature, paddings) + padded_features[k] = padded_feature + return padded_features + + +# TODO(lukaszkaiser): refactor the API to not be just a list of self params +# but make sense for other uses too. +def input_fn(dataset, + filepattern, + skip_random_fraction_when_training, + batch_size_means_tokens_param, + batch_size_multiplier, + max_length, + mode, + hparams, + data_dir=None, + params=None, + config=None, + force_repeat=False, + prevent_repeat=False): + """Builds input pipeline for problem. + + Args: + dataset: the dataset to make input function from. + filepattern: the pattern of files to read from. + skip_random_fraction_when_training: whether to skip randomly when training. + batch_size_means_tokens_param: whether batch size should mean tokens. + batch_size_multiplier: how to multiply batch size when bucketing. + max_length: maximum length, + mode: tf.estimator.ModeKeys + hparams: HParams, model hparams + data_dir: str, data directory; if None, will use hparams.data_dir + params: dict, may include "batch_size" + config: RunConfig; should have the data_parallelism attribute if not using + TPU + force_repeat: bool, whether to repeat the data even if not training + prevent_repeat: bool, whether to not repeat when in training mode. + Overrides force_repeat. 
+ + Returns: + (features_dict, Tensor targets) + """ + is_training = mode == tf.estimator.ModeKeys.TRAIN + if config and config.use_tpu: + num_threads = 64 + else: + num_threads = cpu_count() if is_training else 1 + + if config and hasattr(config, + "data_parallelism") and config.data_parallelism: + num_shards = config.data_parallelism.n + else: + num_shards = 1 + + if hasattr(hparams, 'deterministic_dataset') and hparams.deterministic_dataset: + num_threads = 1 + skip_random_fraction_when_training = False + hparams.batch_shuffle_size = 0 + + def tpu_valid_size(example): + return example_valid_size(example, hparams.min_length, max_length) + + def gpu_valid_size(example): + drop_long_sequences = is_training + max_validate_length = max_length if drop_long_sequences else 10**9 + return example_valid_size(example, hparams.min_length, max_validate_length) + + def define_shapes(example): + batch_size = config and config.use_tpu and params["batch_size"] + return standardize_shapes(example, batch_size=batch_size) + + # Read and preprocess + data_dir = data_dir or (hasattr(hparams, "data_dir") and hparams.data_dir) + + if is_training and hparams.use_horovod: + dataset = dataset.shard(num_shards=hparams.hvd_size, index=hparams.hvd_worker_id) + + if (force_repeat or is_training) and not prevent_repeat: + # Repeat and skip a random number of records + dataset = dataset.repeat() + + if is_training and skip_random_fraction_when_training: + data_files = contrib.slim().parallel_reader.get_data_files(filepattern) + # In continuous_train_and_eval when switching between train and + # eval, this input_fn method gets called multiple times and it + # would give you the exact same samples from the last call + # (because the Graph seed is set). So this skip gives you some + # shuffling. + dataset = skip_random_fraction(dataset, data_files[0]) + + dataset = dataset.map(cast_ints_to_int32, num_parallel_calls=num_threads) + + if batch_size_means_tokens_param: + batch_size_means_tokens = True + else: + if _are_shapes_fully_defined(dataset.output_shapes): + batch_size_means_tokens = False + else: + tf.logging.warning( + "Shapes are not fully defined. Assuming batch_size means tokens.") + batch_size_means_tokens = True + + # Batching + if not batch_size_means_tokens: + # Batch size means examples per datashard. + if config and config.use_tpu: + # on TPU, we use params["batch_size"], which specifies the number of + # examples across all datashards + batch_size = params["batch_size"] + dataset = dataset.batch(batch_size, drop_remainder=True) + else: + batch_size = hparams.batch_size * num_shards + dataset = dataset.batch(batch_size) + else: + # batch_size means tokens per datashard + if config and config.use_tpu: + dataset = dataset.filter(tpu_valid_size) + padded_shapes = pad_for_tpu(dataset.output_shapes, hparams, max_length) + # on TPU, we use params["batch_size"], which specifies the number of + # examples across all datashards + batch_size = params["batch_size"] + if hparams.pad_batch: + tf.logging.warn( + "Padding the batch to ensure that remainder eval batches are " + "processed. This may lead to incorrect metrics for " + "non-zero-padded features, e.g. images. 
Use a smaller batch " + "size that has no remainder in that case.") + dataset = dataset.padded_batch( + batch_size, padded_shapes, drop_remainder=False) + dataset = dataset.map( + functools.partial(pad_batch, batch_multiple=batch_size), + num_parallel_calls=num_threads) + else: + dataset = dataset.padded_batch( + batch_size, padded_shapes, drop_remainder=True) + else: + # On GPU, bucket by length + dataset = dataset.filter(gpu_valid_size) + cur_batching_scheme = hparams_to_batching_scheme( + hparams, + shard_multiplier=num_shards, + length_multiplier=batch_size_multiplier) + if hparams.use_fixed_batch_size: + # Here batch_size really means examples per datashard. + cur_batching_scheme["batch_sizes"] = [hparams.batch_size] + cur_batching_scheme["boundaries"] = [] + + force_fixed_batch_size = hparams.use_static_shapes + fixed_batch_size = hparams.batch_size // hparams.max_length + if force_fixed_batch_size: + cur_batching_scheme["batch_sizes"] = [fixed_batch_size, fixed_batch_size] + cur_batching_scheme["boundaries"] = [hparams.max_length + 1] + + dataset = dataset.apply( + tf.data.experimental.bucket_by_sequence_length( + example_length, cur_batching_scheme["boundaries"], + cur_batching_scheme["batch_sizes"], + pad_to_bucket_boundary=force_fixed_batch_size)) + + if force_fixed_batch_size and is_training: + def _force_shape(example): + for _, t in six.iteritems(example): + shape = t.get_shape().as_list() + shape[0] = fixed_batch_size + shape[1] = shape[1] or hparams.max_length + t.set_shape(t.get_shape().merge_with(shape)) + # Assert shapes are fully known + t.get_shape().assert_is_fully_defined() + return example + dataset = dataset.map(_force_shape, num_parallel_calls=num_threads) + + if not is_training: + batch_multiple = num_shards + if hparams.use_fixed_batch_size: + # Make sure the last batch has the same fixed size as the rest. + batch_multiple *= hparams.batch_size + if batch_multiple > 1: + tf.logging.warn( + "Padding the batch to ensure that remainder eval batches have " + "a batch size divisible by the number of data shards. This may " + "lead to incorrect metrics for non-zero-padded features, e.g. " + "images. Use a single datashard (i.e. 1 GPU) in that case.") + dataset = dataset.map( + functools.partial(pad_batch, batch_multiple=batch_multiple), + num_parallel_calls=num_threads) + + dataset = dataset.map(define_shapes, num_parallel_calls=num_threads) + + # Add shuffling for training batches. This is necessary along with record + # level shuffling in the dataset generation. Record shuffling will shuffle + # the examples. However, in some cases, it's possible that the shuffle + # buffer size for record shuffling is smaller than the batch size. In such + # cases, adding batch shuffling ensures that the data is in random order + # during training + if (is_training and hasattr(hparams, "batch_shuffle_size") and + hparams.batch_shuffle_size): + dataset = dataset.shuffle(hparams.batch_shuffle_size) + + # Split batches into chunks if targets are too long. + # The new "chunk_number" feature is 0 for the first chunk and goes up then. + # Chunks are reversed so the 0th chunk comes first, then the 1st and so on, + # so models can attend to them in the order they arrive. The last chunk is + # usually the one containing the end of the target sentence (EOS). 
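+  # (Illustration with made-up sizes: with split_targets_chunk_length = 4, a
+  # target of length 12 contributes chunks numbered 0, 1 and 2, each 4 tokens
+  # long; the remaining all-zero padding chunks are dropped by is_nonzero_chunk
+  # below.)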
+ chunk_length = hparams.get("split_targets_chunk_length", 0) + max_chunks = hparams.get("split_targets_max_chunks", 100) + if chunk_length > 0: + def is_nonzero_chunk(example): + """A chunk is zero if all targets are 0s.""" + return tf.less(0, tf.reduce_sum(tf.abs(example["targets"]))) + + def split_on_length(example): + """Split a batch of ditcs on length.""" + x = example["targets"] + # TODO(kitaev): This code breaks if chunk_length * max_chunks < batch_size + length_diff = chunk_length * max_chunks - tf.shape(x)[1] + padded_x = tf.pad(x, [(0, 0), (0, length_diff), (0, 0), (0, 0)]) + chunks = [padded_x[:, i*chunk_length:(i+1)*chunk_length, :, :] + for i in range(max_chunks - 1)] + chunks.append(padded_x[:, (max_chunks - 1)*chunk_length:, :, :]) + new_example = {} + # Setting chunk_number to be tf.range(max_chunks) is incompatible with TPU + new_example["chunk_number"] = tf.concat([ + tf.expand_dims(tf.ones_like(c) * n, axis=0) + for n, c in enumerate(chunks) + ], + axis=0) + new_example["targets"] = tf.concat( + [tf.expand_dims(c, axis=0) for c in chunks], axis=0) + for k in example: + if k != "targets": + assert k != "chunk_number", ( + "Chunking code expects the chunk_number feature name to be " + "available" + ) + new_example[k] = tf.concat( + [tf.expand_dims(example[k], axis=0) for _ in range(max_chunks)], + axis=0) + return tf.data.Dataset.from_tensor_slices(new_example) + + dataset = dataset.flat_map(split_on_length) + dataset = dataset.filter(is_nonzero_chunk) + + # The chunking data pipeline thus far creates batches of examples where all + # of the examples have the same chunk number. This can lead to periodic + # fluctuations in the loss; for example, when all examples in the batch have + # chunk number 0 the loss may be higher than midway through a sequence. + # Enabling split_targets_strided_training adjusts the data so that each + # batch includes examples at various points within a sequence. + if is_training and hparams.split_targets_strided_training: + # TODO(kitaev): make sure that shape inference works on GPU, not just TPU. + inferred_batch_size = dataset.output_shapes["targets"].as_list()[0] + if inferred_batch_size is None: + raise ValueError( + "Strided training is only implemented when the batch size can be " + "inferred statically, for example when training on TPU." + ) + chunk_stride = inferred_batch_size * max( + 1, max_chunks // inferred_batch_size) + 1 + + def collapse_nested_datasets(example): + """Converts a dataset of datasets to a dataset of tensor features.""" + new_example = {} + for k, v in example.items(): + v = tf.data.experimental.get_single_element( + v.batch(inferred_batch_size, drop_remainder=True)) + new_example[k] = v + return tf.data.Dataset.from_tensor_slices(new_example) + + dataset = dataset.unbatch() + dataset = dataset.window(inferred_batch_size, inferred_batch_size, + chunk_stride) + dataset = dataset.flat_map(collapse_nested_datasets) + dataset = dataset.batch(inferred_batch_size, drop_remainder=True) + + def prepare_for_output(example): + if mode == tf.estimator.ModeKeys.PREDICT: + example["infer_targets"] = example.pop("targets") + return example + else: + return example, example[hparams.get( + key="labels_feature_name", default="targets")] + + dataset = dataset.map(prepare_for_output, num_parallel_calls=num_threads) + dataset = dataset.prefetch(2) + + if mode == tf.estimator.ModeKeys.PREDICT: + # This is because of a bug in the Estimator that short-circuits prediction + # if it doesn't see a QueueRunner. 
DummyQueueRunner implements the + # minimal expected interface but does nothing. + tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, DummyQueueRunner()) + + return dataset diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/devices.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/devices.py new file mode 100644 index 0000000000000000000000000000000000000000..baae579236e59747dbb6005a045b185efd32013a --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/devices.py @@ -0,0 +1,185 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - updated imports +# - use_hpu argument for setting device name in data_parallelism + +"""Device placement and data parallelism.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from TensorFlow.nlp.transformer.utils import expert_utils as eu +import tensorflow.compat.v1 as tf +from tensorflow.python.util import tf_inspect as inspect + + +def data_parallelism_from_flags(daisy_chain_variables=True, all_workers=False): + """Over which devices do we split each training batch. + + In old-fashioned async mode, we split the batch over all GPUs on the + current worker. + + In sync mode, we split the batch over all the parameter server GPUs. + + This function returns an expert_utils.Parallelism object, which can be used + to build the model. It is configured in a way that any variables created + by `tf.get_variable` will be assigned to the parameter servers and shared + between datashards. + + Args: + daisy_chain_variables: whether to copy variables in a daisy chain on GPUs. + all_workers: whether the devices are all async workers or just this one. + + Returns: + a expert_utils.Parallelism. 
+ """ + dp_arg_names = inspect.getargspec(data_parallelism).args + + blacklist = ["daisy_chain_variables", "all_workers"] + + kwargs = {} + for arg in dp_arg_names: + if arg in blacklist: + continue + kwargs[arg] = getattr(tf.flags.FLAGS, arg) + + return data_parallelism( + daisy_chain_variables=daisy_chain_variables, + all_workers=all_workers, + **kwargs) + + +def data_parallelism(daisy_chain_variables=True, + all_workers=False, + ps_replicas=0, + ps_job="/job:ps", + ps_gpu=0, + schedule="continuous_train_and_eval", + sync=False, + worker_gpu=1, + worker_replicas=1, + worker_id=0, + gpu_order="", + worker_job="/job:localhost", + no_data_parallelism=False, + use_hpu=False): + """See data_parallelism_from_flags.""" + tf.logging.info("schedule=%s" % schedule) + tf.logging.info("worker_gpu=%s" % worker_gpu) + tf.logging.info("sync=%s" % sync) + def _ps_replicas(all_workers=False): + if all_workers: + return list(range(ps_replicas)) + # Worker K will be using replicas {0,...n-1} + K*n if we have n replicas. + num_replicas = ps_replicas // worker_replicas + return [d + worker_id * num_replicas for d in range(num_replicas)] + + def _gpu_order(num_gpus): + if gpu_order: + ret = [int(s) for s in gpu_order.split(" ")] + if len(ret) == num_gpus: + return ret + return list(range(num_gpus)) + + def _ps_gpus(all_workers=False): + ps_gpus = [] + for d in _ps_replicas(all_workers=all_workers): + ps_gpus.extend([(d, gpu) for gpu in _gpu_order(ps_gpu)]) + return ps_gpus + + def ps_devices(all_workers=False): + """List of ps devices (where to put the experts). + + Args: + all_workers: whether the list is for all async workers or just this one. + + Returns: + a list of device names + """ + if ps_replicas > 0: + if ps_gpu > 0: + return [ + ps_job + "/task:%d/GPU:%d" % (d, gpu) + for (d, gpu) in _ps_gpus(all_workers=all_workers) + ] + else: + return [ + ps_job + "/task:%d" % d + for d in _ps_replicas(all_workers=all_workers) + ] + else: + if worker_gpu > 0: + return ["gpu:%d" % d for d in _gpu_order(worker_gpu)] + else: + return [""] + + def _replica_device_setter(worker_device): + if ps_replicas == 0: + return worker_device + return tf.train.replica_device_setter( + worker_device=worker_device, + ps_tasks=ps_replicas, + ps_device=ps_job + "/GPU:0" if ps_gpu > 0 else ps_job) + + is_single_machine = ps_replicas == 0 and worker_replicas == 1 + + if no_data_parallelism: + datashard_devices = [""] + caching_devices = None + elif is_single_machine: + tf.logging.warn( + "Schedule=%s. Assuming that training is running on a single machine.", + schedule) + device_fmt = "/device:HPU:%d" if use_hpu else "gpu:%d" + datashard_devices = [device_fmt % d for d in _gpu_order(worker_gpu)] + if worker_gpu < 1: + datashard_devices += ["cpu:0"] + caching_devices = None + elif sync and ps_replicas > 0: + # compute on ps + datashard_devices = [ + _replica_device_setter(d) for d in ps_devices(all_workers=all_workers) + ] + if ps_gpu > 0 and ps_replicas > 1: + caching_devices = [ + ps_job + "/task:%d/cpu:0" % d + for (d, _) in _ps_gpus(all_workers=all_workers) + ] + else: + caching_devices = None + else: + # compute on worker - this is either a single-worker setup or asynchronous + # with parameter servers. 
+ if worker_gpu > 1: + datashard_devices = [ + _replica_device_setter(worker_job + "/GPU:%d" % d) + for d in _gpu_order(worker_gpu) + ] + caching_devices = None + else: + datashard_devices = [_replica_device_setter(worker_job)] + caching_devices = None + tf.logging.info("datashard_devices: %s", datashard_devices) + tf.logging.info("caching_devices: %s", caching_devices) + tf.logging.info("ps_devices: %s", ps_devices(all_workers=all_workers)) + return eu.Parallelism( + datashard_devices, + caching_devices=caching_devices, + daisy_chain_variables=daisy_chain_variables, + ps_devices=ps_devices(all_workers=all_workers)) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/flags.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/flags.py new file mode 100644 index 0000000000000000000000000000000000000000..b71c88d5949b52d5f6bb5681e45e2f5c41723964 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/flags.py @@ -0,0 +1,132 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - updated imports +# - set default model to transformer +# - set default train_steps to 300k +# - set default eval_throttle_seconds to 1 + +"""Common command-line flags.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_bool("registry_help", False, + "If True, logs the contents of the registry and exits.") +flags.DEFINE_bool("tfdbg", False, + "If True, use the TF debugger CLI on train/eval.") +flags.DEFINE_bool("export_saved_model", False, + "DEPRECATED - see serving/export.py.") +flags.DEFINE_bool("dbgprofile", False, + "If True, record the timeline for chrome://tracing/.") +flags.DEFINE_string("model", "transformer", "Which model to use.") +flags.DEFINE_string("hparams_set", None, "Which parameters to use.") +flags.DEFINE_string("hparams_range", None, "Parameters range.") +flags.DEFINE_string("hparams", "", + "A comma-separated list of `name=value` hyperparameter " + "values. This flag is used to override hyperparameter " + "settings either when manually selecting hyperparameters " + "or when using Vizier. If a hyperparameter setting is " + "specified by this flag then it must be a valid " + "hyperparameter name for the model.") +flags.DEFINE_string("problem", None, "Problem name.") + +# data_dir is a common flag name - catch conflicts and define it once. 
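+# (The DEFINE call below raises absl's DuplicateFlagError if data_dir was
+# already registered by another module; the bare except simply ignores that
+# case so the flag keeps its first definition.)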
+try: + flags.DEFINE_string("data_dir", None, "Directory with training data.") +except: # pylint: disable=bare-except + pass + +flags.DEFINE_integer("train_steps", 300000, + "The number of steps to run training for.") +flags.DEFINE_string("eval_early_stopping_metric", "loss", + "If --eval_early_stopping_steps is not None, then stop " + "when --eval_early_stopping_metric has not decreased for " + "--eval_early_stopping_steps") +flags.DEFINE_float("eval_early_stopping_metric_delta", 0.1, + "Delta determining whether metric has plateaued.") +flags.DEFINE_integer("eval_early_stopping_steps", None, + "If --eval_early_stopping_steps is not None, then stop " + "when --eval_early_stopping_metric has not decreased for " + "--eval_early_stopping_steps") +flags.DEFINE_bool("eval_early_stopping_metric_minimize", True, + "Whether to check for the early stopping metric going down " + "or up.") +flags.DEFINE_integer("eval_timeout_mins", 240, + "The maximum amount of time to wait to wait between " + "checkpoints. Set -1 to wait indefinitely.") +flags.DEFINE_bool("eval_run_autoregressive", False, + "Run eval autoregressively where we condition on previous" + "generated output instead of the actual target.") +flags.DEFINE_bool("eval_use_test_set", False, + "Whether to use the '-test' data for EVAL (and PREDICT).") +flags.DEFINE_integer("keep_checkpoint_max", 20, + "How many recent checkpoints to keep.") +flags.DEFINE_bool("enable_graph_rewriter", False, + "Enable graph optimizations that are not on by default.") +flags.DEFINE_integer("keep_checkpoint_every_n_hours", 10000, + "Number of hours between each checkpoint to be saved. " + "The default value 10,000 hours effectively disables it.") +flags.DEFINE_integer("save_checkpoints_secs", 0, + "Save checkpoints every this many seconds. " + "Default=0 means save checkpoints each x steps where x " + "is max(iterations_per_loop, local_eval_frequency).") +flags.DEFINE_bool("log_device_placement", False, + "Whether to log device placement.") +flags.DEFINE_string("warm_start_from", None, "Warm start from checkpoint.") + +# Distributed training flags +flags.DEFINE_integer("local_eval_frequency", 1000, + "Save checkpoints and run evaluation every N steps during " + "local training.") +flags.DEFINE_integer("eval_throttle_seconds", 1, + "Do not re-evaluate unless the last evaluation was started" + " at least this many seconds ago.") +flags.DEFINE_bool("sync", False, "Sync compute on PS.") +flags.DEFINE_string("worker_job", "/job:localhost", "name of worker job") +flags.DEFINE_integer("worker_gpu", 1, "How many GPUs to use.") +flags.DEFINE_integer("worker_replicas", 1, "How many workers to use.") +flags.DEFINE_integer("worker_id", 0, "Which worker task are we.") +flags.DEFINE_float("worker_gpu_memory_fraction", 0.95, + "Fraction of GPU memory to allocate.") +flags.DEFINE_integer("ps_gpu", 0, "How many GPUs to use per ps.") +flags.DEFINE_string("gpu_order", "", "Optional order for daisy-chaining GPUs." + " e.g. \"1 3 2 4\"") +flags.DEFINE_string("ps_job", "/job:ps", "name of ps job") +flags.DEFINE_integer("ps_replicas", 0, "How many ps replicas.") + +# Decoding flags +flags.DEFINE_string("decode_hparams", "", + "Comma-separated list of name=value pairs to control " + "decode behavior. 
See decoding.decode_hparams for " + "defaults.") +flags.DEFINE_string("decode_from_file", "", + "Path to the source file for decoding, used by " + "continuous_decode_from_file.") +flags.DEFINE_string("decode_to_file", "", + "Path to the decoded file generated by decoding, used by " + "continuous_decode_from_file.") +flags.DEFINE_string("decode_reference", "", + "Path to the reference file for decoding, used by " + "continuous_decode_from_file to compute BLEU score.") diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/hparam.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/hparam.py new file mode 100644 index 0000000000000000000000000000000000000000..6e09ed3e5fcc21ddd6048a821f119c51c60b68a3 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/hparam.py @@ -0,0 +1,651 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Forked with minor changes from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/training/python/training/hparam.py pylint: disable=line-too-long +"""Hyperparameter values.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import numbers +import re +import six + +# Define the regular expression for parsing a single clause of the input +# (delimited by commas). A legal clause looks like: +# []? = +# where is either a single token or [] enclosed list of tokens. +# For example: "var[1] = a" or "x = [1,2,3]" +PARAM_RE = re.compile(r""" + (?P[a-zA-Z][\w\.]*) # variable name: "var" or "x" + (\[\s*(?P\d+)\s*\])? # (optional) index: "1" or None + \s*=\s* + ((?P[^,\[]*) # single value: "a" or None + | + \[(?P[^\]]*)\]) # list of values: None or "1,2,3" + ($|,\s*)""", re.VERBOSE) + + +def _parse_fail(name, var_type, value, values): + """Helper function for raising a value error for bad assignment.""" + raise ValueError( + 'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' % + (name, var_type.__name__, value, values)) + + +def _reuse_fail(name, values): + """Helper function for raising a value error for reuse of name.""" + raise ValueError('Multiple assignments to variable \'%s\' in %s' % (name, + values)) + + +def _process_scalar_value(name, parse_fn, var_type, m_dict, values, + results_dictionary): + """Update results_dictionary with a scalar value. + + Used to update the results_dictionary to be returned by parse_values when + encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".) + + Mutates results_dictionary. + + Args: + name: Name of variable in assignment ("s" or "arr"). + parse_fn: Function for parsing the actual value. + var_type: Type of named variable. + m_dict: Dictionary constructed from regex parsing. 
+ m_dict['val']: RHS value (scalar) + m_dict['index']: List index value (or None) + values: Full expression being parsed + results_dictionary: The dictionary being updated for return by the parsing + function. + + Raises: + ValueError: If the name has already been used. + """ + try: + parsed_value = parse_fn(m_dict['val']) + except ValueError: + _parse_fail(name, var_type, m_dict['val'], values) + + # If no index is provided + if not m_dict['index']: + if name in results_dictionary: + _reuse_fail(name, values) + results_dictionary[name] = parsed_value + else: + if name in results_dictionary: + # The name has already been used as a scalar, then it + # will be in this dictionary and map to a non-dictionary. + if not isinstance(results_dictionary.get(name), dict): + _reuse_fail(name, values) + else: + results_dictionary[name] = {} + + index = int(m_dict['index']) + # Make sure the index position hasn't already been assigned a value. + if index in results_dictionary[name]: + _reuse_fail('{}[{}]'.format(name, index), values) + results_dictionary[name][index] = parsed_value + + +def _process_list_value(name, parse_fn, var_type, m_dict, values, + results_dictionary): + """Update results_dictionary from a list of values. + + Used to update results_dictionary to be returned by parse_values when + encountering a clause with a list RHS (e.g. "arr=[1,2,3]".) + + Mutates results_dictionary. + + Args: + name: Name of variable in assignment ("arr"). + parse_fn: Function for parsing individual values. + var_type: Type of named variable. + m_dict: Dictionary constructed from regex parsing. + m_dict['val']: RHS value (scalar) + values: Full expression being parsed + results_dictionary: The dictionary being updated for return by the parsing + function. + + Raises: + ValueError: If the name has an index or the values cannot be parsed. + """ + if m_dict['index'] is not None: + raise ValueError('Assignment of a list to a list index.') + elements = filter(None, re.split('[ ,]', m_dict['vals'])) + # Make sure the name hasn't already been assigned a value + if name in results_dictionary: + raise _reuse_fail(name, values) + try: + results_dictionary[name] = [parse_fn(e) for e in elements] + except ValueError: + _parse_fail(name, var_type, m_dict['vals'], values) + + +def _cast_to_type_if_compatible(name, param_type, value): + """Cast hparam to the provided type, if compatible. + + Args: + name: Name of the hparam to be cast. + param_type: The type of the hparam. + value: The value to be cast, if compatible. + + Returns: + The result of casting `value` to `param_type`. + + Raises: + ValueError: If the type of `value` is not compatible with param_type. + * If `param_type` is a string type, but `value` is not. + * If `param_type` is a boolean, but `value` is not, or vice versa. + * If `param_type` is an integer type, but `value` is not. + * If `param_type` is a float type, but `value` is not a numeric type. + """ + fail_msg = ( + "Could not cast hparam '%s' of type '%s' from value %r" % + (name, param_type, value)) + + # Some callers use None, for which we can't do any casting/checking. :( + if issubclass(param_type, type(None)): + return value + + # Avoid converting a non-string type to a string. + if (issubclass(param_type, (six.string_types, six.binary_type)) and + not isinstance(value, (six.string_types, six.binary_type))): + raise ValueError(fail_msg) + + # Avoid converting a number or string type to a boolean or vice versa. 
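+  # For example (illustrative): _cast_to_type_if_compatible('b', bool, 1)
+  # raises ValueError, while _cast_to_type_if_compatible('b', bool, True)
+  # returns True unchanged.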
+ if issubclass(param_type, bool) != isinstance(value, bool): + raise ValueError(fail_msg) + + # Avoid converting float to an integer (the reverse is fine). + if (issubclass(param_type, numbers.Integral) and + not isinstance(value, numbers.Integral)): + raise ValueError(fail_msg) + + # Avoid converting a non-numeric type to a numeric type. + if (issubclass(param_type, numbers.Number) and + not isinstance(value, numbers.Number)): + raise ValueError(fail_msg) + + return param_type(value) + + +def parse_values(values, type_map, ignore_unknown=False): + """Parses hyperparameter values from a string into a python map. + + `values` is a string containing comma-separated `name=value` pairs. + For each pair, the value of the hyperparameter named `name` is set to + `value`. + + If a hyperparameter name appears multiple times in `values`, a ValueError + is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2'). + + If a hyperparameter name in both an index assignment and scalar assignment, + a ValueError is raised. (e.g. 'a=[1,2,3],a[0] = 1'). + + The hyperparameter name may contain '.' symbols, which will result in an + attribute name that is only accessible through the getattr and setattr + functions. (And must be first explicit added through add_hparam.) + + WARNING: Use of '.' in your variable names is allowed, but is not well + supported and not recommended. + + The `value` in `name=value` must follows the syntax according to the + type of the parameter: + + * Scalar integer: A Python-parsable integer point value. E.g.: 1, + 100, -12. + * Scalar float: A Python-parsable floating point value. E.g.: 1.0, + -.54e89. + * Boolean: Either true or false. + * Scalar string: A non-empty sequence of characters, excluding comma, + spaces, and square brackets. E.g.: foo, bar_1. + * List: A comma separated list of scalar values of the parameter type + enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low]. + + When index assignment is used, the corresponding type_map key should be the + list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not + "arr[1]"). + + Args: + values: String. Comma separated list of `name=value` pairs where + 'value' must follow the syntax described above. + type_map: A dictionary mapping hyperparameter names to types. Note every + parameter name in values must be a key in type_map. The values must + conform to the types indicated, where a value V is said to conform to a + type T if either V has type T, or V is a list of elements of type T. + Hence, for a multidimensional parameter 'x' taking float values, + 'x=[0.1,0.2]' will parse successfully if type_map['x'] = float. + ignore_unknown: Bool. Whether values that are missing a type in type_map + should be ignored. If set to True, a ValueError will not be raised for + unknown hyperparameter type. + + Returns: + A python map mapping each name to either: + * A scalar value. + * A list of scalar values. + * A dictionary mapping index numbers to scalar values. + (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}") + + Raises: + ValueError: If there is a problem with input. + * If `values` cannot be parsed. + * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]'). + * If the same rvalue is assigned two different values (e.g. 
'a=1,a=2', + 'a[1]=1,a[1]=2', or 'a=1,a=[1]') + """ + results_dictionary = {} + pos = 0 + while pos < len(values): + m = PARAM_RE.match(values, pos) + if not m: + raise ValueError('Malformed hyperparameter value: %s' % values[pos:]) + # Check that there is a comma between parameters and move past it. + pos = m.end() + # Parse the values. + m_dict = m.groupdict() + name = m_dict['name'] + if name not in type_map: + if ignore_unknown: + continue + raise ValueError('Unknown hyperparameter type for %s' % name) + type_ = type_map[name] + + # Set up correct parsing function (depending on whether type_ is a bool) + if type_ == bool: + + def parse_bool(value): + if value in ['true', 'True']: + return True + elif value in ['false', 'False']: + return False + else: + try: + return bool(int(value)) + except ValueError: + _parse_fail(name, type_, value, values) + + parse = parse_bool + else: + parse = type_ + + # If a singe value is provided + if m_dict['val'] is not None: + _process_scalar_value(name, parse, type_, m_dict, values, + results_dictionary) + + # If the assigned value is a list: + elif m_dict['vals'] is not None: + _process_list_value(name, parse, type_, m_dict, values, + results_dictionary) + + else: # Not assigned a list or value + _parse_fail(name, type_, '', values) + + return results_dictionary + + +class HParams(object): + """Class to hold a set of hyperparameters as name-value pairs. + + A `HParams` object holds hyperparameters used to build and train a model, + such as the number of hidden units in a neural net layer or the learning rate + to use when training. + + You first create a `HParams` object by specifying the names and values of the + hyperparameters. + + To make them easily accessible the parameter names are added as direct + attributes of the class. A typical usage is as follows: + + ```python + # Create a HParams object specifying names and values of the model + # hyperparameters: + hparams = HParams(learning_rate=0.1, num_hidden_units=100) + + # The hyperparameter are available as attributes of the HParams object: + hparams.learning_rate ==> 0.1 + hparams.num_hidden_units ==> 100 + ``` + + Hyperparameters have type, which is inferred from the type of their value + passed at construction type. The currently supported types are: integer, + float, boolean, string, and list of integer, float, boolean, or string. + + You can override hyperparameter values by calling the + [`parse()`](#HParams.parse) method, passing a string of comma separated + `name=value` pairs. This is intended to make it possible to override + any hyperparameter values from a single command-line flag to which + the user passes 'hyper-param=value' pairs. It avoids having to define + one flag for each hyperparameter. + + The syntax expected for each value depends on the type of the parameter. + See `parse()` for a description of the syntax. + + Example: + + ```python + # Define a command line flag to pass name=value pairs. + # For example using argparse: + import argparse + parser = argparse.ArgumentParser(description='Train my model.') + parser.add_argument('--hparams', type=str, + help='Comma separated list of "name=value" pairs.') + args = parser.parse_args() + ... 
+ def my_program(): + # Create a HParams object specifying the names and values of the + # model hyperparameters: + hparams = tf.HParams(learning_rate=0.1, num_hidden_units=100, + activations=['relu', 'tanh']) + + # Override hyperparameters values by parsing the command line + hparams.parse(args.hparams) + + # If the user passed `--hparams=learning_rate=0.3` on the command line + # then 'hparams' has the following attributes: + hparams.learning_rate ==> 0.3 + hparams.num_hidden_units ==> 100 + hparams.activations ==> ['relu', 'tanh'] + + # If the hyperparameters are in json format use parse_json: + hparams.parse_json('{"learning_rate": 0.3, "activations": "relu"}') + ``` + """ + + _HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks. + + def __init__(self, model_structure=None, **kwargs): + """Create an instance of `HParams` from keyword arguments. + + The keyword arguments specify name-values pairs for the hyperparameters. + The parameter types are inferred from the type of the values passed. + + The parameter names are added as attributes of `HParams` object, so they + can be accessed directly with the dot notation `hparams._name_`. + + Example: + + ```python + # Define 3 hyperparameters: 'learning_rate' is a float parameter, + # 'num_hidden_units' an integer parameter, and 'activation' a string + # parameter. + hparams = tf.HParams( + learning_rate=0.1, num_hidden_units=100, activation='relu') + + hparams.activation ==> 'relu' + ``` + + Note that a few names are reserved and cannot be used as hyperparameter + names. If you use one of the reserved name the constructor raises a + `ValueError`. + + Args: + model_structure: An instance of ModelStructure, defining the feature + crosses to be used in the Trial. + **kwargs: Key-value pairs where the key is the hyperparameter name and + the value is the value for the parameter. + + Raises: + ValueError: If both `hparam_def` and initialization values are provided, + or if one of the arguments is invalid. + + """ + # Register the hyperparameters and their type in _hparam_types. + # This simplifies the implementation of parse(). + # _hparam_types maps the parameter name to a tuple (type, bool). + # The type value is the type of the parameter for scalar hyperparameters, + # or the type of the list elements for multidimensional hyperparameters. + # The bool value is True if the value is a list, False otherwise. + self._hparam_types = {} + self._model_structure = model_structure + for name, value in six.iteritems(kwargs): + self.add_hparam(name, value) + + def add_hparam(self, name, value): + """Adds {name, value} pair to hyperparameters. + + Args: + name: Name of the hyperparameter. + value: Value of the hyperparameter. Can be one of the following types: + int, float, string, int list, float list, or string list. + + Raises: + ValueError: if one of the arguments is invalid. + """ + # Keys in kwargs are unique, but 'name' could the name of a pre-existing + # attribute of this object. In that case we refuse to use it as a + # hyperparameter name. + if getattr(self, name, None) is not None: + raise ValueError('Hyperparameter name is reserved: %s' % name) + if isinstance(value, (list, tuple)): + if not value: + raise ValueError( + 'Multi-valued hyperparameters cannot be empty: %s' % name) + self._hparam_types[name] = (type(value[0]), True) + else: + self._hparam_types[name] = (type(value), False) + setattr(self, name, value) + + def set_hparam(self, name, value): + """Set the value of an existing hyperparameter. 
+ + This function verifies that the type of the value matches the type of the + existing hyperparameter. + + Args: + name: Name of the hyperparameter. + value: New value of the hyperparameter. + + Raises: + KeyError: If the hyperparameter doesn't exist. + ValueError: If there is a type mismatch. + """ + param_type, is_list = self._hparam_types[name] + if isinstance(value, list): + if not is_list: + raise ValueError( + 'Must not pass a list for single-valued parameter: %s' % name) + setattr(self, name, [ + _cast_to_type_if_compatible(name, param_type, v) for v in value]) + else: + if is_list: + raise ValueError( + 'Must pass a list for multi-valued parameter: %s.' % name) + setattr(self, name, _cast_to_type_if_compatible(name, param_type, value)) + + def del_hparam(self, name): + """Removes the hyperparameter with key 'name'. + + Does nothing if it isn't present. + + Args: + name: Name of the hyperparameter. + """ + if hasattr(self, name): + delattr(self, name) + del self._hparam_types[name] + + def parse(self, values): + """Override existing hyperparameter values, parsing new values from a string. + + See parse_values for more detail on the allowed format for values. + + Args: + values: String. Comma separated list of `name=value` pairs where 'value' + must follow the syntax described above. + + Returns: + The `HParams` instance. + + Raises: + ValueError: If `values` cannot be parsed or a hyperparameter in `values` + doesn't exist. + """ + type_map = {} + for name, t in self._hparam_types.items(): + param_type, _ = t + type_map[name] = param_type + + values_map = parse_values(values, type_map) + return self.override_from_dict(values_map) + + def override_from_dict(self, values_dict): + """Override existing hyperparameter values, parsing new values from a dictionary. + + Args: + values_dict: Dictionary of name:value pairs. + + Returns: + The `HParams` instance. + + Raises: + KeyError: If a hyperparameter in `values_dict` doesn't exist. + ValueError: If `values_dict` cannot be parsed. + """ + for name, value in values_dict.items(): + self.set_hparam(name, value) + return self + + def set_model_structure(self, model_structure): + self._model_structure = model_structure + + def get_model_structure(self): + return self._model_structure + + def to_json(self, indent=None, separators=None, sort_keys=False): + """Serializes the hyperparameters into JSON. + + Args: + indent: If a non-negative integer, JSON array elements and object members + will be pretty-printed with that indent level. An indent level of 0, or + negative, will only insert newlines. `None` (the default) selects the + most compact representation. + separators: Optional `(item_separator, key_separator)` tuple. Default is + `(', ', ': ')`. + sort_keys: If `True`, the output dictionaries will be sorted by key. + + Returns: + A JSON string. + """ + def remove_callables(x): + """Omit callable elements from input with arbitrary nesting.""" + if isinstance(x, dict): + return {k: remove_callables(v) for k, v in six.iteritems(x) + if not callable(v)} + elif isinstance(x, list): + return [remove_callables(i) for i in x if not callable(i)] + return x + return json.dumps( + remove_callables(self.values()), + indent=indent, + separators=separators, + sort_keys=sort_keys) + + def parse_json(self, values_json): + """Override existing hyperparameter values, parsing new values from a json object. + + Args: + values_json: String containing a json object of name:value pairs. + + Returns: + The `HParams` instance. 
+ + Raises: + KeyError: If a hyperparameter in `values_json` doesn't exist. + ValueError: If `values_json` cannot be parsed. + """ + values_map = json.loads(values_json) + return self.override_from_dict(values_map) + + def values(self): + """Return the hyperparameter values as a Python dictionary. + + Returns: + A dictionary with hyperparameter names as keys. The values are the + hyperparameter values. + """ + return {n: getattr(self, n) for n in self._hparam_types.keys()} + + def get(self, key, default=None): + """Returns the value of `key` if it exists, else `default`.""" + if key in self._hparam_types: + # Ensure that default is compatible with the parameter type. + if default is not None: + param_type, is_param_list = self._hparam_types[key] + type_str = 'list<%s>' % param_type if is_param_list else str(param_type) + fail_msg = ("Hparam '%s' of type '%s' is incompatible with " + 'default=%s' % (key, type_str, default)) + + is_default_list = isinstance(default, list) + if is_param_list != is_default_list: + raise ValueError(fail_msg) + + try: + if is_default_list: + for value in default: + _cast_to_type_if_compatible(key, param_type, value) + else: + _cast_to_type_if_compatible(key, param_type, default) + except ValueError as e: + raise ValueError('%s. %s' % (fail_msg, e)) + + return getattr(self, key) + + return default + + def __contains__(self, key): + return key in self._hparam_types + + def __str__(self): + return str(sorted(self.values().items())) + + def __repr__(self): + return '%s(%s)' % (type(self).__name__, self.__str__()) + + @staticmethod + def _get_kind_name(param_type, is_list): + """Returns the field name given parameter type and is_list. + + Args: + param_type: Data type of the hparam. + is_list: Whether this is a list. + + Returns: + A string representation of the field name. + + Raises: + ValueError: If parameter type is not recognized. + """ + if issubclass(param_type, bool): + # This check must happen before issubclass(param_type, six.integer_types), + # since Python considers bool to be a subclass of int. + typename = 'bool' + elif issubclass(param_type, six.integer_types): + # Setting 'int' and 'long' types to be 'int64' to ensure the type is + # compatible with both Python2 and Python3. + typename = 'int64' + elif issubclass(param_type, (six.string_types, six.binary_type)): + # Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is + # compatible with both Python2 and Python3. + typename = 'bytes' + elif issubclass(param_type, float): + typename = 'float' + else: + raise ValueError('Unsupported parameter type: %s' % str(param_type)) + + suffix = 'list' if is_list else 'value' + return '_'.join([typename, suffix]) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/hparams_lib.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/hparams_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..c379f51909d4454041156287459128ef1105fd66 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/hparams_lib.py @@ -0,0 +1,107 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - updated imports + +"""T2T HParams handling.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json + +from TensorFlow.nlp.transformer.data_generators import problem as problem_lib +from TensorFlow.nlp.transformer.utils import hparam +from TensorFlow.nlp.transformer.utils import registry + +import tensorflow.compat.v1 as tf + + +def copy_hparams(hparams): + hp_vals = hparams.values() + new_hparams = hparam.HParams(**hp_vals) + other_attrs = ["problem", "problem_hparams"] + for attr in other_attrs: + attr_val = getattr(hparams, attr, None) + if attr_val is not None: + setattr(new_hparams, attr, attr_val) + return new_hparams + + +def create_hparams(hparams_set, + hparams_overrides_str="", + data_dir=None, + problem_name=None, + hparams_path=None): + """Create HParams with data_dir and problem hparams, if kwargs provided.""" + hparams = registry.hparams(hparams_set) + if hparams_path and tf.gfile.Exists(hparams_path): + hparams = create_hparams_from_json(hparams_path, hparams) + if data_dir: + hparams.add_hparam("data_dir", data_dir) + if hparams_overrides_str: + tf.logging.info("Overriding hparams in %s with %s", hparams_set, + hparams_overrides_str) + hparams = hparams.parse(hparams_overrides_str) + if problem_name: + add_problem_hparams(hparams, problem_name) + return hparams + + +def create_hparams_from_json(json_path, hparams=None): + """Loading hparams from json; can also start from hparams if specified.""" + tf.logging.info("Loading hparams from existing json %s" % json_path) + with tf.gfile.Open(json_path, "r") as f: + hparams_values = json.load(f) + # Prevent certain keys from overwriting the passed-in hparams. + # TODO(trandustin): Remove this hack after registries are available to avoid + # saving them as functions. 
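+  # Illustrative example (added comment): loading '{"hidden_size": 1024,
+  # "bottom": ...}' keeps hidden_size (it overwrites the passed-in hparams
+  # below), while the function-valued "bottom" entry is dropped first.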
+ if hparams: + hparams_values.pop("bottom", None) + hparams_values.pop("loss", None) + hparams_values.pop("name", None) + hparams_values.pop("top", None) + hparams_values.pop("weights_fn", None) + new_hparams = hparam.HParams(**hparams_values) + # Some keys are in new_hparams but not hparams, so we need to be more + # careful than simply using parse_json() from HParams + if hparams: # hparams specified, so update values from json + for key in sorted(new_hparams.values().keys()): + if hasattr(hparams, key): # Overlapped keys + value = getattr(hparams, key) + new_value = getattr(new_hparams, key) + if value != new_value: # Different values + tf.logging.info("Overwrite key %s: %s -> %s" % ( + key, value, new_value)) + setattr(hparams, key, new_value) + else: + hparams = new_hparams + + return hparams + + +def add_problem_hparams(hparams, problem_name_or_instance): + """Add problem hparams for the problems.""" + if isinstance(problem_name_or_instance, problem_lib.Problem): + problem = problem_name_or_instance + else: + problem = registry.problem(problem_name_or_instance) + p_hparams = problem.get_hparams(hparams) + hparams.problem = problem + hparams.problem_hparams = p_hparams diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/learning_rate.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/learning_rate.py new file mode 100644 index 0000000000000000000000000000000000000000..30f86d9d2adbddde41908fd56a53a81a349c7862 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/learning_rate.py @@ -0,0 +1,220 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. 
an Intel Company +############################################################################### +# Changes: +# - updated imports +# - added to_float util +# - removed mlperf + +"""Optimization.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np + +import tensorflow.compat.v1 as tf + +def to_float(x): + """Cast x to float; created because tf.to_float is deprecated.""" + return tf.cast(x, tf.float32) + +def learning_rate_factor(name, step_num, hparams): + """Compute the designated learning rate factor from hparams.""" + if name == "constant": + tf.logging.info("Base learning rate: %f", hparams.learning_rate_constant) + return hparams.learning_rate_constant + elif name == "linear_warmup": + return tf.minimum(1.0, step_num / hparams.learning_rate_warmup_steps) + elif name == "linear_decay": + ret = (hparams.train_steps - step_num) / hparams.learning_rate_decay_steps + return tf.minimum(1.0, tf.maximum(0.0, ret)) + elif name == "cosdecay": # openai gpt + in_warmup = tf.cast(step_num <= hparams.learning_rate_warmup_steps, + dtype=tf.float32) + ret = 0.5 * (1 + tf.cos( + np.pi * step_num / hparams.learning_rate_decay_steps)) + # if in warmup stage return 1 else return the decayed value + return in_warmup * 1 + (1 - in_warmup) * ret + elif name == "single_cycle_cos_decay": + # Cosine decay to zero with a single cycle. This is different from + # "cosdecay" because it starts at 1 when the warmup steps end. + x = tf.maximum(step_num, hparams.learning_rate_warmup_steps) + step = x - hparams.learning_rate_warmup_steps + if hparams.train_steps <= hparams.learning_rate_warmup_steps: + raise ValueError("single_cycle_cos_decay cannot be used unless " + "hparams.train_steps > " + "hparams.learning_rate_warmup_steps") + return tf.math.cos( + step * np.pi / + (hparams.train_steps - hparams.learning_rate_warmup_steps)) / 2.0 + 0.5 + elif name == "multi_cycle_cos_decay": + # Cosine decay with a variable number of cycles. This is different from + # "cosdecay" because it starts at 1 when the warmup steps end. Use + # hparams.learning_rate_decay_steps to determine the number of cycles. + x = tf.maximum(step_num, hparams.learning_rate_warmup_steps) + step = x - hparams.learning_rate_warmup_steps + return tf.math.cos( + step * np.pi / hparams.learning_rate_decay_steps) / 2.0 + 0.5 + elif name == "rsqrt_decay": + return tf.rsqrt(tf.maximum(step_num, hparams.learning_rate_warmup_steps)) + elif name == "rsqrt_normalized_decay": + scale = tf.sqrt(to_float(hparams.learning_rate_warmup_steps)) + return scale * tf.rsqrt(tf.maximum( + step_num, hparams.learning_rate_warmup_steps)) + elif name == "exp_decay": + decay_steps = hparams.learning_rate_decay_steps + warmup_steps = hparams.learning_rate_warmup_steps + p = (step_num - warmup_steps) / decay_steps + p = tf.maximum(p, 0.) 
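+    # Illustrative note (added comment): together with the lines below this
+    # evaluates to learning_rate_decay_rate ** p, i.e. the factor decays
+    # exponentially once step_num passes the warmup steps, optionally floored
+    # into a staircase.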
+ if hparams.learning_rate_decay_staircase: + p = tf.floor(p) + return tf.pow(hparams.learning_rate_decay_rate, p) + elif name == "rsqrt_hidden_size": + return hparams.hidden_size ** -0.5 + elif name == "legacy": + return legacy_learning_rate_schedule(hparams) + else: + raise ValueError("unknown learning rate factor %s" % name) + + +def learning_rate_schedule(hparams): + """Learning rate schedule based on hparams.""" + step_num = _global_step(hparams) + schedule_string = hparams.learning_rate_schedule + names = schedule_string.split("*") + names = [name.strip() for name in names if name.strip()] + ret = tf.constant(1.0) + for name in names: + ret *= learning_rate_factor(name, step_num, hparams) + return ret + + +def legacy_learning_rate_schedule(hparams): + """Backwards-compatible learning-rate schedule.""" + step_num = _global_step(hparams) + warmup_steps = to_float(hparams.learning_rate_warmup_steps) + if hparams.learning_rate_decay_scheme == "noam": + ret = 5000.0 * hparams.hidden_size**-0.5 * tf.minimum( + (step_num + 1) * warmup_steps**-1.5, (step_num + 1)**-0.5) + else: + warmup_steps = hparams.learning_rate_warmup_steps + warmup = _learning_rate_warmup(warmup_steps, hparams=hparams) + decay = _learning_rate_decay(hparams, warmup_steps) + ret = tf.where(step_num < warmup_steps, warmup, decay) + optimizer_correction = 0.002 if "adam" in hparams.optimizer else 1.0 + tf.logging.info("Base learning rate: %f", hparams.learning_rate) + return ret * optimizer_correction * hparams.learning_rate + + +def _global_step(hparams): + """Adjust global step if a multi-step optimizer is used.""" + step = to_float(tf.train.get_or_create_global_step()) + multiplier = hparams.optimizer_multistep_accumulate_steps + if not multiplier: + return step + + tf.logging.info("Dividing global step by %d for multi-step optimizer." + % multiplier) + return step / to_float(multiplier) + + +def _legacy_sqrt_decay(step): + """Decay like 1 / sqrt(step), multiplied by 500 to normalize.""" + return 500.0 / tf.sqrt(tf.maximum(step, 1.0)) + + +def _piecewise_learning_rate(step, boundaries, values): + """Scale learning rate according to the given schedule. + + Multipliers are not cumulative. + + Args: + step: global step + boundaries: List of steps to transition on. + values: Multiplier to apply at each boundary transition. + + Returns: + Scaled value for the learning rate. + """ + values = [1.0] + values + boundaries = [float(x) for x in boundaries] + return tf.train.piecewise_constant( + step, boundaries, values, name="piecewise_lr") + + +def _learning_rate_decay(hparams, warmup_steps=0): + """Learning rate decay multiplier.""" + scheme = hparams.learning_rate_decay_scheme + warmup_steps = to_float(warmup_steps) + global_step = _global_step(hparams) + + if not scheme or scheme == "none": + return tf.constant(1.) 
+ + tf.logging.info("Applying learning rate decay: %s.", scheme) + + if scheme == "exp": + decay_steps = hparams.learning_rate_decay_steps + p = (global_step - warmup_steps) / decay_steps + if hparams.learning_rate_decay_staircase: + p = tf.floor(p) + return tf.pow(hparams.learning_rate_decay_rate, p) + + if scheme == "piecewise": + return _piecewise_learning_rate(global_step, + hparams.learning_rate_boundaries, + hparams.learning_rate_multiples) + + if scheme == "cosine": + cycle_steps = hparams.learning_rate_cosine_cycle_steps + cycle_position = global_step % (2 * cycle_steps) + cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position) + return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps)) + + if scheme == "cyclelinear10x": + # Cycle the rate linearly by 10x every warmup_steps, up and down. + cycle_steps = warmup_steps + cycle_position = global_step % (2 * cycle_steps) + cycle_position = to_float( # Normalize to the interval [-1, 1]. + cycle_position - cycle_steps) / float(cycle_steps) + cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0. + return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3). + + if scheme == "sqrt": + return _legacy_sqrt_decay(global_step - warmup_steps) + + raise ValueError("Unrecognized learning rate decay scheme: %s" % + hparams.learning_rate_decay_scheme) + + +def _learning_rate_warmup(warmup_steps, warmup_schedule="exp", hparams=None): + """Learning rate warmup multiplier.""" + if not warmup_steps: + return tf.constant(1.) + + tf.logging.info("Applying %s learning rate warmup for %d steps", + warmup_schedule, warmup_steps) + + warmup_steps = to_float(warmup_steps) + global_step = _global_step(hparams) + + if warmup_schedule == "exp": + return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step) + else: + assert warmup_schedule == "linear" + start = tf.constant(0.35) + return ((tf.constant(1.) - start) / warmup_steps) * global_step + start diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/make_tf_configs.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/make_tf_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..6d482e3304e7d12a52ec4498585a03aa50806109 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/make_tf_configs.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Output command line arguments and json-encoded TF_CONFIGs. + +Usage: + +`t2t-make-tf-configs --masters="server1:1234" --ps="server3:2134,server4:2334"` + +Outputs 1 line per job to stdout, first the masters, then the parameter servers. +Each line has the TF_CONFIG, then a tab, then the command line flags for that +job. + +If there is a single master, it will have the `--sync` flag. 
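+
+Illustrative output line (shape only; the actual cluster and flags depend on
+the --masters/--ps values):
+
+  '{"cluster": {...}, "task": {"type": "master", "index": 0}, ...}'<TAB>--master=grpc://server1:1234 --ps_replicas=2 ... --sync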
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("masters", "", "Comma-separated list of master addresses") +flags.DEFINE_string("ps", "", "Comma-separated list of ps addresses") + + +def main(_): + if not (FLAGS.masters and FLAGS.ps): + raise ValueError("Must provide --masters and --ps") + + masters = FLAGS.masters.split(",") + ps = FLAGS.ps.split(",") + + is_sync = len(masters) == 1 + if is_sync: + print("Assuming SYNC distributed training with a single master and %d " + "workers" % len(ps)) + cluster = {"ps": ps, "master": masters} + else: + print("Assuming ASYNC distributed training with %d workers and %d " + "parameter servers" % (len(masters), len(ps))) + cluster = {"ps": ps, "chief": [masters[0]], "worker": masters[1:]} + + # Trainer configs + for idx, addr in enumerate(masters): + cmd_line_flags = [ + "--master=grpc://%s" % addr, + "--ps_replicas=%d" % len(ps), + "--worker_replicas=%d" % len(masters), + "--worker_gpu=%d" % (0 if is_sync else 1), + "--worker_id=%d" % idx, + "--ps_gpu=%d" % (1 if is_sync else 0), + "--sync" if is_sync else "", + "--schedule=train", + ] + if is_sync: + task_type = "master" + cmd_line_flags.append("--worker_job='/job:master'") + else: + if idx == 0: + task_type = "chief" + idx = 0 + cmd_line_flags.append("--worker_job='/job:chief'") + else: + task_type = "worker" + idx -= 1 + cmd_line_flags.append("--worker_job='/job:worker'") + + tf_config = json.dumps({ + "cluster": cluster, + "task": { + "type": task_type, + "index": idx + }, + "environment": "cloud", + }) + cmd_line_flags = " ".join(cmd_line_flags) + print("'%s'\t%s" % (tf_config, cmd_line_flags)) + + # Std server configs + for idx, addr in enumerate(ps): + tf_config = json.dumps({ + "cluster": cluster, + "task": { + "type": "ps", + "index": idx + }, + "environment": "cloud", + }) + cmd_line_flags = "--schedule=run_std_server" + print("'%s'\t%s" % (tf_config, cmd_line_flags)) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/metrics_hook.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/metrics_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..3b738b82d5ed9f73251b7192a11c8b93a0c861ce --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/metrics_hook.py @@ -0,0 +1,290 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Summary-based SessionRunHooks.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tensorflow.compat.v1 as tf + +from tensorboard.backend.event_processing import event_accumulator +from tensorboard.backend.event_processing import event_multiplexer + + +class MetricsBasedHook(tf.train.SessionRunHook): + """Base class for hooks based on summary metrics. + + Subclasses should override _process_metrics. + + If _process_metrics returns True, calls run_context.request_stop(). + + This can be used to something like "Stop after the loss has stopped decreasing + for 5000 steps. + """ + _RUN_NAME = "run%d" + + def __init__(self, events_dir, subdirs=None, tags=None, every_n_steps=1000): + """Construct MetricsBasedHook. + + Args: + events_dir: str, top-level directory containing events files. + subdirs: list, subdirectories of events_dir that also contain + events files. Use "" to specify the top-level directory. Defaults to + [""]. + tags: list, names of metrics to collect. Default will collect all + metrics. + every_n_steps: int, collect metrics every n steps. + """ + self._events_dir = events_dir + self._subdirs = subdirs or [""] + self._tags = tags + self._every_n_steps = every_n_steps + self._start_step = None + self._event_multiplexer = self._init_multiplexer() + + def _init_multiplexer(self): + dirs = [os.path.join(self._events_dir, subdir) for subdir in self._subdirs] + run_path_map = dict([(self._RUN_NAME % i, d) for i, d in enumerate(dirs)]) + return event_multiplexer.EventMultiplexer(run_path_map) + + def begin(self): + self._global_step_tensor = tf.train.get_global_step() + if self._global_step_tensor is None: + raise RuntimeError("Global step must be created to use MetricsBasedHook.") + + def after_create_session(self, session, coord): + del coord + if self._start_step is None: + self._start_step = session.run(self._global_step_tensor) + + def before_run(self, run_context): + del run_context + return tf.train.SessionRunArgs([self._global_step_tensor]) + + def after_run(self, run_context, run_values): + global_step = run_values.results[0] + if (global_step - self._start_step) % self._every_n_steps != 0: + return + metrics = self._collect_metrics() + self._after_run(run_context, run_values, global_step, metrics) + + def _after_run(self, run_context, run_values, global_step, metrics): + del run_values + if self._process_metrics(global_step, metrics): + run_context.request_stop() + + def _collect_metrics(self): + self._event_multiplexer.Reload() + subdir_data = {} + for i, subdir in enumerate(self._subdirs): + subdir_metrics = {} + + accum = self._event_multiplexer.GetAccumulator(self._RUN_NAME % i) + for tag in accum.Tags()[event_accumulator.SCALARS]: + steps, vals = zip(*[ + (event.step, event.value) for event in accum.Scalars(tag)]) + subdir_metrics[tag] = (steps, vals) + + subdir_data[subdir] = subdir_metrics + return subdir_data + + def _process_metrics(self, global_step, metrics): + """Process the collected metrics. + + Args: + global_step: int, the current global step value. + metrics: dict. The collected + metrics. subdir_metrics is a dict from tag name to tuple of lists. The + lists are a list of global steps and a list of values. + i.e. subdir_metrics: + `dict global steps, list values>>>` + + Returns: + should_stop: bool. If True, will request that the session stops. 
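+      Illustrative shape of the `metrics` argument (added for clarity):
+      {"": {"loss": ([1000, 2000, 3000], [2.3, 1.9, 1.7])}}
+      i.e. a dict keyed by subdir, mapping each tag name to a pair of
+      (global steps, values) sequences.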
+ """ + del global_step, metrics + return False + + +class EarlyStoppingHook(MetricsBasedHook): + """EarlyStoppingHook will stop training when a given metric has plateaued.""" + + def __init__(self, + events_dir, + tag, + num_plateau_steps=1000, + plateau_delta=0.1, + plateau_decrease=True, + every_n_steps=1000): + """Create an EarlyStoppingHook. + + This hook will stop training when the metric identified by tag has + plateaued. Plateaued is defined by the metric having stopped + increasing/decreasing (based on plateau_decrease) by plateau_delta for + num_plateau_steps. + + Args: + events_dir: Directory with events files. + tag: Name of metric in TensorBoard. + num_plateau_steps: Number of steps over which to check the plateau. + plateau_delta: delta to define a "plateau". + plateau_decrease: whether to check decrease or increase in the metric. + every_n_steps: how often to run this hook. + + Returns: + An instance of EarlyStoppingHook. + """ + super(EarlyStoppingHook, self).__init__( + events_dir=events_dir, tags=[tag], every_n_steps=every_n_steps) + self._num_plateau_steps = num_plateau_steps + self._plateau_delta = plateau_delta + self._plateau_decrease = plateau_decrease + + def _process_metrics(self, global_step, metrics): + if not metrics: + return None + + if not list(metrics.values())[0]: + return None + + # Metrics should have just a single subdir and a single tag + steps, vals = list(metrics.values())[0][self._tags[0]] + return has_metric_plateaued( + steps, + vals, + num_steps=self._num_plateau_steps, + delta=self._plateau_delta, + decrease=self._plateau_decrease) + + +class PlateauOpHook(MetricsBasedHook): + """Runs an op when a metric has plateaued.""" + + def __init__(self, + events_dir, + tag, + plateau_op, + num_plateau_steps=1000, + plateau_delta=0.1, + plateau_decrease=True, + every_n_steps=1000, + only_once=False): + """See EarlyStoppingHook for args. Runs plateau_op if plateaued.""" + super(PlateauOpHook, self).__init__( + events_dir=events_dir, tags=[tag], every_n_steps=every_n_steps) + self._num_plateau_steps = num_plateau_steps + self._plateau_delta = plateau_delta + self._plateau_decrease = plateau_decrease + self._plateau_op = plateau_op + self._only_once = only_once + self._should_run_op = False + self._ever_ran = False + self._last_metric_step_seen = 0 + + @property + def keep_alive(self): + if self._only_once and self._ever_ran: + return False + return True + + def before_run(self, run_context): + del run_context + + fetches = [self._global_step_tensor] + if self._should_run_op and self.keep_alive: + fetches.append(self._plateau_op) + self._should_run_op = False + self._ever_ran = True + + return tf.train.SessionRunArgs(fetches) + + def _after_run(self, run_context, run_values, global_step, metrics): + del run_context + del run_values + del global_step + + if not self.keep_alive: + return + + if not metrics: + return + + if not list(metrics.values())[0]: + return + + # There should be only a single subdir and a single tag + steps, vals = list(metrics.values())[0][self._tags[0]] + + if not steps: + return + + last_step = steps[-1] + if last_step == self._last_metric_step_seen: + return + self._last_metric_step_seen = last_step + + if has_metric_plateaued( + steps, + vals, + num_steps=self._num_plateau_steps, + delta=self._plateau_delta, + decrease=self._plateau_decrease): + self._should_run_op = True + + +def has_metric_plateaued(steps, values, num_steps=100, delta=0.1, + decrease=True): + """Check if metric has plateaued. 
+ + A metric has plateaued if the value has not increased/decreased (depending on + `decrease`) by `delta` for at least `num_steps`. + + Args: + steps: list list of global steps for values. + values: list list of metric values. + num_steps: int, number of steps the metric has to have been plateaued for. + delta: float, how much the metric should have changed by over num_steps. + decrease: bool, whether to check if the metric has decreased by delta or + increased by delta. + + Returns: + bool, whether the metric has plateaued. + """ + assert num_steps > 0 + if len(steps) < 2: + return False + + steps_at_least_num_steps_ago = [ + s for s in steps if s <= (steps[-1] - num_steps) + ] + if not steps_at_least_num_steps_ago: + # Not enough steps yet + return False + delta_step_idx = len(steps_at_least_num_steps_ago) - 1 + + start_val = values[delta_step_idx] + values_to_check = values[delta_step_idx:] + observed_deltas = [] + for val in values_to_check: + if decrease: + observed_delta = start_val - val + else: + observed_delta = val - start_val + observed_deltas.append(observed_delta) + + within_range = [obs < delta for obs in observed_deltas] + return all(within_range) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/misc_utils.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/misc_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d26a586e85758a5cf119cd1b545207865c32e110 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/misc_utils.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Miscellaneous utilities.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pprint +import re + +# Camel case to snake case utils +_first_cap_re = re.compile("(.)([A-Z][a-z0-9]+)") +_all_cap_re = re.compile("([a-z0-9])([A-Z])") + + +def camelcase_to_snakecase(name): + s1 = _first_cap_re.sub(r"\1_\2", name) + return _all_cap_re.sub(r"\1_\2", s1).lower() + + +def snakecase_to_camelcase(name): + return "".join([w[0].upper() + w[1:] for w in name.split("_")]) + + +def pprint_hparams(hparams): + """Represents hparams using its dictionary and calls pprint.pformat on it.""" + return "\n{}".format(pprint.pformat(hparams.values(), width=1)) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/mpi.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/mpi.py new file mode 100644 index 0000000000000000000000000000000000000000..211b7c042e3ec17b813814f5faf07c740a8bee5d --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/mpi.py @@ -0,0 +1,15 @@ +############################################################################### +# Copyright (C) 2022 Habana Labs, Ltd. 
an Intel Company +############################################################################### +import os + +def MPI_is_distributed(): + return all([var in os.environ for var in ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"]]) + +def MPI_world_rank(): + return os.environ.get("OMPI_COMM_WORLD_RANK", 0) + +def MPI_barrier(): + if MPI_is_distributed(): + from mpi4py import MPI + MPI.COMM_WORLD.Barrier() diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/multistep_optimizer.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/multistep_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..43172d0dc6e6ddef64544393981459c69a24297e --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/multistep_optimizer.py @@ -0,0 +1,137 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Multi-step optimizers simulating large batches. + +Optimizer variants which make it possible to use very large batch sizes with +limited GPU memory. Optimizers in this module accumulate the gradients for n +batches, and call the optimizer's update rule every n batches with the +accumulated gradients. + +See [Saunders et al., 2018](https://arxiv.org/abs/1805.00456) for details. 
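+
+As a rough, framework-free illustration (not part of the original module):
+averaging the n accumulated micro-batch gradients and applying a single update
+is equivalent to one optimizer step on a batch that is n times larger.
+
+```
+def accumulated_sgd_step(params, micro_grads, lr=0.1):
+  """One SGD update from the mean of n accumulated micro-batch gradients."""
+  n = len(micro_grads)
+  mean_grad = [sum(gs) / n for gs in zip(*micro_grads)]
+  return [p - lr * g for p, g in zip(params, mean_grad)]
+
+# Hypothetical values: three micro-batches of gradients for two parameters.
+accumulated_sgd_step([1.0, 2.0], [[0.3, 0.6], [0.2, 0.4], [0.1, 0.2]])
+# -> [0.98, 1.96]
+```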
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + + +class MultistepAdamOptimizer(tf.train.AdamOptimizer): + """Adam with SGD updates every n steps with accumulated gradients.""" + + def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, + use_locking=False, name="Adam", n=1): + super(MultistepAdamOptimizer, self).__init__( + learning_rate=learning_rate, beta1=beta1, beta2=beta2, epsilon=epsilon, + use_locking=use_locking, name=name) + self._n = n # Call Adam optimizer every n batches with accumulated grads + self._n_t = None # n as tensor + + def _create_slots(self, var_list): + """Create slot variables for Adam with accumulated gradients.""" + super(MultistepAdamOptimizer, self)._create_slots(var_list) + first_var = min(var_list, key=lambda x: x.name) + self._create_non_slot_variable(initial_value=0 if self._n == 1 else 1, + name="iter", + colocate_with=first_var) + for v in var_list: + self._zeros_slot(v, "grad_acc", self._name) + + def _get_iter_variable(self): + graph = ( + None if tf.executing_eagerly() else tf.get_default_graph()) + return self._get_non_slot_variable("iter", graph=graph) + + def _prepare(self): + super(MultistepAdamOptimizer, self)._prepare() + self._n_t = tf.convert_to_tensor(self._n, name="n") + + def _apply_cond(self, apply_fn, grad, var, *args, **kwargs): + """Apply conditionally if counter is zero.""" + grad_acc = self.get_slot(var, "grad_acc") + + def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs): + total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype) + adam_op = apply_fn(total_grad, var, *args, **kwargs) + with tf.control_dependencies([adam_op]): + grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc), + use_locking=self._use_locking) + return tf.group(adam_op, grad_acc_to_zero_op) + + def accumulate_gradient(grad_acc, grad): + assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking) + return tf.group(assign_op) # Strip return value + + return tf.cond( + tf.equal(self._get_iter_variable(), 0), + lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs), + lambda: accumulate_gradient(grad_acc, grad)) + + def _apply_dense(self, grad, var): + return self._apply_cond( + super(MultistepAdamOptimizer, self)._apply_dense, grad, var) + + def _resource_apply_dense(self, grad, var): + return self._apply_cond( + super(MultistepAdamOptimizer, self)._resource_apply_dense, grad, var) + + def _apply_sparse_shared(self, grad, var, indices, scatter_add): + return self._apply_cond( + super(MultistepAdamOptimizer, self)._apply_sparse_shared, grad, var, + indices, scatter_add) + + def _apply_sparse(self, grad, var): + # TODO(fstahlberg): Implement a sparse version + tf.logging.warning("MultistepAdamOptimizer does not support sparse updates") + dense_grad = tf.convert_to_tensor(grad) + return self._apply_cond( + super(MultistepAdamOptimizer, self)._apply_dense, dense_grad, var) + + def _resource_apply_sparse_duplicate_indices(self, grad, var, indices): + tf.logging.warning("MultistepAdamOptimizer does not support sparse updates") + # Note that conversion to a dense Tensor handles duplicate `indices` + # correctly (summing them). A real sparse implementation will probably want + # to override _resource_apply_sparse instead so it gets them de-duplicated + # automatically. 
+ dense_grad = tf.convert_to_tensor( + tf.IndexedSlices(values=grad, indices=indices, + dense_shape=tf.shape(var))) + return self._apply_cond( + super(MultistepAdamOptimizer, self)._resource_apply_dense, + dense_grad, var) + + def _finish(self, update_ops, name_scope): + """Updates beta_power variables every n batches and incrs counter.""" + iter_ = self._get_iter_variable() + beta1_power, beta2_power = self._get_beta_accumulators() + with tf.control_dependencies(update_ops): + with tf.colocate_with(iter_): + + def update_beta_op(): + update_beta1 = beta1_power.assign( + beta1_power * self._beta1_t, + use_locking=self._use_locking) + update_beta2 = beta2_power.assign( + beta2_power * self._beta2_t, + use_locking=self._use_locking) + return tf.group(update_beta1, update_beta2) + maybe_update_beta = tf.cond( + tf.equal(iter_, 0), update_beta_op, tf.no_op) + with tf.control_dependencies([maybe_update_beta]): + update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t), + use_locking=self._use_locking) + return tf.group( + *update_ops + [update_iter, maybe_update_beta], name=name_scope) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/optimize.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..9c66a314f4616056d263863a5f090feb6c566c33 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/optimize.py @@ -0,0 +1,367 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. 
an Intel Company +############################################################################### +# Changes: +# - updated imports +# - removed hparams.summarize_vars, hparams.summarize_grads +# - remove unused optmimizers +# - added horovod support +# - added check if optimizer has get_gradients defined + +"""Optimization.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from TensorFlow.nlp.transformer.layers import common_layers +from TensorFlow.nlp.transformer.utils import contrib +from TensorFlow.nlp.transformer.utils import misc_utils +from TensorFlow.nlp.transformer.utils import multistep_optimizer +from TensorFlow.nlp.transformer.utils import registry + +import tensorflow.compat.v1 as tf + + +from tensorflow.python.framework import dtypes # pylint: disable=g-direct-tensorflow-import + + +def _mixed_precision_is_enabled(hparams): + """Should be the same as in common_attention, avoiding import.""" + activation_dtype = hparams.activation_dtype + weight_dtype = hparams.weight_dtype + return activation_dtype == tf.float16 and weight_dtype == tf.float32 + + +def optimize(loss, + learning_rate, + hparams, + use_tpu=False, + variables=None): + """Minimize loss.""" + loss = weight_decay_and_noise(loss, hparams, learning_rate) + loss = tf.identity(loss, name="total_loss") + if variables is None: + variables = tf.trainable_variables() + # Print trainable variables. + log_variable_sizes(variables) + # Print non-trainable variables. + non_trainable_variables = list( + set(tf.global_variables()) - set(variables)) + log_variable_sizes(non_trainable_variables, tag="Non-trainable variables") + diet_vars = [ + v for v in tf.global_variables() if v.dtype == dtypes.float16_ref + ] + log_variable_sizes( + diet_vars, "Diet Variables") + opt = ConditionalOptimizer(hparams.optimizer, learning_rate, hparams, use_tpu) + if use_tpu: + opt = contrib.tpu().CrossShardOptimizer(opt) + if hparams.use_horovod: + import horovod.tensorflow as hvd + opt = hvd.DistributedOptimizer(opt) + if getattr(hparams, "gpu_automatic_mixed_precision", False): + if use_tpu: + raise RuntimeError("GPU auto mixed precision cannot be used with TPU") + elif _mixed_precision_is_enabled(hparams): + raise RuntimeError( + "GPU auto mixed precision cannot be used with manual mixed precision") + else: + setattr(opt, "_use_locking", "True") + setattr(opt, "_name", "ConditionalOptimizer") + opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt) + + opt_summaries = [] + if common_layers.should_generate_summaries(): + tf.summary.scalar("learning_rate", learning_rate) + opt_summaries.append("loss") + + if hparams.clip_grad_norm: + tf.logging.info("Clipping gradients, norm: %0.5f", hparams.clip_grad_norm) + if hparams.grad_noise_scale: + tf.logging.info("Adding noise to gradients, noise scale: %0.5f", + hparams.grad_noise_scale) + + train_op = contrib.layers().optimize_loss( + name="training", + loss=loss, + global_step=tf.train.get_or_create_global_step(), + learning_rate=learning_rate, + clip_gradients=hparams.clip_grad_norm or None, + gradient_noise_scale=hparams.grad_noise_scale or None, + optimizer=opt, + summaries=opt_summaries, + colocate_gradients_with_ops=True, + variables=variables) + return train_op + + +@registry.register_optimizer +def adam(learning_rate, hparams): + """Return adam optimizer for the given params.""" + # We change the default epsilon for Adam. + # Using LazyAdam as it's much faster for large vocabulary embeddings. 
+ if contrib.is_tf2: + # in TF2 beta1 -> beta_1 :/ + return contrib.opt().LazyAdamOptimizer( + learning_rate, + beta_1=hparams.optimizer_adam_beta1, + beta_2=hparams.optimizer_adam_beta2, + epsilon=hparams.optimizer_adam_epsilon) + else: + return contrib.opt().LazyAdamOptimizer( + learning_rate, + beta1=hparams.optimizer_adam_beta1, + beta2=hparams.optimizer_adam_beta2, + epsilon=hparams.optimizer_adam_epsilon) + + +@registry.register_optimizer +def multistep_adam(learning_rate, hparams): + return multistep_optimizer.MultistepAdamOptimizer( + learning_rate, + beta1=hparams.optimizer_adam_beta1, + beta2=hparams.optimizer_adam_beta2, + epsilon=hparams.optimizer_adam_epsilon, + n=hparams.optimizer_multistep_accumulate_steps) + + +@registry.register_optimizer +def momentum(learning_rate, hparams): + return tf.train.MomentumOptimizer( + learning_rate, + momentum=hparams.optimizer_momentum_momentum, + use_nesterov=hparams.optimizer_momentum_nesterov) + + +@registry.register_optimizer +def true_adam(learning_rate, hparams): + return tf.train.AdamOptimizer( + learning_rate, + beta1=hparams.optimizer_adam_beta1, + beta2=hparams.optimizer_adam_beta2, + epsilon=hparams.optimizer_adam_epsilon) + + +@registry.register_optimizer +def adam_w(learning_rate, hparams): + return contrib.opt().AdamWOptimizer( + weight_decay=hparams.weight_decay, + learning_rate=learning_rate, + beta1=hparams.optimizer_adam_beta1, + beta2=hparams.optimizer_adam_beta2, + epsilon=hparams.optimizer_adam_epsilon) + + +def _register_base_optimizer(name, opt): + key = misc_utils.camelcase_to_snakecase(name) + if key in registry.Registries.optimizers: + return + registry.register_optimizer(key)( + lambda learning_rate, hparams: opt(learning_rate)) + + +for _name, _opt in contrib.layers().OPTIMIZER_CLS_NAMES.items(): + _register_base_optimizer(_name, _opt) + + +class ConditionalOptimizer(tf.train.Optimizer): + """Conditional optimizer.""" + + def __init__(self, optimizer_name, lr, hparams, use_tpu=False): # pylint: disable=super-init-not-called + self._opt = registry.optimizer(optimizer_name)(lr, hparams) + if _mixed_precision_is_enabled(hparams): + if not hparams.mixed_precision_optimizer_loss_scaler: + tf.logging.warning("Using mixed precision without a loss scaler will " + "likely cause numerical errors.") + elif hparams.mixed_precision_optimizer_loss_scaler != "exponential": + raise ValueError("Mixed precision training only supports the " + "exponential loss scaler") + else: + tf.logging.info( + ("Using Exponential Update Loss Scaler with", + "init loss scale of {}".format( + hparams.mixed_precision_optimizer_init_loss_scale))) + manager = contrib.mixed_precision().ExponentialUpdateLossScaleManager( + init_loss_scale=hparams.mixed_precision_optimizer_init_loss_scale, + incr_every_n_steps=2000, + decr_every_n_nan_or_inf=2, + incr_ratio=2, + decr_ratio=0.5) + self._opt = contrib.mixed_precision().LossScaleOptimizer( + self._opt, manager) + + self._zero_grads = hparams.optimizer_zero_grads + + def compute_gradients(self, loss, var_list=None, **kwargs): # pylint: disable=arguments-differ + if contrib.is_tf2 and getattr(self._opt, 'get_gradients', None) is not None: + gradients = self._opt.get_gradients(loss, var_list) + gradients = zip(gradients, var_list) + else: + gradients = self._opt.compute_gradients(loss, var_list, **kwargs) + + def cast_grad(g, v): + if v is not None and g is not None: + g = common_layers.cast_like(g, v) + if self._zero_grads and g is None: + g = tf.zeros_like(v) + return (g, v) + gradients = [cast_grad(g, v) 
for g, v in gradients] + return gradients + + def apply_gradients(self, grads_and_vars, global_step=None, name=None): + if contrib.is_tf2: + with tf.control_dependencies( + [tf.assign_add(tf.train.get_or_create_global_step(), 1)]): + return self._opt.apply_gradients(grads_and_vars, name=name) + else: + return self._opt.apply_gradients( + grads_and_vars, global_step=global_step, name=name) + + +def weight_decay_and_noise(loss, hparams, learning_rate, var_list=None): + """Apply weight decay and weight noise.""" + if var_list is None: + var_list = tf.trainable_variables() + + decay_vars = [v for v in var_list] + noise_vars = [v for v in var_list if "/body/" in v.name] + + weight_decay_loss = weight_decay(hparams.weight_decay, decay_vars) + if hparams.weight_decay and common_layers.should_generate_summaries(): + tf.summary.scalar("losses/weight_decay", weight_decay_loss) + weight_noise_ops = weight_noise(hparams.weight_noise, learning_rate, + noise_vars) + + with tf.control_dependencies(weight_noise_ops): + loss = tf.identity(loss) + + loss += weight_decay_loss + return loss + + +def weight_noise(noise_rate, learning_rate, var_list): + """Apply weight noise to vars in var_list.""" + if not noise_rate: + return [tf.no_op()] + + tf.logging.info("Applying weight noise scaled by learning rate, " + "noise_rate: %0.5f", noise_rate) + + noise_ops = [] + + for v in var_list: + with tf.device(v.device): # pylint: disable=protected-access + scale = noise_rate * learning_rate * 0.001 + if common_layers.should_generate_summaries(): + tf.summary.scalar("weight_noise_scale", scale) + noise = tf.truncated_normal(v.shape) * scale + noise_op = v.assign_add(noise) + noise_ops.append(noise_op) + + return noise_ops + + +def weight_decay(decay_rate, var_list, skip_biases=True): + """Apply weight decay to vars in var_list.""" + if not decay_rate: + return 0. + + tf.logging.info("Applying weight decay, decay_rate: %0.5f", decay_rate) + + weight_decays = [] + for v in var_list: + # Weight decay. + # This is a heuristic way to detect biases that works for main tf.layers. + is_bias = len(v.shape.as_list()) == 1 and v.name.endswith("bias:0") + if not (skip_biases and is_bias): + with tf.device(v.device): + v_loss = tf.nn.l2_loss(v) + weight_decays.append(v_loss) + + return tf.add_n(weight_decays) * decay_rate + + +def log_variable_sizes(var_list=None, tag=None, verbose=False): + """Log the sizes and shapes of variables, and the total size. + + Args: + var_list: a list of variables; defaults to trainable_variables + tag: a string; defaults to "Trainable Variables" + verbose: bool, if True, log every weight; otherwise, log total size only. + """ + if var_list is None: + var_list = tf.trainable_variables() + if tag is None: + tag = "Trainable Variables" + + if not var_list: + return + + name_to_var = {v.name: v for v in var_list} + total_size = 0 + for v_name in sorted(list(name_to_var)): + v = name_to_var[v_name] + v_size = int(np.prod(np.array(v.shape.as_list()))) + if verbose: + tf.logging.info("Weight %s\tshape %s\tsize %d", + v.name[:-2].ljust(80), + str(v.shape).ljust(20), v_size) + total_size += v_size + tf.logging.info("%s Total size: %d", tag, total_size) + + +def summarize_variables(var_list=None, tag=None): + """Summarize the variables. + + Args: + var_list: a list of variables; defaults to trainable_variables. + tag: name scope of the summary; defaults to training_variables/. 
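+
+  A hedged usage sketch (assumes TF1 graph mode; the variable name is made up):
+
+  ```
+  tf.disable_v2_behavior()
+  with tf.variable_scope("demo"):
+    _ = tf.get_variable("w", shape=[4, 4])
+  summarize_variables()             # one histogram summary per trainable var
+  log_variable_sizes(verbose=True)  # logs each variable's shape and size
+  ```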
+ """ + if var_list is None: + var_list = tf.trainable_variables() + if tag is None: + tag = "training_variables/" + + name_to_var = {v.name: v for v in var_list} + for v_name in list(name_to_var): + v = name_to_var[v_name] + tf.summary.histogram(tag + v_name, v) + + +def get_variable_initializer(hparams): + """Get variable initializer from hparams.""" + if not hparams.initializer: + return None + + if not tf.executing_eagerly(): + tf.logging.info("Using variable initializer: %s", hparams.initializer) + if hparams.initializer == "orthogonal": + return tf.orthogonal_initializer(gain=hparams.initializer_gain) + elif hparams.initializer == "uniform": + max_val = 0.1 * hparams.initializer_gain + return tf.random_uniform_initializer(-max_val, max_val) + elif hparams.initializer == "normal_unit_scaling": + return tf.variance_scaling_initializer( + hparams.initializer_gain, mode="fan_avg", distribution="normal") + elif hparams.initializer == "uniform_unit_scaling": + return tf.variance_scaling_initializer( + hparams.initializer_gain, mode="fan_avg", distribution="uniform") + elif hparams.initializer == "xavier": + return tf.initializers.glorot_uniform() + else: + raise ValueError("Unrecognized initializer: %s" % hparams.initializer) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/profile.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/profile.py new file mode 100644 index 0000000000000000000000000000000000000000..3b89f99cfd8a55b5a151e0b4b4ef27ee0975129a --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/profile.py @@ -0,0 +1,34 @@ +""" +Copyright (C) 2022 Habana Labs, Ltd. an Intel Company +All Rights Reserved. + +Unauthorized copying of this file or any element(s) within it, via any medium +is strictly prohibited. +This file contains Habana Labs, Ltd. proprietary and confidential information +and is subject to the confidentiality and license agreements under which it +was provided. +""" + +import tensorflow.compat.v1 as tf +import tensorflow.profiler.experimental as profiler + + +class ProfilerHook(tf.train.SessionRunHook): + def __init__(self, steps, log_dir) -> None: + profile_steps = [int(i) for i in steps.split(',')] + if len(profile_steps) != 2: + raise ValueError( + "Step has to be a pair of numbers, got {} instead".format(steps)) + self._step_count = 0 + self._start_step = profile_steps[0] + self._end_step = profile_steps[1] + self._log_dir = log_dir + + def after_run(self, _, __): + if self._step_count == self._end_step: + profiler.stop() + + def before_run(self, _): + self._step_count += 1 + if self._step_count == self._start_step: + profiler.start(self._log_dir) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/quantization.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..67818d92fb4df86ff93fcd3ef5896690498c008b --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/quantization.py @@ -0,0 +1,323 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - updated imports +# - added to_float util + +"""Utilities related to using bfloat16 activations and/or parameters.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import tensorflow.compat.v1 as tf + +from tensorflow.python.framework import function + +def to_float(x): + """Cast x to float; created because tf.to_float is deprecated.""" + return tf.cast(x, tf.float32) + +def bfloat16_activations_var_getter(getter, *args, **kwargs): + """A custom getter function for float32 parameters and bfloat16 activations. + + Args: + getter: custom getter + *args: arguments + **kwargs: keyword arguments + Returns: + variables with the correct dtype. + Raises: + KeyError: if "dtype" is not provided as a kwarg. + """ + requested_dtype = kwargs["dtype"] + if requested_dtype == tf.bfloat16: + kwargs["dtype"] = tf.float32 + var = getter(*args, **kwargs) + # This if statement is needed to guard the cast, because batch norm + # assigns directly to the return value of this custom getter. The cast + # makes the return value not a variable so it cannot be assigned. Batch + # norm variables are always in fp32 so this if statement is never + # triggered for them. + if var.dtype.base_dtype != requested_dtype: + var = tf.cast(var, requested_dtype) + return var + + +def float16_activations_var_getter(getter, *args, **kwargs): + """A custom getter function for float32 parameters and float16 activations. + + This function ensures the following: + 1. All variables requested with type fp16 are stored as type fp32. + 2. All variables requested with type fp32 are returned as type fp16. + See https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/ + #training_tensorflow for more information on this strategy. + + Args: + getter: custom getter + *args: arguments + **kwargs: keyword arguments + + Returns: + variables with the correct dtype. + + Raises: + KeyError: if "dtype" is not provided as a kwarg. + """ + requested_dtype = kwargs["dtype"] + + if requested_dtype == tf.float16: + kwargs["dtype"] = tf.float32 + + if requested_dtype == tf.float32: + requested_dtype = tf.float16 + var = getter(*args, **kwargs) + # This if statement is needed to guard the cast, because batch norm + # assigns directly to the return value of this custom getter. The cast + # makes the return value not a variable so it cannot be assigned. Batch + # norm variables are always in fp32 so this if statement is never + # triggered for them. + if var.dtype.base_dtype != requested_dtype: + var = tf.cast(var, requested_dtype) + return var + + +def simulated_quantize(x, num_bits, noise): + """Simulate quantization to num_bits bits, with externally-stored scale. + + num_bits is the number of bits used to store each value. + noise is a float32 Tensor containing values in [0, 1). 
+ Each value in noise should take different values across + different steps, approximating a uniform distribution over [0, 1). + In the case of replicated TPU training, noise should be identical + across replicas in order to keep the parameters identical across replicas. + + The natural choice for noise would be tf.random_uniform(), + but this is not possible for TPU, since there is currently no way to seed + the different cores to produce identical values across replicas. Instead we + use noise_from_step_num() (see below). + + The quantization scheme is as follows: + + Compute the maximum absolute value by row (call this max_abs). + Store this either in an auxiliary variable or in an extra column. + + Divide the parameters by (max_abs / (2^(num_bits-1)-1)). This gives a + float32 value in the range [-2^(num_bits-1)-1, 2^(num_bits-1)-1] + + Unbiased randomized roundoff by adding noise and rounding down. + + This produces a signed integer with num_bits bits which can then be stored. + + Args: + x: a float32 Tensor + num_bits: an integer between 1 and 22 + noise: a float Tensor broadcastable to the shape of x. + + Returns: + a float32 Tensor + """ + shape = x.get_shape().as_list() + if not (len(shape) >= 2 and shape[-1] > 1): + return x + max_abs = tf.reduce_max(tf.abs(x), -1, keepdims=True) + 1e-9 + max_int = 2 ** (num_bits - 1) - 1 + scale = max_abs / max_int + x /= scale + x = tf.floor(x + noise) + # dequantize before storing (since this is a simulation) + x *= scale + return x + + +def noise_from_step_num(): + """Quantization noise equal to (phi * (step_num + 1)) mod 1.0. + + Not using random_uniform here due to a problem on TPU in that random seeds + are not respected, which may cause the parameters on different replicas + to go out-of-sync. + + Returns: + a float32 scalar + """ + step = tf.to_int32(tf.train.get_or_create_global_step()) + 1 + phi = ((5 ** 0.5) - 1) / 2 + # Naive computation tf.mod(phi * step, 1.0) in float32 would be disastrous + # due to loss of precision when the step number gets large. + # Computation in doubles does not work on TPU, so we use this complicated + # alternative computation which does not suffer from these roundoff errors. + ret = 0.0 + for i in range(30): + ret += (((phi * (2 ** i)) % 1.0) # double-precision computation in python + * to_float(tf.mod(step // (2 ** i), 2))) + return tf.mod(ret, 1.0) + + +def _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2): + """Round-off x to cand1 or to cand2 in an unbiased way. + + Cand1 and cand2 are the same shape as x. + For every element of x, the corresponding elements of cand1 and cand2 should + be the two closest bfloat16 values to x. Order does not matter. + cand1 and cand2 must differ from each other. + + Args: + x: A float32 Tensor. + noise: A Tensor broadcastable to the shape of x containing + random uniform values in [0.0, 1.0]. + cand1: A bfloat16 Tensor the same shape as x. + cand2: A bfloat16 Tensor the same shape as x. + + Returns: + A bfloat16 Tensor. + """ + cand1_f = to_float(cand1) + cand2_f = to_float(cand2) + step_size = cand2_f - cand1_f + fpart = (x - cand1_f) / step_size + ret = tf.where(tf.greater(fpart, noise), cand2, cand1) + return ret + + +def _to_bfloat16_unbiased(x, noise): + """Convert a float32 to a bfloat16 using randomized roundoff. + + Args: + x: A float32 Tensor. + noise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x) + Returns: + A float32 Tensor. + """ + x_sign = tf.sign(x) + # Make sure x is positive. 
If it is zero, the two candidates are identical. + x = x * x_sign + 1e-30 + cand1 = tf.to_bfloat16(x) + cand1_f = to_float(cand1) + # This relies on the fact that for a positive bfloat16 b, + # b * 1.005 gives you the next higher bfloat16 and b*0.995 gives you the + # next lower one. Both 1.005 and 0.995 are ballpark estimation. + cand2 = tf.to_bfloat16( + tf.where(tf.greater(x, cand1_f), cand1_f * 1.005, cand1_f * 0.995)) + ret = _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2) + return ret * tf.to_bfloat16(x_sign) + + +class ParameterEncoding(object): + """Helper class for encoding weights as bfloat16. + + For now, the parameters are always stored (encoded) as bfloat16 and decoded + to bfloat32. Confusingly, the custom getter then converts the bfloat32 back + to a bfloat16 to use as an activation, assuming that we use bfloat16 for + activations. + + TODO(noam): Add options for activation dtype=float32, and for different + storage dtypes. + """ + + def encode(self, x, noise): + """Encode float32 to bfloat16. + + Args: + x: a float32 Tensor + noise: a float32 Tensor with values in [0, 1), broadcastable to shape(x) + + Returns: + a bfloat16 Tensor + """ + raise NotImplementedError("encode not implemented") + + def decode(self, x): + """Decode bfloat16 to float32.""" + raise NotImplementedError("decode not implemented") + + def _decode_with_identity_gradient(self, x): + # identity backprop through the decoder. + # This means that the optimizer must call encode when updating weights. + @function.Defun(python_grad_func=lambda op, dy: dy, + shape_func=lambda op: [op.inputs[0].get_shape()]) + def my_fn(x): + return self.decode(x) + return my_fn(x) + + def custom_getter(self, activation_dtype=tf.bfloat16): + """A custom getter that uses the encoding for bfloat16 and float32 vars. + + When a bfloat16 or float32 variable is requsted, an encoded float16 + varaible is created, which is then decoded and cast to a bfloat16 + activation. + + Args: + activation_dtype: a dtype to which to convert the decoded value. + + Returns: + a function. + """ + def getter_fn(getter, *args, **kwargs): + requested_dtype = kwargs["dtype"] + if requested_dtype in (tf.bfloat16, tf.float32): + kwargs["dtype"] = tf.bfloat16 + kwargs["initializer"] = _EncodingInitializer( + kwargs["initializer"], self) + ret = self._decode_with_identity_gradient(getter(*args, **kwargs)) + return tf.cast(ret, activation_dtype) + return getter(*args, **kwargs) + return getter_fn + + +class _EncodingInitializer(object): + """Helper class for ParameterEncoding. + + Initializes variables by calling base initializer, then encoding. + """ + + def __init__(self, base_initializer, parameter_encoding): + self._base_initializer = base_initializer + self._parameter_encoding = parameter_encoding + + def __call__(self, shape, dtype, partition_info=None): + if self._base_initializer is None: + # mimic default initialization in tf.get_variable() + if dtype.is_floating: + ret = tf.glorot_uniform_initializer()(shape, dtype) + else: + ret = tf.zeros(shape, dtype) + else: + ret = self._base_initializer(shape, dtype, partition_info=partition_info) + noise = 0.0 # no random noise in the initializer. + return tf.cast(self._parameter_encoding.encode(ret, noise), dtype) + + +class EighthPowerEncoding(ParameterEncoding): + """enc(x) = sign(x) * (abs(x)*128)^8. + + This provides less range and more resolution. + The range of representable positive values is approximately [2^-23, 2^9] + Resolution is 8x better than bfloat16. 
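+
+  Round-trip sketch (illustrative only, ignoring the stochastic bfloat16
+  rounding in encode):
+
+  ```
+  x = 2.0
+  enc = (abs(x) * 128.0) ** 8   # 2**64, well inside the bfloat16 range
+  dec = (enc ** 0.125) / 128.0  # recovers 2.0
+  ```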
+ """ + + def encode(self, x, noise): + x = to_float(x) + # we can't use tf.pow(..., 8.0) because of a high-error approximation + # on TPU. Instead we square three times. + x = tf.sign(x) * tf.square(tf.square(tf.square(tf.abs(x) * 128.0))) + x = _to_bfloat16_unbiased(x, noise) + return x + + def decode(self, x): + x = to_float(x) + # we can't use tf.pow(..., 0.125) because of a high-error approximation + # on TPU. Instead we sqrt three times. + return tf.sign(x) * (tf.sqrt(tf.sqrt(tf.sqrt(tf.abs(x)))) / 128.0) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/registry.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..9d67c0c6649b146a5001524c81fa5ce11c68bd29 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/registry.py @@ -0,0 +1,613 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - updated imports +# - removed mtf layer support + +"""Object registration. + +Registries are instances of `Registry`. + +See `Registries` for a centralized list of object registries +(models, problems, hyperparameter sets, etc.). + +New functions and classes can be registered using `.register`. The can be +accessed/queried similar to dictionaries, keyed by default by `snake_case` +equivalents. + +``` +@Registries.models.register +class MyModel(T2TModel): + ... + +'my_model' in Registries.models # True +for k in Registries.models: + print(k) # prints 'my_model' +model = Registries.models['my_model'](constructor_arg) +``` + +#### Legacy Support + +Define a new model by subclassing T2TModel and register it: + +``` +@register_model +class MyModel(T2TModel): + ... +``` + +Access by snake-cased name: `model("my_model")`. If you're using +`t2t_trainer.py`, you can pass on the command-line: `--model=my_model`. + +See all the models registered: `list_models()`. + +For hyperparameter sets: + * Register: `register_hparams` + * List: `list_hparams` + * Retrieve by name: `hparams` + * Command-line flag in `t2t_trainer.py`: `--hparams_set=name` + +For hyperparameter ranges: + * Register: `register_ranged_hparams` + * List: `list_ranged_hparams` + * Retrieve by name: `ranged_hparams` + * Command-line flag in `t2t_trainer.py`: `--hparams_range=name` +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from TensorFlow.nlp.transformer.utils import misc_utils +import tensorflow.compat.v1 as tf + +from tensorflow.python.util import tf_inspect as inspect # pylint: disable=g-direct-tensorflow-import + + +def default_name(class_or_fn): + """Default name for a class or function. 
+ + This is the naming function by default for registries expecting classes or + functions. + + Args: + class_or_fn: class or function to be named. + + Returns: + Default name for registration. + """ + return misc_utils.camelcase_to_snakecase(class_or_fn.__name__) + + +default_object_name = lambda obj: default_name(type(obj)) + + +class Registry(object): + """Dict-like class for managing function registrations. + + ```python + my_registry = Registry("custom_name") + + @my_registry.register + def my_func(): + pass + + @my_registry.register() + def another_func(): + pass + + @my_registry.register("non_default_name") + def third_func(x, y, z): + pass + + def foo(): + pass + + my_registry.register()(foo) + my_registry.register("baz")(lambda (x, y): x + y) + my_register.register("bar") + + print(list(my_registry)) + # ["my_func", "another_func", "non_default_name", "foo", "baz"] + # (order may vary) + print(my_registry["non_default_name"] is third_func) # True + print("third_func" in my_registry) # False + print("bar" in my_registry) # False + my_registry["non-existent_key"] # raises KeyError + ``` + + Optional validation, on_set callback and value transform also supported. + See `__init__` doc. + """ + + def __init__(self, + registry_name, + default_key_fn=default_name, + validator=None, + on_set=None, + value_transformer=(lambda k, v: v)): + """Construct a new registry. + + Args: + registry_name: str identifier for the given registry. Used in error msgs. + default_key_fn (optional): function mapping value -> key for registration + when a key is not provided + validator (optional): if given, this is run before setting a given (key, + value) pair. Accepts (key, value) and should raise if there is a + problem. Overwriting existing keys is not allowed and is checked + separately. Values are also checked to be callable separately. + on_set (optional): callback function accepting (key, value) pair which is + run after an item is successfully set. + value_transformer (optional): if run, `__getitem__` will return + value_transformer(key, registered_value). + """ + self._registry = {} + self._name = registry_name + self._default_key_fn = default_key_fn + self._validator = validator + self._on_set = on_set + self._value_transformer = value_transformer + + def default_key(self, value): + """Default key used when key not provided. Uses function from __init__.""" + return self._default_key_fn(value) + + @property + def name(self): + return self._name + + def validate(self, key, value): + """Validation function run before setting. Uses function from __init__.""" + if self._validator is not None: + self._validator(key, value) + + def on_set(self, key, value): + """Callback called on successful set. Uses function from __init__.""" + if self._on_set is not None: + self._on_set(key, value) + + def __setitem__(self, key, value): + """Validate, set, and (if successful) call `on_set` for the given item. + + Args: + key: key to store value under. If `None`, `self.default_key(value)` is + used. + value: callable stored under the given key. + + Raises: + KeyError: if key is already in registry. + """ + if key is None: + key = self.default_key(value) + if key in self: + raise KeyError( + "key %s already registered in registry %s" % (key, self._name)) + if not callable(value): + raise ValueError("value must be callable") + self.validate(key, value) + self._registry[key] = value + self.on_set(key, value) + + def register(self, key_or_value=None): + """Decorator to register a function, or registration itself. 
+ + This is primarily intended for use as a decorator, either with or without + a key/parentheses. + ```python + @my_registry.register('key1') + def value_fn(x, y, z): + pass + + @my_registry.register() + def another_fn(x, y): + pass + + @my_registry.register + def third_func(): + pass + ``` + + Note if key_or_value is provided as a non-callable, registration only + occurs once the returned callback is called with a callable as its only + argument. + ```python + callback = my_registry.register('different_key') + 'different_key' in my_registry # False + callback(lambda (x, y): x + y) + 'different_key' in my_registry # True + ``` + + Args: + key_or_value (optional): key to access the registered value with, or the + function itself. If `None` (default), `self.default_key` will be called + on `value` once the returned callback is called with `value` as the only + arg. If `key_or_value` is itself callable, it is assumed to be the value + and the key is given by `self.default_key(key)`. + + Returns: + decorated callback, or callback generated a decorated function. + """ + + def decorator(value, key): + self[key] = value + return value + + # Handle if decorator was used without parens + if callable(key_or_value): + return decorator(value=key_or_value, key=None) + else: + return lambda value: decorator(value, key=key_or_value) + + def __getitem__(self, key): + if key not in self: + raise KeyError("%s never registered with registry %s. Available:\n %s" % + (key, self.name, display_list_by_prefix(sorted(self), 4))) + value = self._registry[key] + return self._value_transformer(key, value) + + def __contains__(self, key): + return key in self._registry + + def keys(self): + return self._registry.keys() + + def values(self): + return (self[k] for k in self) # complicated because of transformer + + def items(self): + return ((k, self[k]) for k in self) # complicated because of transformer + + def __iter__(self): + return iter(self._registry) + + def __len__(self): + return len(self._registry) + + def _clear(self): + self._registry.clear() + + def get(self, key, default=None): + return self[key] if key in self else default + + +def _on_model_set(k, v): + v.REGISTERED_NAME = k + + +def _nargs_validator(nargs, message): + """Makes validator for function to ensure it takes nargs args.""" + if message is None: + message = "Registered function must take exactly %d arguments" % nargs + + def f(key, value): + del key + spec = inspect.getfullargspec(value) + if (len(spec.args) != nargs or spec.varargs is not None or + spec.varkw is not None): + raise ValueError(message) + + return f + + +ProblemSpec = collections.namedtuple("ProblemSpec", + ["base_name", "was_reversed", "was_copy"]) + + +def parse_problem_name(name): + """Determines if problem_name specifies a copy and/or reversal. + + Args: + name: str, problem name, possibly with suffixes. + + Returns: + ProblemSpec: namedtuple with ["base_name", "was_reversed", "was_copy"] + + Raises: + ValueError if name contains multiple suffixes of the same type + ('_rev' or '_copy'). One of each is ok. + """ + # Recursively strip tags until we reach a base name. 
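+  # For example: "translate_ende_rev" -> ("translate_ende", True, False),
+  # "translate_ende_copy_rev" -> ("translate_ende", True, True), and
+  # "translate_ende_rev_rev" raises ValueError.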
+ if name.endswith("_rev"): + base, was_reversed, was_copy = parse_problem_name(name[:-4]) + if was_reversed: + # duplicate rev + raise ValueError( + "Invalid problem name %s: multiple '_rev' instances" % name) + return ProblemSpec(base, True, was_copy) + elif name.endswith("_copy"): + base, was_reversed, was_copy = parse_problem_name(name[:-5]) + if was_copy: + raise ValueError( + "Invalid problem_name %s: multiple '_copy' instances" % name) + return ProblemSpec(base, was_reversed, True) + else: + return ProblemSpec(name, False, False) + + +def get_problem_name(base_name, was_reversed=False, was_copy=False): + """Construct a problem name from base and reversed/copy options. + + Inverse of `parse_problem_name`. + + Args: + base_name: base problem name. Should not end in "_rev" or "_copy" + was_reversed: if the problem is to be reversed + was_copy: if the problem is to be copied + + Returns: + string name consistent with use with `parse_problem_name`. + + Raises: + ValueError if `base_name` ends with "_rev" or "_copy" + """ + if any(base_name.endswith(suffix) for suffix in ("_rev", "_copy")): + raise ValueError("`base_name` cannot end in '_rev' or '_copy'") + name = base_name + if was_copy: + name = "%s_copy" % name + if was_reversed: + name = "%s_rev" % name + return name + + +def _problem_name_validator(k, v): + del v + if parse_problem_name(k).base_name != k: + raise KeyError( + "Invalid problem name: cannot end in %s or %s" % ("_rev", "_copy")) + + +def _on_problem_set(k, v): + v.name = k + + +def _call_value(k, v): + del k + return v() + + +def _hparams_value_transformer(key, value): + out = value() + if out is None: + raise TypeError("HParams %s is None. Make sure the registered function " + "returns the HParams object" % key) + return out + + +class Registries(object): + """Object holding `Registry` objects.""" + + def __init__(self): + raise RuntimeError("Registries is not intended to be instantiated") + + models = Registry("models", on_set=_on_model_set) + + optimizers = Registry( + "optimizers", + validator=_nargs_validator( + 2, "Registered optimizer functions must take exactly two arguments: " + "learning_rate (float) and hparams (HParams).")) + + hparams = Registry("hparams", value_transformer=_hparams_value_transformer) + + ranged_hparams = Registry( + "ranged_hparams", + validator=_nargs_validator( + 1, "Registered ranged_hparams functions must take a single argument, " + "the RangedHParams object.")) + + problems = Registry( + "problems", validator=_problem_name_validator, on_set=_on_problem_set) + + attacks = Registry("attacks", value_transformer=_call_value) + + attack_params = Registry("attack_params", value_transformer=_call_value) + + pruning_params = Registry("pruning_params", value_transformer=_call_value) + + pruning_strategies = Registry("pruning_strategies") + + env_problems = Registry("env_problems", on_set=_on_problem_set) + + +# consistent version of old API +model = Registries.models.__getitem__ +list_models = lambda: sorted(Registries.models) +register_model = Registries.models.register + + +def optimizer(name): + """Get pre-registered optimizer keyed by name. + + `name` should be snake case, though SGD -> sgd, RMSProp -> rms_prop and + UpperCamelCase -> snake_case conversions included for legacy support. + + Args: + name: name of optimizer used in registration. This should be a snake case + identifier, though others supported for legacy reasons. 
+ + Returns: + optimizer + """ + warn_msg = ("Please update `registry.optimizer` callsite " + "(likely due to a `HParams.optimizer` value)") + if name == "SGD": + name = "sgd" + tf.logging.warning("'SGD' optimizer now keyed by 'sgd'. %s" % warn_msg) + elif name == "RMSProp": + name = "rms_prop" + tf.logging.warning( + "'RMSProp' optimizer now keyed by 'rms_prop'. %s" % warn_msg) + else: + snake_name = misc_utils.camelcase_to_snakecase(name) + if name != snake_name: + tf.logging.warning( + "optimizer names now keyed by snake_case names. %s" % warn_msg) + name = snake_name + return Registries.optimizers[name] + + +list_optimizers = lambda: sorted(Registries.optimizers) +register_optimizer = Registries.optimizers.register + +hparams = Registries.hparams.__getitem__ +register_hparams = Registries.hparams.register + +list_env_problems = lambda: sorted(Registries.env_problems) +register_env_problem = Registries.env_problems.register + + +def list_hparams(prefix=None): + hp_names = sorted(Registries.hparams) + if prefix: + hp_names = [name for name in hp_names if name.startswith(prefix)] + return hp_names + + +ranged_hparams = Registries.ranged_hparams.__getitem__ +list_ranged_hparams = lambda: sorted(Registries.ranged_hparams) +register_ranged_hparams = Registries.ranged_hparams.register + +base_problem = Registries.problems.__getitem__ +list_base_problems = lambda: sorted(Registries.problems) +register_base_problem = Registries.problems.register + +# Keeping for back-compatibility +list_problems = list_base_problems +register_problem = register_base_problem + + +def problem(problem_name, **kwargs): + """Get possibly copied/reversed problem in `base_registry` or `env_registry`. + + Args: + problem_name: string problem name. See `parse_problem_name`. + **kwargs: forwarded to env problem's initialize method. + + Returns: + possibly reversed/copied version of base problem registered in the given + registry. + """ + spec = parse_problem_name(problem_name) + try: + return Registries.problems[spec.base_name]( + was_copy=spec.was_copy, was_reversed=spec.was_reversed) + except KeyError: + # If name is not found in base problems then try creating an env problem + return env_problem(problem_name, **kwargs) + + +def env_problem(env_problem_name, **kwargs): + """Get and initialize the `EnvProblem` with the given name and batch size. + + Args: + env_problem_name: string name of the registered env problem. + **kwargs: forwarded to env problem's initialize method. + + Returns: + an initialized EnvProblem with the given batch size. 
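+
+  A hedged usage sketch (the problem name and kwarg are hypothetical; real
+  names depend on what was registered with `register_env_problem`):
+
+  ```
+  ep = env_problem("my_env_problem", batch_size=8)
+  ```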
+ """ + + ep_cls = Registries.env_problems[env_problem_name] + ep = ep_cls() + ep.initialize(**kwargs) + return ep + + +attack = Registries.attacks.__getitem__ +list_attacks = lambda: sorted(Registries.attacks) +register_attack = Registries.attacks.register + +attack_params = Registries.attack_params.__getitem__ +list_attack_params = lambda: sorted(Registries.attack_params) +register_attack_params = Registries.attack_params.register + +pruning_params = Registries.pruning_params.__getitem__ +list_pruning_params = lambda: sorted(Registries.pruning_params) +register_pruning_params = Registries.pruning_params.register + +pruning_strategy = Registries.pruning_strategies.__getitem__ +list_pruning_strategies = lambda: sorted(Registries.pruning_strategies) +register_pruning_strategy = Registries.pruning_strategies.register + + +def display_list_by_prefix(names_list, starting_spaces=0): + """Creates a help string for names_list grouped by prefix.""" + cur_prefix, result_lines = None, [] + space = " " * starting_spaces + for name in sorted(names_list): + split = name.split("_", 1) + prefix = split[0] + if cur_prefix != prefix: + result_lines.append(space + prefix + ":") + cur_prefix = prefix + result_lines.append(space + " * " + name) + return "\n".join(result_lines) + + +def help_string(): + """Generate help string with contents of registry.""" + help_str = """ +Registry contents: +------------------ + + Models: +%s + + HParams: +%s + + RangedHParams: +%s + + Problems: +%s + + Optimizers: +%s + + Attacks: +%s + + Attack HParams: +%s + + Pruning HParams: +%s + + Pruning Strategies: +%s + + Env Problems: +%s +""" + lists = tuple( + display_list_by_prefix(entries, starting_spaces=4) for entries in [ # pylint: disable=g-complex-comprehension + list_models(), + list_hparams(), + list_ranged_hparams(), + list_base_problems(), + list_optimizers(), + list_attacks(), + list_attack_params(), + list_pruning_params(), + list_pruning_strategies(), + list_env_problems(), + ]) + return help_str % lists diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/rouge.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/rouge.py new file mode 100644 index 0000000000000000000000000000000000000000..706154e2cb1b5deb51f9a899ab0997c3e8ac1fb7 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/rouge.py @@ -0,0 +1,236 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# coding=utf-8 +"""ROUGE metric implementation. + +This is a modified and slightly extended version of +https://github.com/miso-belica/sumy/blob/dev/sumy/evaluation/rouge.py. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import numpy as np + +import tensorflow.compat.v1 as tf + + +def _len_lcs(x, y): + """Returns the length of the Longest Common Subsequence between two seqs. 
+ + Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence + + Args: + x: sequence of words + y: sequence of words + + Returns + integer: Length of LCS between x and y + """ + table = _lcs(x, y) + n, m = len(x), len(y) + return table[n, m] + + +def _lcs(x, y): + """Computes the length of the LCS between two seqs. + + The implementation below uses a DP programming algorithm and runs + in O(nm) time where n = len(x) and m = len(y). + Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence + + Args: + x: collection of words + y: collection of words + + Returns: + Table of dictionary of coord and len lcs + """ + n, m = len(x), len(y) + table = {} + for i in range(n + 1): + for j in range(m + 1): + if i == 0 or j == 0: + table[i, j] = 0 + elif x[i - 1] == y[j - 1]: + table[i, j] = table[i - 1, j - 1] + 1 + else: + table[i, j] = max(table[i - 1, j], table[i, j - 1]) + return table + + +def _f_lcs(llcs, m, n): + """Computes the LCS-based F-measure score. + + Source: https://www.microsoft.com/en-us/research/publication/ + rouge-a-package-for-automatic-evaluation-of-summaries/ + + Args: + llcs: Length of LCS + m: number of words in reference summary + n: number of words in candidate summary + + Returns: + Float. LCS-based F-measure score + """ + r_lcs = llcs / m + p_lcs = llcs / n + beta = p_lcs / (r_lcs + 1e-12) + num = (1 + (beta**2)) * r_lcs * p_lcs + denom = r_lcs + ((beta**2) * p_lcs) + f_lcs = num / (denom + 1e-12) + return f_lcs + + +def rouge_l_sentence_level(eval_sentences, ref_sentences): + """Computes ROUGE-L (sentence level) of two collections of sentences. + + Source: https://www.microsoft.com/en-us/research/publication/ + rouge-a-package-for-automatic-evaluation-of-summaries/ + + Calculated according to: + R_lcs = LCS(X,Y)/m + P_lcs = LCS(X,Y)/n + F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) + + where: + X = reference summary + Y = Candidate summary + m = length of reference summary + n = length of candidate summary + + Args: + eval_sentences: The sentences that have been picked by the summarizer + ref_sentences: The sentences from the reference set + + Returns: + A float: F_lcs + """ + + f1_scores = [] + for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): + m = len(ref_sentence) + n = len(eval_sentence) + lcs = _len_lcs(eval_sentence, ref_sentence) + f1_scores.append(_f_lcs(lcs, m, n)) + return np.mean(f1_scores, dtype=np.float32) + + +def rouge_l_fscore(predictions, labels, **unused_kwargs): + """ROUGE scores computation between labels and predictions. + + This is an approximate ROUGE scoring method since we do not glue word pieces + or decode the ids and tokenize the output. + + Args: + predictions: tensor, model predictions + labels: tensor, gold output. + + Returns: + rouge_l_fscore: approx rouge-l f1 score. + """ + outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) + # Convert the outputs and labels to a [batch_size, input_length] tensor. + outputs = tf.squeeze(outputs, axis=[-1, -2]) + labels = tf.squeeze(labels, axis=[-1, -2]) + rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels), + tf.float32) + return rouge_l_f_score, tf.constant(1.0) + + +def _get_ngrams(n, text): + """Calculates n-grams. 
+ + Args: + n: which n-grams to calculate + text: An array of tokens + + Returns: + A set of n-grams + """ + ngram_set = set() + text_length = len(text) + max_index_ngram_start = text_length - n + for i in range(max_index_ngram_start + 1): + ngram_set.add(tuple(text[i:i + n])) + return ngram_set + + +def rouge_n(eval_sentences, ref_sentences, n=2): + """Computes ROUGE-N f1 score of two text collections of sentences. + + Source: https://www.microsoft.com/en-us/research/publication/ + rouge-a-package-for-automatic-evaluation-of-summaries/ + + Args: + eval_sentences: The sentences that have been picked by the summarizer + ref_sentences: The sentences from the reference set + n: Size of ngram. Defaults to 2. + + Returns: + f1 score for ROUGE-N + """ + + f1_scores = [] + for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): + eval_ngrams = _get_ngrams(n, eval_sentence) + ref_ngrams = _get_ngrams(n, ref_sentence) + ref_count = len(ref_ngrams) + eval_count = len(eval_ngrams) + + # Gets the overlapping ngrams between evaluated and reference + overlapping_ngrams = eval_ngrams.intersection(ref_ngrams) + overlapping_count = len(overlapping_ngrams) + + # Handle edge case. This isn't mathematically correct, but it's good enough + if eval_count == 0: + precision = 0.0 + else: + precision = overlapping_count / eval_count + + if ref_count == 0: + recall = 0.0 + else: + recall = overlapping_count / ref_count + + f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8))) + + # return overlapping_count / reference_count + return np.mean(f1_scores, dtype=np.float32) + + +def rouge_2_fscore(predictions, labels, **unused_kwargs): + """ROUGE-2 F1 score computation between labels and predictions. + + This is an approximate ROUGE scoring method since we do not glue word pieces + or decode the ids and tokenize the output. + + Args: + predictions: tensor, model predictions + labels: tensor, gold output. + + Returns: + rouge2_fscore: approx rouge-2 f1 score. + """ + + outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) + # Convert the outputs and labels to a [batch_size, input_length] tensor. + outputs = tf.squeeze(outputs, axis=[-1, -2]) + labels = tf.squeeze(labels, axis=[-1, -2]) + rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32) + return rouge_2_f_score, tf.constant(1.0) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/sari_hook.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/sari_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..e10ac7fb9fa1515ccbf6a981ef1eaef7f15db70a --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/sari_hook.py @@ -0,0 +1,252 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""SARI score for evaluating paraphrasing and other text generation models. 
+
+The score is introduced in the following paper:
+
+  Optimizing Statistical Machine Translation for Text Simplification
+  Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen and Chris Callison-Burch
+  In Transactions of the Association for Computational Linguistics (TACL) 2015
+  http://cs.jhu.edu/~napoles/res/tacl2016-optimizing.pdf
+
+This implementation has two differences from the GitHub [1] implementation:
+  (1) Define 0/0=1 instead of 0 to give higher scores for predictions that match
+      a target exactly.
+  (2) Fix an alleged bug [2] in the deletion score computation.
+
+[1] https://github.com/cocoxu/simplification/blob/master/SARI.py
+    (commit 0210f15)
+[2] https://github.com/cocoxu/simplification/issues/6
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+
+import numpy as np
+import tensorflow.compat.v1 as tf
+
+# The paper that introduces the SARI score uses only the precision of the deleted
+# tokens (i.e. beta=0). To put more emphasis on recall, you may set, e.g.,
+# beta=1.
+BETA_FOR_SARI_DELETION_F_MEASURE = 0
+
+
+def _get_ngram_counter(ids, n):
+  """Get a Counter with the ngrams of the given ID list.
+
+  Args:
+    ids: np.array or a list corresponding to a single sentence
+    n: n-gram size
+
+  Returns:
+    collections.Counter with ID tuples as keys and 1s as values.
+  """
+  # Remove zero IDs used to pad the sequence.
+  ids = [token_id for token_id in ids if token_id != 0]
+  ngram_list = [tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n)]
+  ngrams = set(ngram_list)
+  counts = collections.Counter()
+  for ngram in ngrams:
+    counts[ngram] = 1
+  return counts
+
+
+def _get_fbeta_score(true_positives, selected, relevant, beta=1):
+  """Compute Fbeta score.
+
+  Args:
+    true_positives: Number of true positive ngrams.
+    selected: Number of selected ngrams.
+    relevant: Number of relevant ngrams.
+    beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only.
+
+  Returns:
+    Fbeta score.
+  """
+  precision = 1
+  if selected > 0:
+    precision = true_positives / selected
+  if beta == 0:
+    return precision
+  recall = 1
+  if relevant > 0:
+    recall = true_positives / relevant
+  if precision > 0 and recall > 0:
+    beta2 = beta * beta
+    return (1 + beta2) * precision * recall / (beta2 * precision + recall)
+  else:
+    return 0
+
+
+def get_addition_score(source_counts, prediction_counts, target_counts):
+  """Compute the addition score (Equation 4 in the paper)."""
+  added_to_prediction_counts = prediction_counts - source_counts
+  true_positives = sum((added_to_prediction_counts & target_counts).values())
+  selected = sum(added_to_prediction_counts.values())
+  # Note that in the paper the summation is done over all the ngrams in the
+  # output rather than the ngrams in the following set difference. Since the
+  # former does not make as much sense, we compute the latter, which is also
+  # done in the GitHub implementation.
+ relevant = sum((target_counts - source_counts).values()) + return _get_fbeta_score(true_positives, selected, relevant) + + +def get_keep_score(source_counts, prediction_counts, target_counts): + """Compute the keep score (Equation 5 in the paper).""" + source_and_prediction_counts = source_counts & prediction_counts + source_and_target_counts = source_counts & target_counts + true_positives = sum((source_and_prediction_counts & + source_and_target_counts).values()) + selected = sum(source_and_prediction_counts.values()) + relevant = sum(source_and_target_counts.values()) + return _get_fbeta_score(true_positives, selected, relevant) + + +def get_deletion_score(source_counts, prediction_counts, target_counts, beta=0): + """Compute the deletion score (Equation 6 in the paper).""" + source_not_prediction_counts = source_counts - prediction_counts + source_not_target_counts = source_counts - target_counts + true_positives = sum((source_not_prediction_counts & + source_not_target_counts).values()) + selected = sum(source_not_prediction_counts.values()) + relevant = sum(source_not_target_counts.values()) + return _get_fbeta_score(true_positives, selected, relevant, beta=beta) + + +def get_sari_score(source_ids, prediction_ids, list_of_targets, + max_gram_size=4, beta_for_deletion=0): + """Compute the SARI score for a single prediction and one or more targets. + + Args: + source_ids: a list / np.array of SentencePiece IDs + prediction_ids: a list / np.array of SentencePiece IDs + list_of_targets: a list of target ID lists / np.arrays + max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams, + bigrams, and trigrams) + beta_for_deletion: beta for deletion F score. + + Returns: + the SARI score and its three components: add, keep, and deletion scores + """ + addition_scores = [] + keep_scores = [] + deletion_scores = [] + for n in range(1, max_gram_size + 1): + source_counts = _get_ngram_counter(source_ids, n) + prediction_counts = _get_ngram_counter(prediction_ids, n) + # All ngrams in the targets with count 1. + target_counts = collections.Counter() + # All ngrams in the targets with count r/num_targets, where r is the number + # of targets where the ngram occurs. + weighted_target_counts = collections.Counter() + num_nonempty_targets = 0 + for target_ids_i in list_of_targets: + target_counts_i = _get_ngram_counter(target_ids_i, n) + if target_counts_i: + weighted_target_counts += target_counts_i + num_nonempty_targets += 1 + for gram in weighted_target_counts.keys(): + weighted_target_counts[gram] /= num_nonempty_targets + target_counts[gram] = 1 + keep_scores.append(get_keep_score(source_counts, prediction_counts, + weighted_target_counts)) + deletion_scores.append(get_deletion_score(source_counts, prediction_counts, + weighted_target_counts, + beta_for_deletion)) + addition_scores.append(get_addition_score(source_counts, prediction_counts, + target_counts)) + + avg_keep_score = sum(keep_scores) / max_gram_size + avg_addition_score = sum(addition_scores) / max_gram_size + avg_deletion_score = sum(deletion_scores) / max_gram_size + sari = (avg_keep_score + avg_addition_score + avg_deletion_score) / 3.0 + return sari, avg_keep_score, avg_addition_score, avg_deletion_score + + +def get_sari(source_ids, prediction_ids, target_ids, max_gram_size=4): + """Computes the SARI scores from the given source, prediction and targets. 
+ + Args: + source_ids: A 2D tf.Tensor of size (batch_size , sequence_length) + prediction_ids: A 2D tf.Tensor of size (batch_size, sequence_length) + target_ids: A 3D tf.Tensor of size (batch_size, number_of_targets, + sequence_length) + max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams, + bigrams, and trigrams) + + Returns: + A 4-tuple of 1D float Tensors of size (batch_size) for the SARI score and + the keep, addition and deletion scores. + """ + + def get_sari_numpy(source_ids, prediction_ids, target_ids): + """Iterate over elements in the batch and call the SARI function.""" + sari_scores = [] + keep_scores = [] + add_scores = [] + deletion_scores = [] + # Iterate over elements in the batch. + for source_ids_i, prediction_ids_i, target_ids_i in zip( + source_ids, prediction_ids, target_ids): + sari, keep, add, deletion = get_sari_score( + source_ids_i, prediction_ids_i, target_ids_i, max_gram_size, + BETA_FOR_SARI_DELETION_F_MEASURE) + sari_scores.append(sari) + keep_scores.append(keep) + add_scores.append(add) + deletion_scores.append(deletion) + return (np.asarray(sari_scores), np.asarray(keep_scores), + np.asarray(add_scores), np.asarray(deletion_scores)) + + sari, keep, add, deletion = tf.py_func( + get_sari_numpy, + [source_ids, prediction_ids, target_ids], + [tf.float64, tf.float64, tf.float64, tf.float64]) + return sari, keep, add, deletion + + +def sari_score(predictions, labels, features, **unused_kwargs): + """Computes the SARI scores from the given source, prediction and targets. + + An approximate SARI scoring method since we do not glue word pieces or + decode the ids and tokenize the output. By default, we use ngram order of 4. + Also, this does not have beam search. + + Args: + predictions: tensor, model predictions. + labels: tensor, gold output. + features: dict, containing inputs. + + Returns: + sari: int, approx sari score + """ + if "inputs" not in features: + raise ValueError("sari_score requires inputs feature") + + # Convert the inputs and outputs to a [batch_size, sequence_length] tensor. + inputs = tf.squeeze(features["inputs"], axis=[-1, -2]) + outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) + outputs = tf.squeeze(outputs, axis=[-1, -2]) + + # Convert the labels to a [batch_size, 1, sequence_length] tensor. + labels = tf.squeeze(labels, axis=[-1, -2]) + labels = tf.expand_dims(labels, axis=1) + + score, _, _, _ = get_sari(inputs, outputs, labels) + return score, tf.constant(1.0) diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/scheduled_sampling.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/scheduled_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..1bd019e8f1b3e1da367a19ac881cabd93840ffc4 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/scheduled_sampling.py @@ -0,0 +1,283 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
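For reference, a minimal standalone sketch of the sentence-level ROUGE-L computation implemented above by `_lcs`, `_f_lcs`, and `rouge_l_sentence_level`, operating on plain token lists (the `tf.py_func` plumbing in `rouge_l_fscore` is omitted):

```python
# Standalone sketch of sentence-level ROUGE-L (mirrors _lcs / _f_lcs above).
def lcs_length(x, y):
  """Length of the longest common subsequence of x and y (O(n*m) DP)."""
  n, m = len(x), len(y)
  table = [[0] * (m + 1) for _ in range(n + 1)]
  for i in range(1, n + 1):
    for j in range(1, m + 1):
      if x[i - 1] == y[j - 1]:
        table[i][j] = table[i - 1][j - 1] + 1
      else:
        table[i][j] = max(table[i - 1][j], table[i][j - 1])
  return table[n][m]


def rouge_l(candidate, reference):
  """F_lcs = ((1 + beta^2) * R_lcs * P_lcs) / (R_lcs + beta^2 * P_lcs)."""
  llcs = lcs_length(candidate, reference)
  r_lcs = llcs / len(reference)
  p_lcs = llcs / len(candidate)
  beta = p_lcs / (r_lcs + 1e-12)
  num = (1 + beta**2) * r_lcs * p_lcs
  return num / (r_lcs + (beta**2) * p_lcs + 1e-12)


print(rouge_l("the cat sat on the mat".split(),
              "the cat is on the mat".split()))  # ~0.83 (LCS length 5)
```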
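Similarly, `get_sari_score()` in sari_hook.py above is pure Python/NumPy and can be exercised directly on ID lists, without the `tf.py_func` wrapper in `get_sari()`. A usage sketch with made-up token IDs (zeros act as padding and are ignored by `_get_ngram_counter`):

```python
# Usage sketch; all token IDs below are hypothetical.
from TensorFlow.nlp.transformer.utils.sari_hook import get_sari_score

source_ids = [5, 8, 13, 21, 34, 0, 0]           # hypothetical source sentence
prediction_ids = [5, 8, 55, 34, 0, 0, 0]        # hypothetical model output
list_of_targets = [[5, 8, 55, 34], [5, 8, 34]]  # two hypothetical references

sari, keep, add, deletion = get_sari_score(
    source_ids, prediction_ids, list_of_targets,
    max_gram_size=4, beta_for_deletion=0)
print("SARI=%.3f keep=%.3f add=%.3f del=%.3f" % (sari, keep, add, deletion))
```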
+############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - updated imports + +"""Scheduled Sampling. + +This module implemented scheduled sampling as described in (Bengio et al, 2015). +The entry points are two functions, + +`sequential_scheduled_sampling_for_t2tmodel()`: + scheduled sampling adapted to instances of T2TModel. + +`sequential_scheduled_sampling()`: + raw implementation of scheduled sampling. May be used independent of T2T. + +**WARNING** This code is VERY slow. Its runtime is at least O(n^2) for +sequences of length n. For models with self-attention, its runtime is O(n^3). + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy + +from TensorFlow.nlp.transformer.layers import common_layers +import tensorflow.compat.v1 as tf + +from tensorflow.python.ops import inplace_ops # pylint: disable=g-direct-tensorflow-import + + +def sequential_scheduled_sampling_for_t2tmodel(t2tmodel, features): + """Schedule Sampling for T2TModels. + + Args: + t2tmodel: T2TModel instance. + features: {str: Tensor}. Input features. + + Returns: + ss_logits: [batch_size, seq_len, 1, 1, vocab_size]. + losses_dict: {str: scalar Tensor}. Losses to minimize. + """ + targets = features["targets"] + targets_size = common_layers.shape_list(targets) + batch_size = targets_size[0] + seq_len = targets_size[1] + targets = tf.reshape(targets, [batch_size, seq_len]) + + adapter = ScheduledSamplingAdapter(t2tmodel, features) + ss_tokens, ss_logits, losses_dict = sequential_scheduled_sampling( + infer_fn=adapter.infer_fn, + mix_fn=adapter.mix_fn, + loss_fn=adapter.loss_fn, + targets=targets) + + _ = ss_tokens # unused. + targets_vocab_size = t2tmodel.problem_hparams.vocab_size["targets"] + ss_logits = tf.reshape(ss_logits, + [batch_size, seq_len, 1, 1, targets_vocab_size]) + + return ss_logits, losses_dict + + +def sequential_scheduled_sampling(infer_fn, mix_fn, loss_fn, targets): + """Scheduled Sampling. + + Args: + infer_fn: Function. Computes logits for all timesteps. + mix_fn: Function. Mixes gold and sample tokens. + loss_fn: Function. Computes loss between gold tokens and logits. + targets: Tensor of shape [batch_size, seq_len]. Gold tokens. + + Returns: + ss_tokens: Tensor of shape [batch_size, seq_len]. Scheduled sampling tokens. + ss_logits: Tensor of shape [batch_size, seq_len, vocab_size]. Logits for + next token when conditioning on ss_tokens. + losses_dict: {str: scalar Tensor}. Losses to optimize. + """ + targets_shape = common_layers.shape_list(targets) + batch_size = targets_shape[0] + seq_len = targets_shape[1] + + if not targets.shape.is_fully_defined(): + # TODO(duckworthd): When running on GPU, I get the following error. Solve + # it to enable use on other devices. + # + # Cannot use 'Identity_186' as input to + # 'transformer/parallel_0_7/transformer/transformer/symbol_modality_16282_512/shared/convert_gradient_to_tensor_HBc3xYw22Mw' + # because 'Identity_186' is in a while loop. + + raise ValueError( + "The following code only works on TPU. 
As targets.shape isn't fully " + "defined, I am assuming you are using a different device.") + + def cond_fn(i, ss_tokens): + """True if i < seq_len.""" + _ = ss_tokens + return i < seq_len + + def body_fn(i, ss_tokens): + """Constructs conditioning tokens for scheduled sampling.""" + # next_token_logits depends on timesteps 0...i-1. + # + # [batch_size, seq_len] -> [batch_size, seq_len, vocab_size] + ss_tokens_logits = infer_fn(ss_tokens) + + # Same as 'next_token_logits = ss_tokens_logits[:, i, :]'. + vocab_size = common_layers.shape_list(ss_tokens_logits)[2] + next_token_logits = tf.slice( + ss_tokens_logits, begin=[0, i, 0], size=[batch_size, 1, vocab_size]) + next_token_logits = tf.squeeze(next_token_logits, axis=[1]) + + # [batch_size, vocab_size] -> [batch_size] + sampled_next_tokens = _sample_next_tokens(next_token_logits) + + # Same as 'gold_next_tokens = targets[:, i]'. + gold_next_tokens = tf.slice(targets, begin=[0, i], size=[batch_size, 1]) + gold_next_tokens = tf.squeeze(gold_next_tokens, axis=[1]) + + next_tokens = mix_fn(gold_next_tokens, sampled_next_tokens) + ss_tokens = _update_timestep(ss_tokens, timestep=i, values=next_tokens) + + return i+1, tf.stop_gradient(ss_tokens) + + # tf.while_loop() over all timesteps. Generate scheduled sampling tokens. + i = 0 + ss_tokens = tf.zeros([batch_size, seq_len], dtype=tf.int32) + i, ss_tokens = tf.while_loop(cond_fn, body_fn, [i, ss_tokens]) + + ss_logits = infer_fn(ss_tokens) + return ss_tokens, ss_logits, loss_fn(targets, ss_logits) + + +def _mix_tokens(p_sample, gold_targets, sampled_targets): + """Interleave sampled and gold tokens randomly. + + Args: + p_sample: float in [0, 1]. Probability a token will come from + 'sampled_targets'. 0 means all-gold, 1 means all-sampled. + gold_targets: Tensor. Gold token IDs. + sampled_targets: Tensor. Sampled token IDs. Same shape as 'gold_targets'. + + Returns: + Tensor of same shape as 'gold_targets' containing a mix of tokens from + 'gold_targets' and 'sampled_targets'. + """ + targets_shape = common_layers.shape_list(sampled_targets) + return tf.where( + tf.less(tf.random_uniform(targets_shape), p_sample), + sampled_targets, gold_targets) + + +def _sample_next_tokens(logits): + """Sample tokens for next timestep.""" + batch_size = common_layers.shape_list(logits)[0] + next_tokens = tf.random.categorical(logits, 1) + next_tokens = tf.cast(next_tokens, tf.int32) + next_tokens = tf.reshape(next_tokens, [batch_size]) + return next_tokens + + +def _update_timestep(x, timestep, values): + """Set x[:, timestep] = values. + + This operation is **NOT** differentiable. + + Args: + x: Tensor of shape [batch_size, seq_len, ...] + timestep: int or scalar Tensor. Index to update in x. + values: Tensor of shape [batch_size, ...]. New values for x[:, i]. + + Returns: + Copy of 'x' after setting x[:, timestep] = values. 
+ """ + perm = range(x.shape.ndims) + perm[0], perm[1] = perm[1], perm[0] + x = tf.transpose(x, perm) + x = inplace_ops.alias_inplace_update(x, timestep, values) + x = tf.transpose(x, perm) + return x + + +def inverse_decay_mix_prob(warmup_schedule_name, p_max, num_warmup_steps): + """Interpolate from 0.001 to 'p_max' over 'num_warmup_steps'.""" + warmup_schedule_fn = { + "exp": common_layers.inverse_exp_decay, + "linear": common_layers.inverse_lin_decay, + "sigmoid": common_layers.inverse_sigmoid_decay, + }[warmup_schedule_name] + return p_max * warmup_schedule_fn(num_warmup_steps, min_value=0.001) + + +class ScheduledSamplingAdapter(object): + """Adapts T2TModel for sequential_scheduled_sampling().""" + + def __init__(self, t2tmodel, features): + self._t2tmodel = t2tmodel + self._features = features + + hparams = self._t2tmodel.hparams + assert hparams.mode == tf.estimator.ModeKeys.TRAIN, hparams.mode + + def infer_fn(self, partial_targets): + """Computes logits for all timesteps. + + Args: + partial_targets: [batch_size, seq_len]. Targets to condition on. + + Returns: + next_token_logits: [batch_size, seq_len, vocab_size] + """ + batch_size, seq_len = common_layers.shape_list(partial_targets) + partial_targets = tf.reshape(partial_targets, [batch_size, seq_len, 1, 1]) + features = copy.copy(self._features) + features["targets"] = partial_targets + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + transformed_features = self._t2tmodel.bottom(features) + + with tf.variable_scope("body"): + body_outputs, losses = self._t2tmodel._normalize_body_output( # pylint: disable=protected-access + self._t2tmodel.body(transformed_features)) + assert losses == {"extra": 0.0}, ( + "Auxiliary losses are not propagated in this code. %s" + % (losses,)) + + logits = self._t2tmodel.top(body_outputs, features) + + vocab_size = self._t2tmodel.problem_hparams.vocab_size["targets"] + logits = tf.reshape(logits, [batch_size, seq_len, vocab_size]) + return logits + + def mix_fn(self, gold_tokens, sampled_tokens): + """Mixes gold and sampled tokens randomly.""" + hparams = self._t2tmodel.hparams + p_sample = inverse_decay_mix_prob( + hparams.scheduled_sampling_warmup_schedule, + hparams.scheduled_sampling_gold_mixin_prob, + hparams.scheduled_sampling_warmup_steps) + return _mix_tokens( + p_sample=p_sample, + gold_targets=gold_tokens, + sampled_targets=sampled_tokens) + + def loss_fn(self, targets, logits): + """Constructs loss dict. + + Args: + targets: [batch_size, seq_len] + logits: [batch_size, seq_len, vocab_size] + + Returns: + {str: Tensor of shape []}. Losses. + """ + batch_size, seq_len, vocab_size = common_layers.shape_list(logits) + targets = tf.reshape(targets, [batch_size, seq_len, 1, 1]) + logits = tf.reshape(logits, [batch_size, seq_len, 1, 1, vocab_size]) + features = copy.copy(self._features) + features["targets"] = targets + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + losses = { + "training": self._t2tmodel.loss(logits, features), + } + + return losses diff --git a/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/trainer_lib.py b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/trainer_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..82d45b7b3514a35d0f76447486a2626da2459101 --- /dev/null +++ b/docker/bloom13b/Model-References/TensorFlow/nlp/transformer/utils/trainer_lib.py @@ -0,0 +1,819 @@ +# coding=utf-8 +# Copyright 2021 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company +############################################################################### +# Changes: +# - updated imports +# - removed tpu related functionality +# - set default save_summary_steps to 100 +# - changed contrib.distribute to tf.distribute +# - removed support for mlperf +# - removed support for mtf_mode +# - added support for horovod +# - removed exporter +# - added ExamplesPerSecondEstimatorHook +# - added ProfilerHook + +"""Library for training. See t2t_trainer.py.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import contextlib +import json +import os +import random +import numpy as np + +from TensorFlow.nlp.transformer.utils import contrib +from TensorFlow.nlp.transformer.utils import decoding +from TensorFlow.nlp.transformer.utils import devices +from TensorFlow.nlp.transformer.utils import hparams_lib +from TensorFlow.nlp.transformer.utils import metrics_hook +from TensorFlow.nlp.transformer.utils import profile +from TensorFlow.nlp.transformer.utils import registry +from TensorFlow.nlp.transformer.utils import t2t_model + +import tensorflow.compat.v1 as tf + +from tensorflow.core.protobuf import rewriter_config_pb2 +from tensorflow.python import debug + +from TensorFlow.common.tb_utils import ExamplesPerSecondEstimatorHook + + +create_hparams = hparams_lib.create_hparams +add_problem_hparams = hparams_lib.add_problem_hparams + + +def next_checkpoint(model_dir, timeout_mins=240): + """Yields successive checkpoints from model_dir. + + Args: + model_dir: The directory in which checkpoints are saved. + timeout_mins: The maximum amount of time in minutes to wait + between checkpoints. Set this to -1 to wait indefinitely. + Yields: + last_ckpt: a new checkpoint path, or None if the timeout was reached. + """ + last_ckpt = None + timeout_secs = None + if timeout_mins != -1: + timeout_secs = timeout_mins * 60 + while True: + last_ckpt = contrib.training().wait_for_new_checkpoint( + model_dir, last_ckpt, seconds_to_sleep=60, timeout=timeout_secs) + + if last_ckpt is None: + tf.logging.info( + "Eval timeout: no new checkpoints within %dm" % timeout_mins) + break + + yield last_ckpt + + +def next_undecoded_checkpoint(model_dir, timeout_mins=240): + """Yields successive checkpoints from model_dir.""" + last_ckpt = None + last_step = 0 + while True: + # Get the latest checkpoint. + last_ckpt = contrib.training().wait_for_new_checkpoint( + model_dir, last_ckpt, seconds_to_sleep=60, timeout=60 * timeout_mins) + # Get all the checkpoint from the model dir. + ckpt_path = tf.train.get_checkpoint_state(model_dir) + all_model_checkpoint_paths = ckpt_path.all_model_checkpoint_paths + ckpt_step = np.inf + next_ckpt = None + # Find the next checkpoint to eval based on last_step. 
+ for ckpt in all_model_checkpoint_paths: + step = int(os.path.basename(ckpt).split("-")[1]) + if step > last_step and step < ckpt_step: + ckpt_step = step + next_ckpt = ckpt + + # If all the checkpoints have been evaluated. + if last_ckpt is None and next_ckpt is None: + tf.logging.info( + "Eval timeout: no new checkpoints within %dm" % timeout_mins) + break + + if next_ckpt is not None: + last_step = ckpt_step + last_ckpt = next_ckpt + + yield last_ckpt + + +def create_session_config(log_device_placement=False, + enable_graph_rewriter=False, + gpu_mem_fraction=0.95, + use_tpu=False, + xla_jit_level=tf.OptimizerOptions.OFF, + inter_op_parallelism_threads=0, + intra_op_parallelism_threads=0): + """The TensorFlow Session config to use.""" + if use_tpu: + graph_options = tf.GraphOptions() + else: + if enable_graph_rewriter: + rewrite_options = rewriter_config_pb2.RewriterConfig() + rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON + graph_options = tf.GraphOptions(rewrite_options=rewrite_options) + else: + graph_options = tf.GraphOptions( + optimizer_options=tf.OptimizerOptions( + opt_level=tf.OptimizerOptions.L1, + do_function_inlining=False, + global_jit_level=xla_jit_level)) + + gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction) + + config = tf.ConfigProto( + allow_soft_placement=True, + graph_options=graph_options, + gpu_options=gpu_options, + log_device_placement=log_device_placement, + inter_op_parallelism_threads=inter_op_parallelism_threads, + intra_op_parallelism_threads=intra_op_parallelism_threads, + isolate_session_state=True) + return config + + +def is_cloud_async_distributed(): + return ("chief" in + json.loads(os.environ.get("TF_CONFIG", "{}")).get("cluster", {})) + + +def create_run_config(model_name, + master="", + model_dir=None, + iterations_per_loop=1000, + num_shards=8, + log_device_placement=False, + save_checkpoints_steps=1000, + save_checkpoints_secs=None, + keep_checkpoint_max=20, + keep_checkpoint_every_n_hours=10000, + num_gpus=1, + gpu_order="", + num_async_replicas=1, + enable_graph_rewriter=False, + gpu_mem_fraction=0.95, + no_data_parallelism=False, + optionally_use_dist_strat=False, + daisy_chain_variables=True, + schedule="continuous_train_and_eval", + worker_job="/job:localhost", + worker_id=0, + ps_replicas=0, + ps_job="/job:ps", + ps_gpu=0, + random_seed=None, + sync=False, + tpu_infeed_sleep_secs=None, + use_tpu=False, + use_tpu_estimator=False, + xla_jit_level=tf.OptimizerOptions.OFF, + inter_op_parallelism_threads=0, + log_step_count_steps=100, + intra_op_parallelism_threads=0, + save_summary_steps=100, + use_hpu=False): + """Create RunConfig, TPUConfig, and Parallelism object.""" + session_config = create_session_config( + log_device_placement=log_device_placement, + enable_graph_rewriter=enable_graph_rewriter, + gpu_mem_fraction=gpu_mem_fraction, + use_tpu=use_tpu, + xla_jit_level=xla_jit_level, + inter_op_parallelism_threads=inter_op_parallelism_threads, + intra_op_parallelism_threads=intra_op_parallelism_threads) + run_config_args = { + "master": master, + "evaluation_master": master, + "model_dir": model_dir, + "session_config": session_config, + "save_summary_steps": save_summary_steps, + "save_checkpoints_steps": save_checkpoints_steps, + "save_checkpoints_secs": save_checkpoints_secs, + "keep_checkpoint_max": keep_checkpoint_max, + "keep_checkpoint_every_n_hours": keep_checkpoint_every_n_hours, + "tf_random_seed": random_seed, + "log_step_count_steps": log_step_count_steps, + } + if 
save_checkpoints_secs: + del run_config_args["save_checkpoints_steps"] + run_config_cls = contrib.learn().RunConfig + + # tf.estimator RunConfig construction got totally broken in TF2. + # we now have to specify master in a global environment variable + if contrib.is_tf2: + del run_config_args["evaluation_master"] + del run_config_args["master"] + + config = run_config_cls(**run_config_args) + + # If not using TPU, add device info for data_parallelism + config.use_tpu = use_tpu + if not use_tpu: + config.t2t_device_info = { + "num_async_replicas": num_async_replicas, + } + use_distribution_strategy = ( + optionally_use_dist_strat and + t2t_model.T2TModel.has_symmetric_shards(model_name) and + not no_data_parallelism and ps_replicas == 0 and ps_gpu == 0 and + num_async_replicas == 1) + + if use_distribution_strategy: + tf.logging.info( + "Configuring MirroredStrategy DistributionStrategy to replicate the " + "model." + ) + distribution = tf.distribute.MirroredStrategy() + config = config.replace(train_distribute=distribution) + config.data_parallelism = None + else: + tf.logging.info("Configuring DataParallelism to replicate the model.") + config.data_parallelism = devices.data_parallelism( + daisy_chain_variables=daisy_chain_variables, + ps_replicas=ps_replicas, + ps_job=ps_job, + ps_gpu=ps_gpu, + schedule=schedule, + sync=sync, + worker_gpu=num_gpus, + worker_replicas=num_async_replicas, + worker_id=worker_id, + gpu_order=gpu_order, + worker_job=worker_job, + no_data_parallelism=no_data_parallelism, + use_hpu=use_hpu) + + return config + + +def create_estimator(model_name, + hparams, + run_config, + schedule="train_and_evaluate", + decode_hparams=None, + use_tpu=False, + use_tpu_estimator=False, + use_xla=False, + export_saved_model_api_version=1, + use_guarantee_const_getter=False): + """Create a T2T Estimator.""" + model_fn = t2t_model.T2TModel.make_estimator_model_fn( + model_name, hparams, decode_hparams=decode_hparams, use_tpu=use_tpu) + + + del use_xla + if use_tpu or use_tpu_estimator: + from tensorflow.contrib.tpu.python.tpu import tpu_estimator # pylint: disable=g-import-not-at-top + problem = hparams.problem + batch_size = ( + problem.tpu_batch_size_per_shard(hparams) * + run_config.tpu_config.num_shards) + predict_batch_size = batch_size + if decode_hparams and decode_hparams.batch_size: + predict_batch_size = decode_hparams.batch_size + if decode_hparams and run_config.tpu_config: + decode_hparams.add_hparam("iterations_per_loop", + run_config.tpu_config.iterations_per_loop) + if export_saved_model_api_version == 1: + api_version_enum_name = tpu_estimator.ExportSavedModelApiVersion.V1 + estimator_model_fn = model_fn + elif export_saved_model_api_version == 2: + api_version_enum_name = tpu_estimator.ExportSavedModelApiVersion.V2 + + def maybe_use_guarantee_const_getter_model_fn(features, labels, mode, + params): + """Wrapper model_fn with guarantee_const getter.""" + if not use_guarantee_const_getter: + return model_fn(features, labels, mode, params) + + # It marks all weights as constant, which may improves TPU inference + # performance because it prevents the weights being transferred to the + # TPU. It will increase HBM "program" usage and reduce HBM "arguments" + # usage during TPU model serving. 
+ def guarantee_const_getter(getter, name, *args, **kwargs): + with tf.control_dependencies(None): + return tf.guarantee_const( + getter(name, *args, **kwargs), name=name + "/GuaranteeConst") + + @contextlib.contextmanager + def guarantee_const_scope(): + var_scope = tf.get_variable_scope() + prev_custom_getter = var_scope.custom_getter + prev_caching_device = var_scope.caching_device + var_scope.set_custom_getter(guarantee_const_getter) + var_scope.set_caching_device(lambda op: op.device) + yield + var_scope.set_custom_getter(prev_custom_getter) + var_scope.set_caching_device(prev_caching_device) + + with guarantee_const_scope(): + return model_fn(features, labels, mode, params) + + def tpu_model_fn(features, labels, mode, params): + """Wrapper model_fn with tpu.rewrite / TPUPartitionedCall.""" + if mode == tf.estimator.ModeKeys.PREDICT and params["use_tpu"]: + batch_config = tpu_estimator.BatchConfig( + num_batch_threads=2, + max_batch_size=predict_batch_size, + batch_timeout_micros=60 * 1000, + allowed_batch_sizes=[predict_batch_size]) + return tpu_estimator.model_fn_inference_on_tpu( + maybe_use_guarantee_const_getter_model_fn, + features=features, + labels=labels, + config=None, + params=params, + batch_config=batch_config) + else: + return model_fn(features, labels, mode, params) + + estimator_model_fn = tpu_model_fn + else: + raise ValueError("Flag export_saved_model_api_version must be 1 or 2.") + estimator = contrib.tpu().TPUEstimator( + model_fn=estimator_model_fn, + model_dir=run_config.model_dir, + config=run_config, + use_tpu=use_tpu, + train_batch_size=batch_size, + eval_batch_size=batch_size if "eval" in schedule else None, + predict_batch_size=predict_batch_size, + export_saved_model_api_version=api_version_enum_name) + else: + estimator = tf.estimator.Estimator( + model_fn=model_fn, + model_dir=run_config.model_dir, + config=run_config, + ) + return estimator + + +def create_hooks(use_tfdbg=False, + use_dbgprofile=False, + dbgprofile_kwargs=None, + use_validation_monitor=False, + validation_monitor_kwargs=None, + use_early_stopping=False, + early_stopping_kwargs=None, + use_horovod=False, + use_hpu=False): + """Create train and eval hooks for Experiment.""" + train_hooks = [] + eval_hooks = [] + + if use_tfdbg: + hook = debug.LocalCLIDebugHook() + train_hooks.append(hook) + eval_hooks.append(hook) + + if use_dbgprofile: + # Recorded traces can be visualized with chrome://tracing/ + # The memory/tensor lifetime is also profiled + tf.logging.info("Using ProfilerHook") + defaults = dict(save_steps=10, show_dataflow=True, show_memory=True) + defaults.update(dbgprofile_kwargs) + train_hooks.append(tf.train.ProfilerHook(**defaults)) + + if use_validation_monitor: + tf.logging.info("Using ValidationMonitor") + train_hooks.append( + contrib.learn().monitors.ValidationMonitor( + hooks=eval_hooks, **validation_monitor_kwargs)) + + if use_early_stopping: + tf.logging.info("Using EarlyStoppingHook") + hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs) + # Adding to both training and eval so that eval aborts as well + train_hooks.append(hook) + eval_hooks.append(hook) + + if use_horovod: + import horovod.tensorflow as hvd + hook = hvd.BroadcastGlobalVariablesHook(0) + train_hooks.append(hook) + + return train_hooks, eval_hooks + + +class HookContext(collections.namedtuple( + "HookContext", + ["estimator", "problem", "hparams"])): + pass + + +class T2TExperiment(object): + """Custom Experiment class for running distributed experiments.""" + + def __init__(self, estimator, 
hparams, train_spec, eval_spec, + use_validation_monitor, decode_hparams=None): + self._train_spec = train_spec + self._eval_spec = eval_spec + self._hparams = hparams + self._decode_hparams = decode_hparams + self._estimator = estimator + self._use_validation_monitor = use_validation_monitor + + @property + def estimator(self): + return self._estimator + + @property + def train_steps(self): + return self._train_spec.max_steps + + @property + def eval_steps(self): + return self._eval_spec.steps + + def continuous_train_and_eval(self, continuous_eval_predicate_fn=None): + del continuous_eval_predicate_fn + tf.estimator.train_and_evaluate(self._estimator, self._train_spec, + self._eval_spec) + return self.evaluate() + + def train_and_evaluate(self): + if self._use_validation_monitor: + tf.logging.warning("EvalSpec not provided. Estimator will not manage " + "model evaluation. Assuming ValidationMonitor present " + "in train_hooks.") + self.train() + + def train(self, max_steps=None): + self._estimator.train( + self._train_spec.input_fn, + hooks=self._train_spec.hooks, + max_steps=max_steps or self._train_spec.max_steps) + + def train_eval_and_decode(self): + """Does eval and decode after training every eval_freq_in_steps.""" + eval_steps = self._hparams.eval_freq_in_steps + packed_dataset = "_packed" in self._hparams.problem.name + for i in range(0, self._train_spec.max_steps, eval_steps): + if packed_dataset and i > 0: + problem = registry.problem(self._hparams.problem.name + "_packed") + p_hparams = problem.get_hparams(self._hparams) + self._hparams.problem = problem + self._hparams.problem_hparams = p_hparams + self._estimator.train( + self._train_spec.input_fn, + steps=eval_steps, + hooks=self._train_spec.hooks) + self._set_eval_dir_name("eval") + self._estimator.evaluate( + self._eval_spec.input_fn, + steps=self._eval_spec.steps, + hooks=self._eval_spec.hooks, + name="eval") + if packed_dataset: + problem = registry.problem( + self._hparams.problem.name.replace("_packed", "")) + p_hparams = problem.get_hparams(self._hparams) + self._hparams.problem = problem + self._hparams.problem_hparams = p_hparams + self.decode(dataset_split=tf.estimator.ModeKeys.EVAL) + d_hparams = self._decode_hparams + + d_hparams = self._decode_hparams + + def _set_eval_dir_name(self, eval_dir_name): + attr = "eval_dir_name" + hp = self._hparams + if attr not in hp: + hp.add_hparam(attr, "") + hp.eval_dir_name = eval_dir_name + + def evaluate(self): + name = "eval" + self._set_eval_dir_name("eval") + return self._estimator.evaluate( + self._eval_spec.input_fn, + steps=self._eval_spec.steps, + hooks=self._eval_spec.hooks, + name=name) + + def evaluate_on_train_data(self): + name = "eval_train" + self._set_eval_dir_name(name) + self._estimator.evaluate( + self._train_spec.input_fn, + steps=self._eval_spec.steps, + hooks=self._eval_spec.hooks, + name=name) + + def continuous_eval(self): + """Evaluate until checkpoints stop being produced.""" + for ckpt_path in next_checkpoint(self._hparams.model_dir, + self._hparams.eval_timeout_mins): + # Skip zero'th step. + train_step = decoding.get_step_from_ckpt_path(ckpt_path) + if train_step == 0: + tf.logging.info("Skipping evaluation at step 0") + continue + self.evaluate() + + def continuous_eval_on_train_data(self): + """Evaluate on train data until checkpoints stop being produced.""" + for ckpt_path in next_checkpoint(self._hparams.model_dir, + self._hparams.eval_timeout_mins): + # Skip zero'th step. 
+ train_step = decoding.get_step_from_ckpt_path(ckpt_path) + if train_step == 0: + tf.logging.info("Skipping evaluation at step 0") + continue + self.evaluate_on_train_data() + + def test(self): + """Perform 1 train step and 1 eval step.""" + if self._use_validation_monitor: + return self.train_and_evaluate() + + self._estimator.train( + self._train_spec.input_fn, hooks=self._train_spec.hooks, max_steps=1) + + self._estimator.evaluate( + self._eval_spec.input_fn, steps=1, hooks=self._eval_spec.hooks) + + def run_std_server(self): + """Starts a TensorFlow server and joins the serving thread. + + Typically used for parameter servers. + + Raises: + ValueError: if not enough information is available in the estimator's + config to create a server. + """ + config = tf.estimator.RunConfig() + server = tf.train.Server( + config.cluster_spec, + job_name=config.task_type, + task_index=config.task_id, + protocol=config.protocol) + server.join() + + def decode(self, + dataset_split=None, + decode_from_file=False, + checkpoint_path=None): + """Decodes from dataset or file.""" + if decode_from_file: + decoding.decode_from_file(self._estimator, + self._decode_hparams.decode_from_file, + self._hparams, + self._decode_hparams, + self._decode_hparams.decode_to_file) + else: + decoding.decode_from_dataset( + self._estimator, + self._hparams.problem.name, + self._hparams, + self._decode_hparams, + dataset_split=dataset_split, + checkpoint_path=checkpoint_path) + + def continuous_decode(self): + """Decode from dataset on new checkpoint.""" + for _ in next_checkpoint(self._hparams.model_dir, + self._decode_hparams.decode_timeout_mins): + self.decode() + + def continuous_decode_on_train_data(self): + """Decode from dataset on new checkpoint.""" + for _ in next_checkpoint(self._hparams.model_dir, + self._decode_hparams.decode_timeout_mins): + self.decode(dataset_split=tf.estimator.ModeKeys.TRAIN) + + def continuous_decode_on_eval_data(self): + """Decode from dataset on new checkpoint.""" + ckpt_generator = next_checkpoint(self._hparams.model_dir, + self._decode_hparams.decode_timeout_mins) + + for ckpt in ckpt_generator: + current_step = decoding.get_step_from_ckpt_path(ckpt) + tf.logging.info("Decoding step %d" % current_step) + # Skip checkpoint 0. + if current_step == 0: + continue + # Decode the latest checkpoint by default. 
+ checkpoint_path = None + + self.decode( + dataset_split=tf.estimator.ModeKeys.EVAL, + checkpoint_path=checkpoint_path) + d_hparams = self._decode_hparams + + d_hparams = self._decode_hparams + + def continuous_decode_from_file(self): + """Decode from file on new checkpoint.""" + for _ in next_checkpoint(self._hparams.model_dir, + self._decode_hparams.decode_timeout_mins): + self.decode(decode_from_file=True) + + +def create_experiment( + run_config, + hparams, + model_name, + problem_name, + data_dir, + train_steps, + eval_steps, + min_eval_frequency=2000, + eval_throttle_seconds=600, + schedule="train_and_evaluate", + export=False, + decode_hparams=None, + use_tfdbg=False, + use_dbgprofile=False, + eval_early_stopping_steps=None, + eval_early_stopping_metric=None, + eval_early_stopping_metric_delta=None, + eval_early_stopping_metric_minimize=True, + eval_timeout_mins=240, + eval_use_test_set=False, + use_tpu=False, + use_tpu_estimator=False, + use_xla=False, + export_saved_model_api_version=1, + use_guarantee_const_getter=False, + additional_train_hooks=None, + additional_eval_hooks=None, + warm_start_from=None, + decode_from_file="", + decode_to_file="", + decode_reference="", + std_server_protocol=None, + use_horovod=False, + use_hpu=False): + """Create Experiment.""" + # HParams + hparams.add_hparam("model_dir", run_config.model_dir) + hparams.add_hparam("data_dir", data_dir) + hparams.add_hparam("train_steps", train_steps) + hparams.add_hparam("eval_steps", eval_steps) + hparams.add_hparam("schedule", schedule) + hparams.add_hparam("warm_start_from", warm_start_from) + hparams.add_hparam("std_server_protocol", std_server_protocol) + hparams.add_hparam("eval_freq_in_steps", min_eval_frequency) + hparams.add_hparam("eval_timeout_mins", eval_timeout_mins) + if decode_hparams is not None: + decode_hparams.add_hparam("decode_from_file", decode_from_file) + if decode_to_file and not decode_hparams.decode_to_file: + decode_hparams.decode_to_file = decode_to_file + if decode_reference and not decode_hparams.decode_reference: + decode_hparams.decode_reference = decode_reference + add_problem_hparams(hparams, problem_name) + + # Estimator + estimator = create_estimator( + model_name, + hparams, + run_config, + schedule=schedule, + decode_hparams=decode_hparams, + use_tpu=use_tpu, + use_tpu_estimator=use_tpu_estimator, + use_xla=use_xla, + export_saved_model_api_version=export_saved_model_api_version, + use_guarantee_const_getter=use_guarantee_const_getter) + + # Input fns from Problem + problem = hparams.problem + train_input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.TRAIN, + hparams) + + dataset_split = "test" if eval_use_test_set else None + dataset_kwargs = {"dataset_split": dataset_split} + eval_input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.EVAL, + hparams, + dataset_kwargs=dataset_kwargs) + + # Hooks + validation_monitor_kwargs = dict( + input_fn=eval_input_fn, + eval_steps=eval_steps, + every_n_steps=min_eval_frequency, + early_stopping_rounds=eval_early_stopping_steps, + early_stopping_metric=eval_early_stopping_metric, + early_stopping_metric_minimize=eval_early_stopping_metric_minimize) + dbgprofile_kwargs = {"output_dir": run_config.model_dir} + early_stopping_kwargs = dict( + events_dir=os.path.join(run_config.model_dir, "eval_continuous"), + tag=eval_early_stopping_metric, + num_plateau_steps=eval_early_stopping_steps, + plateau_decrease=eval_early_stopping_metric_minimize, + plateau_delta=eval_early_stopping_metric_delta, + 
every_n_steps=min_eval_frequency) + + # Eval on TPU Pods is not supported yet + if use_tpu and run_config.tpu_config.num_shards > 8 and "eval" in schedule: + raise ValueError("Eval is not currently supported on a TPU Pod") + + # In-process eval (and possible early stopping) + if schedule == "continuous_train_and_eval" and min_eval_frequency: + tf.logging.warn("ValidationMonitor only works with " + "--schedule=train_and_evaluate") + use_validation_monitor = ( + schedule == "train_and_evaluate" and min_eval_frequency) + # Distributed early stopping + local_schedules = ["train_and_evaluate", "continuous_train_and_eval"] + use_early_stopping = ( + schedule not in local_schedules and eval_early_stopping_steps) + train_hooks, eval_hooks = create_hooks( + use_tfdbg=use_tfdbg, + use_dbgprofile=use_dbgprofile, + dbgprofile_kwargs=dbgprofile_kwargs, + use_validation_monitor=use_validation_monitor, + validation_monitor_kwargs=validation_monitor_kwargs, + use_early_stopping=use_early_stopping, + early_stopping_kwargs=early_stopping_kwargs, + use_horovod=use_horovod, + use_hpu=use_hpu) + + hook_context = HookContext( + estimator=estimator, problem=problem, hparams=hparams) + + train_hooks += [ExamplesPerSecondEstimatorHook( + batch_size=hparams.batch_size, + every_n_steps=run_config.save_summary_steps, + output_dir=run_config.model_dir)] + + train_hooks += t2t_model.T2TModel.get_train_hooks(model_name, hook_context) + eval_hooks += t2t_model.T2TModel.get_eval_hooks(model_name, hook_context) + if additional_train_hooks: + train_hooks += additional_train_hooks + if additional_eval_hooks: + eval_hooks += additional_eval_hooks + + if hparams.profile_steps is not None: + train_hooks += [profile.ProfilerHook(hparams.profile_steps, run_config.model_dir)] + + train_hooks = contrib.learn().monitors.replace_monitors_with_hooks( + train_hooks, estimator) + eval_hooks = contrib.learn().monitors.replace_monitors_with_hooks( + eval_hooks, estimator) + + train_spec = tf.estimator.TrainSpec( + train_input_fn, max_steps=train_steps, hooks=train_hooks) + eval_spec = tf.estimator.EvalSpec( + eval_input_fn, + steps=eval_steps, + hooks=eval_hooks, + start_delay_secs=0 if hparams.schedule == "evaluate" else 120, + throttle_secs=eval_throttle_seconds) + + return T2TExperiment(estimator, hparams, train_spec, eval_spec, + use_validation_monitor, decode_hparams) + + +def create_experiment_fn(*args, **kwargs): + """Wrapper for canonical experiment_fn. See create_experiment.""" + + def experiment_fn(run_config, hparams): + return create_experiment(run_config, hparams, *args, **kwargs) + + return experiment_fn + + +def set_random_seed(seed): + tf.set_random_seed(seed) + random.seed(seed) + np.random.seed(seed) + + +def restore_checkpoint(ckpt_dir, saver, sess, must_restore=False): + """Restore from a checkpoint.""" + ckpt = tf.train.get_checkpoint_state(ckpt_dir) + if must_restore and not ckpt: + raise ValueError("No checkpoint found in %s" % ckpt_dir) + if not ckpt: + return 0 + + path = ckpt.model_checkpoint_path + tf.logging.info("Restoring checkpoint %s", path) + saver.restore(sess, path) + step = int(path.split("-")[-1]) + return step
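trainer_lib is normally driven by t2t_trainer.py: build a RunConfig, build hparams, create the experiment, then call the method named by the schedule. The sketch below is hedged: the hparams-set name, problem name, directories, and the `create_hparams()` call signature are assumptions taken from upstream Tensor2Tensor, not guarantees about this copy; the rest uses signatures defined in this module.

```python
# Hedged driver sketch (mirrors upstream t2t_trainer.py; names marked below are
# illustrative assumptions, not part of this module's API).
from TensorFlow.nlp.transformer.utils import trainer_lib

trainer_lib.set_random_seed(1234)

run_config = trainer_lib.create_run_config(
    model_name="transformer",
    model_dir="/tmp/t2t_model",        # hypothetical output directory
    save_checkpoints_steps=1000,
    save_summary_steps=100)

# Assumption: create_hparams (aliased from hparams_lib) accepts an hparams-set
# name, as it does in upstream Tensor2Tensor.
hparams = trainer_lib.create_hparams("transformer_base")

experiment_fn = trainer_lib.create_experiment_fn(
    model_name="transformer",
    problem_name="translate_ende_wmt32k",  # assumed registered problem name
    data_dir="/tmp/t2t_data",              # hypothetical data directory
    train_steps=100000,
    eval_steps=100,
    schedule="continuous_train_and_eval")

experiment = experiment_fn(run_config, hparams)
# t2t_trainer dispatches on the schedule name, e.g.:
getattr(experiment, "continuous_train_and_eval")()
```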