applied-ai-018 commited on
Commit
eb49b41
·
verified ·
1 Parent(s): f653bfd

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. docker/bloom13b/Model-References/TensorFlow/nlp/bert/LICENSE +203 -0
  2. docker/bloom13b/Model-References/TensorFlow/nlp/bert/bert_loss.png +3 -0
  3. docker/bloom13b/Model-References/TensorFlow/nlp/bert/bf16_config/bert.json +60 -0
  4. docker/bloom13b/Model-References/TensorFlow/nlp/bert/data/sample_text.txt +33 -0
  5. docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/BooksDownloader.py +38 -0
  6. docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/BookscorpusTextFormatting.py +32 -0
  7. docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/Downloader.py +68 -0
  8. docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/GooglePretrainedWeightDownloader.py +158 -0
  9. docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/TextSharding.py +331 -0
  10. docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/WikiDownloader.py +68 -0
  11. docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/WikicorpusTextFormatting.py +46 -0
  12. docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/create_datasets_from_start.sh +68 -0
  13. docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/create_pretraining_data.py +512 -0
  14. docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/pack_pretraining_data_tfrec.py +531 -0
  15. docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_dataset.py +31 -0
  16. docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_glue_data.py +141 -0
  17. docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_pretrained_model.py +75 -0
  18. docker/bloom13b/Model-References/TensorFlow/nlp/bert/optimization.py +458 -0
  19. docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/__init__.py +0 -0
  20. docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/fused_layer_norm.py +141 -0
  21. docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/gpu_environment.py +36 -0
  22. docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/utils.py +64 -0
  23. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/LICENSE +203 -0
  24. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/NOTICE +5 -0
  25. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/README.md +338 -0
  26. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/bf16_config/transformer.json +153 -0
  27. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/build_vocab.py +77 -0
  28. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/compute_bleu.py +42 -0
  29. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/__init__.py +15 -0
  30. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/all_problems.py +74 -0
  31. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/cleaner_en_xx.py +176 -0
  32. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/generator_utils.py +1259 -0
  33. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/text_encoder.py +1064 -0
  34. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/text_encoder_build_subword.py +79 -0
  35. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/tokenizer.py +194 -0
  36. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_encs.py +99 -0
  37. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_ende.py +218 -0
  38. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enfr.py +235 -0
  39. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enro.py +142 -0
  40. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_envi.py +58 -0
  41. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enzh.py +280 -0
  42. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/datagen.py +252 -0
  43. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/decoder.py +269 -0
  44. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/__init__.py +15 -0
  45. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/area_attention.py +433 -0
  46. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/common_attention.py +0 -0
  47. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/common_layers.py +0 -0
  48. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/modalities.py +302 -0
  49. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/transformer_layers.py +366 -0
  50. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/transformer_memory.py +393 -0
docker/bloom13b/Model-References/TensorFlow/nlp/bert/LICENSE ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2021 Habana Labs, Ltd. an Intel Company
2
+
3
+ Apache License
4
+ Version 2.0, January 2004
5
+ http://www.apache.org/licenses/
6
+
7
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8
+
9
+ 1. Definitions.
10
+
11
+ "License" shall mean the terms and conditions for use, reproduction,
12
+ and distribution as defined by Sections 1 through 9 of this document.
13
+
14
+ "Licensor" shall mean the copyright owner or entity authorized by
15
+ the copyright owner that is granting the License.
16
+
17
+ "Legal Entity" shall mean the union of the acting entity and all
18
+ other entities that control, are controlled by, or are under common
19
+ control with that entity. For the purposes of this definition,
20
+ "control" means (i) the power, direct or indirect, to cause the
21
+ direction or management of such entity, whether by contract or
22
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
23
+ outstanding shares, or (iii) beneficial ownership of such entity.
24
+
25
+ "You" (or "Your") shall mean an individual or Legal Entity
26
+ exercising permissions granted by this License.
27
+
28
+ "Source" form shall mean the preferred form for making modifications,
29
+ including but not limited to software source code, documentation
30
+ source, and configuration files.
31
+
32
+ "Object" form shall mean any form resulting from mechanical
33
+ transformation or translation of a Source form, including but
34
+ not limited to compiled object code, generated documentation,
35
+ and conversions to other media types.
36
+
37
+ "Work" shall mean the work of authorship, whether in Source or
38
+ Object form, made available under the License, as indicated by a
39
+ copyright notice that is included in or attached to the work
40
+ (an example is provided in the Appendix below).
41
+
42
+ "Derivative Works" shall mean any work, whether in Source or Object
43
+ form, that is based on (or derived from) the Work and for which the
44
+ editorial revisions, annotations, elaborations, or other modifications
45
+ represent, as a whole, an original work of authorship. For the purposes
46
+ of this License, Derivative Works shall not include works that remain
47
+ separable from, or merely link (or bind by name) to the interfaces of,
48
+ the Work and Derivative Works thereof.
49
+
50
+ "Contribution" shall mean any work of authorship, including
51
+ the original version of the Work and any modifications or additions
52
+ to that Work or Derivative Works thereof, that is intentionally
53
+ submitted to Licensor for inclusion in the Work by the copyright owner
54
+ or by an individual or Legal Entity authorized to submit on behalf of
55
+ the copyright owner. For the purposes of this definition, "submitted"
56
+ means any form of electronic, verbal, or written communication sent
57
+ to the Licensor or its representatives, including but not limited to
58
+ communication on electronic mailing lists, source code control systems,
59
+ and issue tracking systems that are managed by, or on behalf of, the
60
+ Licensor for the purpose of discussing and improving the Work, but
61
+ excluding communication that is conspicuously marked or otherwise
62
+ designated in writing by the copyright owner as "Not a Contribution."
63
+
64
+ "Contributor" shall mean Licensor and any individual or Legal Entity
65
+ on behalf of whom a Contribution has been received by Licensor and
66
+ subsequently incorporated within the Work.
67
+
68
+ 2. Grant of Copyright License. Subject to the terms and conditions of
69
+ this License, each Contributor hereby grants to You a perpetual,
70
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71
+ copyright license to reproduce, prepare Derivative Works of,
72
+ publicly display, publicly perform, sublicense, and distribute the
73
+ Work and such Derivative Works in Source or Object form.
74
+
75
+ 3. Grant of Patent License. Subject to the terms and conditions of
76
+ this License, each Contributor hereby grants to You a perpetual,
77
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78
+ (except as stated in this section) patent license to make, have made,
79
+ use, offer to sell, sell, import, and otherwise transfer the Work,
80
+ where such license applies only to those patent claims licensable
81
+ by such Contributor that are necessarily infringed by their
82
+ Contribution(s) alone or by combination of their Contribution(s)
83
+ with the Work to which such Contribution(s) was submitted. If You
84
+ institute patent litigation against any entity (including a
85
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
86
+ or a Contribution incorporated within the Work constitutes direct
87
+ or contributory patent infringement, then any patent licenses
88
+ granted to You under this License for that Work shall terminate
89
+ as of the date such litigation is filed.
90
+
91
+ 4. Redistribution. You may reproduce and distribute copies of the
92
+ Work or Derivative Works thereof in any medium, with or without
93
+ modifications, and in Source or Object form, provided that You
94
+ meet the following conditions:
95
+
96
+ (a) You must give any other recipients of the Work or
97
+ Derivative Works a copy of this License; and
98
+
99
+ (b) You must cause any modified files to carry prominent notices
100
+ stating that You changed the files; and
101
+
102
+ (c) You must retain, in the Source form of any Derivative Works
103
+ that You distribute, all copyright, patent, trademark, and
104
+ attribution notices from the Source form of the Work,
105
+ excluding those notices that do not pertain to any part of
106
+ the Derivative Works; and
107
+
108
+ (d) If the Work includes a "NOTICE" text file as part of its
109
+ distribution, then any Derivative Works that You distribute must
110
+ include a readable copy of the attribution notices contained
111
+ within such NOTICE file, excluding those notices that do not
112
+ pertain to any part of the Derivative Works, in at least one
113
+ of the following places: within a NOTICE text file distributed
114
+ as part of the Derivative Works; within the Source form or
115
+ documentation, if provided along with the Derivative Works; or,
116
+ within a display generated by the Derivative Works, if and
117
+ wherever such third-party notices normally appear. The contents
118
+ of the NOTICE file are for informational purposes only and
119
+ do not modify the License. You may add Your own attribution
120
+ notices within Derivative Works that You distribute, alongside
121
+ or as an addendum to the NOTICE text from the Work, provided
122
+ that such additional attribution notices cannot be construed
123
+ as modifying the License.
124
+
125
+ You may add Your own copyright statement to Your modifications and
126
+ may provide additional or different license terms and conditions
127
+ for use, reproduction, or distribution of Your modifications, or
128
+ for any such Derivative Works as a whole, provided Your use,
129
+ reproduction, and distribution of the Work otherwise complies with
130
+ the conditions stated in this License.
131
+
132
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
133
+ any Contribution intentionally submitted for inclusion in the Work
134
+ by You to the Licensor shall be under the terms and conditions of
135
+ this License, without any additional terms or conditions.
136
+ Notwithstanding the above, nothing herein shall supersede or modify
137
+ the terms of any separate license agreement you may have executed
138
+ with Licensor regarding such Contributions.
139
+
140
+ 6. Trademarks. This License does not grant permission to use the trade
141
+ names, trademarks, service marks, or product names of the Licensor,
142
+ except as required for reasonable and customary use in describing the
143
+ origin of the Work and reproducing the content of the NOTICE file.
144
+
145
+ 7. Disclaimer of Warranty. Unless required by applicable law or
146
+ agreed to in writing, Licensor provides the Work (and each
147
+ Contributor provides its Contributions) on an "AS IS" BASIS,
148
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149
+ implied, including, without limitation, any warranties or conditions
150
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151
+ PARTICULAR PURPOSE. You are solely responsible for determining the
152
+ appropriateness of using or redistributing the Work and assume any
153
+ risks associated with Your exercise of permissions under this License.
154
+
155
+ 8. Limitation of Liability. In no event and under no legal theory,
156
+ whether in tort (including negligence), contract, or otherwise,
157
+ unless required by applicable law (such as deliberate and grossly
158
+ negligent acts) or agreed to in writing, shall any Contributor be
159
+ liable to You for damages, including any direct, indirect, special,
160
+ incidental, or consequential damages of any character arising as a
161
+ result of this License or out of the use or inability to use the
162
+ Work (including but not limited to damages for loss of goodwill,
163
+ work stoppage, computer failure or malfunction, or any and all
164
+ other commercial damages or losses), even if such Contributor
165
+ has been advised of the possibility of such damages.
166
+
167
+ 9. Accepting Warranty or Additional Liability. While redistributing
168
+ the Work or Derivative Works thereof, You may choose to offer,
169
+ and charge a fee for, acceptance of support, warranty, indemnity,
170
+ or other liability obligations and/or rights consistent with this
171
+ License. However, in accepting such obligations, You may act only
172
+ on Your own behalf and on Your sole responsibility, not on behalf
173
+ of any other Contributor, and only if You agree to indemnify,
174
+ defend, and hold each Contributor harmless for any liability
175
+ incurred by, or claims asserted against, such Contributor by reason
176
+ of your accepting any such warranty or additional liability.
177
+
178
+ END OF TERMS AND CONDITIONS
179
+
180
+ APPENDIX: How to apply the Apache License to your work.
181
+
182
+ To apply the Apache License to your work, attach the following
183
+ boilerplate notice, with the fields enclosed by brackets "[]"
184
+ replaced with your own identifying information. (Don't include
185
+ the brackets!) The text should be enclosed in the appropriate
186
+ comment syntax for the file format. We also recommend that a
187
+ file or class name and description of purpose be included on the
188
+ same "printed page" as the copyright notice for easier
189
+ identification within third-party archives.
190
+
191
+ Copyright [yyyy] [name of copyright owner]
192
+
193
+ Licensed under the Apache License, Version 2.0 (the "License");
194
+ you may not use this file except in compliance with the License.
195
+ You may obtain a copy of the License at
196
+
197
+ http://www.apache.org/licenses/LICENSE-2.0
198
+
199
+ Unless required by applicable law or agreed to in writing, software
200
+ distributed under the License is distributed on an "AS IS" BASIS,
201
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202
+ See the License for the specific language governing permissions and
203
+ limitations under the License.
docker/bloom13b/Model-References/TensorFlow/nlp/bert/bert_loss.png ADDED

Git LFS Details

  • SHA256: d6b8d1935b2fd6bba39446fa0f2f332fcd4dd390301fd830c51c3a75edfbc175
  • Pointer size: 130 Bytes
  • Size of remote file: 23.7 kB
docker/bloom13b/Model-References/TensorFlow/nlp/bert/bf16_config/bert.json ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "allowlist": [
3
+ "_ScopedAllocatorSplit",
4
+ "_ScopedAllocatorConcat",
5
+ "_ScopedAllocator",
6
+ "BatchMatMul",
7
+ "BatchMatMulV2",
8
+ "BiasAdd",
9
+ "BiasAddGrad",
10
+ "EuclideanNorm",
11
+ "Exp",
12
+ "HabanaDropout",
13
+ "HabanaDropoutGrad",
14
+ "HabanaDropoutStateful",
15
+ "HabanaGelu",
16
+ "HabanaGeluGrad",
17
+ "HabanaLayerNorm",
18
+ "HabanaLayerNormV2",
19
+ "HabanaLayerNormGrad",
20
+ "HabanaLayerNormGradV2",
21
+ "HabanaMaskedSoftmax",
22
+ "HabanaSoftmaxGrad",
23
+ "HabanaLogSoftmaxGrad",
24
+ "HorovodAllgather",
25
+ "HorovodAllreduce",
26
+ "L2Loss",
27
+ "Log",
28
+ "LogSoftmax",
29
+ "MatMul",
30
+ "Softmax",
31
+ "Sum",
32
+ "Tanh",
33
+ "TanhGrad"
34
+ ],
35
+ "conditional_list": [
36
+ "Add",
37
+ "AddV2",
38
+ "AddN",
39
+ "ExpandDims",
40
+ "Identity",
41
+ "Neg",
42
+ "Reshape",
43
+ "Slice",
44
+ "Split",
45
+ "StridedSliceGrad",
46
+ "Transpose"
47
+ ],
48
+ "strict_conditional_list": [],
49
+ "non_convertible_exceptions": [
50
+ [".*KEEP_FP32_PRECISION.*", ""]
51
+ ],
52
+ "convertible_exceptions": [
53
+ ["bert/encoder/layer_[0-9]+/attention/self/add", "AddV2"],
54
+ ["bert/encoder/layer_[0-9]+/attention/self/Mul", "Mul"],
55
+ ["clip_by_global_norm/mul", "Mul"],
56
+ ["global_norm/mul", "Mul"],
57
+ ["global_norm/global_norm", "Sqrt"],
58
+ [".*FORCE_BF16_PRECISION.*", ""]
59
+ ]
60
+ }
docker/bloom13b/Model-References/TensorFlow/nlp/bert/data/sample_text.txt ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This text is included to make sure Unicode is handled properly: 力加勝北区ᴵᴺᵀᵃছজটডণত
2
+ Text should be one-sentence-per-line, with empty lines between documents.
3
+ This sample text is public domain and was randomly selected from Project Guttenberg.
4
+
5
+ The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors.
6
+ Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity.
7
+ Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them.
8
+ "Cass" Beard had risen early that morning, but not with a view to discovery.
9
+ A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets.
10
+ The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency.
11
+ This was nearly opposite.
12
+ Mr. Cassius crossed the highway, and stopped suddenly.
13
+ Something glittered in the nearest red pool before him.
14
+ Gold, surely!
15
+ But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring.
16
+ Looking at it more attentively, he saw that it bore the inscription, "May to Cass."
17
+ Like most of his fellow gold-seekers, Cass was superstitious.
18
+
19
+ The fountain of classic wisdom, Hypatia herself.
20
+ As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge.
21
+ From my youth I felt in me a soul above the matter-entangled herd.
22
+ She revealed to me the glorious fact, that I am a spark of Divinity itself.
23
+ A fallen star, I am, sir!' continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's.
24
+ There is a philosophic pleasure in opening one's treasures to the modest young.
25
+ Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street.
26
+ Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide;
27
+ but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind.
28
+ Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now.
29
+ His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert;
30
+ while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts.
31
+ At last they reached the quay at the opposite end of the street;
32
+ and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers.
33
+ He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him.
docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/BooksDownloader.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+
14
+ ###############################################################################
15
+ # Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
16
+ #
17
+ # Changes:
18
+ # - Modified path hard-coded for Nvidia container
19
+ ###############################################################################
20
+
21
+ import subprocess
22
+ import sys
23
+
24
+ class BooksDownloader:
25
+ def __init__(self, save_path):
26
+ self.save_path = save_path
27
+ pass
28
+
29
+
30
+ def download(self):
31
+ import os
32
+ working_dir = os.getcwd()
33
+ args = '--list ' + working_dir + '/bookcorpus/url_list.jsonl --out'
34
+ bookscorpus_download_command = f'{sys.executable} ' + working_dir + '/bookcorpus/download_files.py ' + args
35
+ bookscorpus_download_command += ' ' + self.save_path + '/bookscorpus'
36
+ bookscorpus_download_command += ' --trash-bad-count'
37
+ print("Downloading BookCorpus command: ", bookscorpus_download_command)
38
+ bookscorpus_download_process = subprocess.run(bookscorpus_download_command, shell=True, check=True)
docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/BookscorpusTextFormatting.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+
14
+ import glob
15
+ import os
16
+
17
+ class BookscorpusTextFormatting:
18
+ def __init__(self, books_path, output_filename, recursive = False):
19
+ self.books_path = books_path
20
+ self.recursive = recursive
21
+ self.output_filename = output_filename
22
+
23
+
24
+ # This puts one book per line
25
+ def merge(self):
26
+ with open(self.output_filename, mode='w', newline='\n') as ofile:
27
+ for filename in glob.glob(self.books_path + '/' + '*.txt', recursive=True):
28
+ with open(filename, mode='r', encoding='utf-8-sig', newline='\n') as file:
29
+ for line in file:
30
+ if line.strip() != '':
31
+ ofile.write(line.strip() + ' ')
32
+ ofile.write("\n\n")
docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/Downloader.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+
14
+ ###############################################################################
15
+ # Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
16
+ #
17
+ # Changes:
18
+ # - Removed downloading datasets that are not related to BERT pretrain
19
+ ###############################################################################
20
+
21
+ from GooglePretrainedWeightDownloader import GooglePretrainedWeightDownloader
22
+ from WikiDownloader import WikiDownloader
23
+ from BooksDownloader import BooksDownloader
24
+
25
+ class Downloader:
26
+ def __init__(self, dataset_name, save_path):
27
+ self.dataset_name = dataset_name
28
+ self.save_path = save_path
29
+
30
+
31
+ def download(self):
32
+ if self.dataset_name == 'bookscorpus':
33
+ self.download_bookscorpus()
34
+
35
+ elif self.dataset_name == 'wikicorpus_en':
36
+ self.download_wikicorpus('en')
37
+
38
+ elif self.dataset_name == 'wikicorpus_zh':
39
+ self.download_wikicorpus('zh')
40
+
41
+ elif self.dataset_name == 'google_pretrained_weights':
42
+ self.download_google_pretrained_weights()
43
+
44
+ elif self.dataset_name == 'all':
45
+ self.download_bookscorpus()
46
+ self.download_wikicorpus('en')
47
+ self.download_wikicorpus('zh')
48
+ self.download_google_pretrained_weights()
49
+
50
+ else:
51
+ print(self.dataset_name)
52
+ assert False, 'Unknown dataset_name provided to downloader'
53
+
54
+
55
+ def download_bookscorpus(self):
56
+ downloader = BooksDownloader(self.save_path)
57
+ downloader.download()
58
+
59
+
60
+ def download_wikicorpus(self, language):
61
+ downloader = WikiDownloader(language, self.save_path)
62
+ downloader.download()
63
+
64
+
65
+ def download_google_pretrained_weights(self):
66
+ downloader = GooglePretrainedWeightDownloader(self.save_path)
67
+ downloader.download()
68
+
docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/GooglePretrainedWeightDownloader.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+
14
+ import hashlib
15
+ import os
16
+ import urllib.request
17
+ import zipfile
18
+
19
+ class GooglePretrainedWeightDownloader:
20
+ def __init__(self, save_path):
21
+ self.save_path = save_path + '/google_pretrained_weights'
22
+
23
+ if not os.path.exists(self.save_path):
24
+ os.makedirs(self.save_path)
25
+
26
+ # Download urls
27
+ self.model_urls = {
28
+ 'bert_base_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip', 'uncased_L-12_H-768_A-12.zip'),
29
+ 'bert_large_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip', 'uncased_L-24_H-1024_A-16.zip'),
30
+ 'bert_base_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip', 'cased_L-12_H-768_A-12.zip'),
31
+ 'bert_large_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip', 'cased_L-24_H-1024_A-16.zip'),
32
+ 'bert_base_multilingual_cased': ('https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip', 'multi_cased_L-12_H-768_A-12.zip'),
33
+ 'bert_large_multilingual_uncased': ('https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', 'multilingual_L-12_H-768_A-12.zip'),
34
+ 'bert_base_chinese': ('https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip', 'chinese_L-12_H-768_A-12.zip')
35
+ }
36
+
37
+ # SHA256sum verification for file download integrity (and checking for changes from the download source over time)
38
+ self.bert_base_uncased_sha = {
39
+ 'bert_config.json': '7b4e5f53efbd058c67cda0aacfafb340113ea1b5797d9ce6ee411704ba21fcbc',
40
+ 'bert_model.ckpt.data-00000-of-00001': '58580dc5e0bf0ae0d2efd51d0e8272b2f808857f0a43a88aaf7549da6d7a8a84',
41
+ 'bert_model.ckpt.index': '04c1323086e2f1c5b7c0759d8d3e484afbb0ab45f51793daab9f647113a0117b',
42
+ 'bert_model.ckpt.meta': 'dd5682170a10c3ea0280c2e9b9a45fee894eb62da649bbdea37b38b0ded5f60e',
43
+ 'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3',
44
+ }
45
+
46
+ self.bert_large_uncased_sha = {
47
+ 'bert_config.json': 'bfa42236d269e2aeb3a6d30412a33d15dbe8ea597e2b01dc9518c63cc6efafcb',
48
+ 'bert_model.ckpt.data-00000-of-00001': 'bc6b3363e3be458c99ecf64b7f472d2b7c67534fd8f564c0556a678f90f4eea1',
49
+ 'bert_model.ckpt.index': '68b52f2205ffc64dc627d1120cf399c1ef1cbc35ea5021d1afc889ffe2ce2093',
50
+ 'bert_model.ckpt.meta': '6fcce8ff7628f229a885a593625e3d5ff9687542d5ef128d9beb1b0c05edc4a1',
51
+ 'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3',
52
+ }
53
+
54
+ self.bert_base_cased_sha = {
55
+ 'bert_config.json': 'f11dfb757bea16339a33e1bf327b0aade6e57fd9c29dc6b84f7ddb20682f48bc',
56
+ 'bert_model.ckpt.data-00000-of-00001': '734d5a1b68bf98d4e9cb6b6692725d00842a1937af73902e51776905d8f760ea',
57
+ 'bert_model.ckpt.index': '517d6ef5c41fc2ca1f595276d6fccf5521810d57f5a74e32616151557790f7b1',
58
+ 'bert_model.ckpt.meta': '5f8a9771ff25dadd61582abb4e3a748215a10a6b55947cbb66d0f0ba1694be98',
59
+ 'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02',
60
+ }
61
+
62
+ self.bert_large_cased_sha = {
63
+ 'bert_config.json': '7adb2125c8225da495656c982fd1c5f64ba8f20ad020838571a3f8a954c2df57',
64
+ 'bert_model.ckpt.data-00000-of-00001': '6ff33640f40d472f7a16af0c17b1179ca9dcc0373155fb05335b6a4dd1657ef0',
65
+ 'bert_model.ckpt.index': 'ef42a53f577fbe07381f4161b13c7cab4f4fc3b167cec6a9ae382c53d18049cf',
66
+ 'bert_model.ckpt.meta': 'd2ddff3ed33b80091eac95171e94149736ea74eb645e575d942ec4a5e01a40a1',
67
+ 'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02',
68
+ }
69
+
70
+ self.bert_base_multilingual_cased_sha = {
71
+ 'bert_config.json': 'e76c3964bc14a8bb37a5530cdc802699d2f4a6fddfab0611e153aa2528f234f0',
72
+ 'bert_model.ckpt.data-00000-of-00001': '55b8a2df41f69c60c5180e50a7c31b7cdf6238909390c4ddf05fbc0d37aa1ac5',
73
+ 'bert_model.ckpt.index': '7d8509c2a62b4e300feb55f8e5f1eef41638f4998dd4d887736f42d4f6a34b37',
74
+ 'bert_model.ckpt.meta': '95e5f1997e8831f1c31e5cf530f1a2e99f121e9cd20887f2dce6fe9e3343e3fa',
75
+ 'vocab.txt': 'fe0fda7c425b48c516fc8f160d594c8022a0808447475c1a7c6d6479763f310c',
76
+ }
77
+
78
+ self.bert_large_multilingual_uncased_sha = {
79
+ 'bert_config.json': '49063bb061390211d2fdd108cada1ed86faa5f90b80c8f6fdddf406afa4c4624',
80
+ 'bert_model.ckpt.data-00000-of-00001': '3cd83912ebeb0efe2abf35c9f1d5a515d8e80295e61c49b75c8853f756658429',
81
+ 'bert_model.ckpt.index': '87c372c1a3b1dc7effaaa9103c80a81b3cbab04c7933ced224eec3b8ad2cc8e7',
82
+ 'bert_model.ckpt.meta': '27f504f34f02acaa6b0f60d65195ec3e3f9505ac14601c6a32b421d0c8413a29',
83
+ 'vocab.txt': '87b44292b452f6c05afa49b2e488e7eedf79ea4f4c39db6f2f4b37764228ef3f',
84
+ }
85
+
86
+ self.bert_base_chinese_sha = {
87
+ 'bert_config.json': '7aaad0335058e2640bcb2c2e9a932b1cd9da200c46ea7b8957d54431f201c015',
88
+ 'bert_model.ckpt.data-00000-of-00001': '756699356b78ad0ef1ca9ba6528297bcb3dd1aef5feadd31f4775d7c7fc989ba',
89
+ 'bert_model.ckpt.index': '46315546e05ce62327b3e2cd1bed22836adcb2ff29735ec87721396edb21b82e',
90
+ 'bert_model.ckpt.meta': 'c0f8d51e1ab986604bc2b25d6ec0af7fd21ff94cf67081996ec3f3bf5d823047',
91
+ 'vocab.txt': '45bbac6b341c319adc98a532532882e91a9cefc0329aa57bac9ae761c27b291c',
92
+ }
93
+
94
+ # Relate SHA to urls for loop below
95
+ self.model_sha = {
96
+ 'bert_base_uncased': self.bert_base_uncased_sha,
97
+ 'bert_large_uncased': self.bert_large_uncased_sha,
98
+ 'bert_base_cased': self.bert_base_cased_sha,
99
+ 'bert_large_cased': self.bert_large_cased_sha,
100
+ 'bert_base_multilingual_cased': self.bert_base_multilingual_cased_sha,
101
+ 'bert_large_multilingual_uncased': self.bert_large_multilingual_uncased_sha,
102
+ 'bert_base_chinese': self.bert_base_chinese_sha
103
+ }
104
+
105
+ # Helper to get sha256sum of a file
106
+ def sha256sum(self, filename):
107
+ h = hashlib.sha256()
108
+ b = bytearray(128*1024)
109
+ mv = memoryview(b)
110
+ with open(filename, 'rb', buffering=0) as f:
111
+ for n in iter(lambda : f.readinto(mv), 0):
112
+ h.update(mv[:n])
113
+
114
+ return h.hexdigest()
115
+
116
+ def download(self):
117
+ # Iterate over urls: download, unzip, verify sha256sum
118
+ found_mismatch_sha = False
119
+ for model in self.model_urls:
120
+ url = self.model_urls[model][0]
121
+ file = self.save_path + '/' + self.model_urls[model][1]
122
+
123
+ print('Downloading', url)
124
+ response = urllib.request.urlopen(url)
125
+ with open(file, 'wb') as handle:
126
+ handle.write(response.read())
127
+
128
+ print('Unzipping', file)
129
+ zip = zipfile.ZipFile(file, 'r')
130
+ zip.extractall(self.save_path)
131
+ zip.close()
132
+
133
+ sha_dict = self.model_sha[model]
134
+ for extracted_file in sha_dict:
135
+ sha = sha_dict[extracted_file]
136
+ if sha != self.sha256sum(file[:-4] + '/' + extracted_file):
137
+ found_mismatch_sha = True
138
+ print('SHA256sum does not match on file:', extracted_file, 'from download url:', url)
139
+ else:
140
+ print(file[:-4] + '/' + extracted_file, '\t', 'verified')
141
+
142
+ if not found_mismatch_sha:
143
+ print("All downloads pass sha256sum verification.")
144
+
145
+ def serialize(self):
146
+ pass
147
+
148
+ def deserialize(self):
149
+ pass
150
+
151
+ def listAvailableWeights(self):
152
+ print("Available Weight Datasets")
153
+ for item in self.model_urls:
154
+ print(item)
155
+
156
+ def listLocallyStoredWeights(self):
157
+ pass
158
+
docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/TextSharding.py ADDED
@@ -0,0 +1,331 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+
14
+ from collections import defaultdict
15
+ from itertools import islice
16
+
17
+ import multiprocessing
18
+ import os
19
+ import statistics
20
+
21
+ class Sharding:
22
+ def __init__(self, input_files, output_name_prefix, n_training_shards, n_test_shards, fraction_test_set):
23
+ assert len(input_files) > 0, 'The input file list must contain at least one file.'
24
+ assert n_training_shards > 0, 'There must be at least one output shard.'
25
+ assert n_test_shards > 0, 'There must be at least one output shard.'
26
+
27
+ self.n_training_shards = n_training_shards
28
+ self.n_test_shards = n_test_shards
29
+ self.fraction_test_set = fraction_test_set
30
+
31
+ self.input_files = input_files
32
+
33
+ self.output_name_prefix = output_name_prefix
34
+ self.output_training_identifier = '_training'
35
+ self.output_test_identifier = '_test'
36
+ self.output_file_extension = '.txt'
37
+
38
+ self.articles = {} # key: integer identifier, value: list of articles
39
+ self.sentences = {} # key: integer identifier, value: list of sentences
40
+ self.output_training_files = {} # key: filename, value: list of articles to go into file
41
+ self.output_test_files = {} # key: filename, value: list of articles to go into file
42
+
43
+ self.init_output_files()
44
+
45
+
46
+ # Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines)
47
+ def load_articles(self):
48
+ print('Start: Loading Articles')
49
+
50
+ global_article_count = 0
51
+ for input_file in self.input_files:
52
+ print('input file:', input_file)
53
+ with open(input_file, mode='r', newline='\n') as f:
54
+ for i, line in enumerate(f):
55
+ if line.strip():
56
+ self.articles[global_article_count] = line.rstrip()
57
+ global_article_count += 1
58
+
59
+ print('End: Loading Articles: There are', len(self.articles), 'articles.')
60
+
61
+
62
+ def segment_articles_into_sentences(self, segmenter):
63
+ print('Start: Sentence Segmentation')
64
+ if len(self.articles) is 0:
65
+ self.load_articles()
66
+
67
+ assert len(self.articles) is not 0, 'Please check that input files are present and contain data.'
68
+
69
+ # TODO: WIP: multiprocessing (create independent ranges and spawn processes)
70
+ use_multiprocessing = 'serial'
71
+
72
+ def chunks(data, size=len(self.articles)):
73
+ it = iter(data)
74
+ for i in range(0, len(data), size):
75
+ yield {k: data[k] for k in islice(it, size)}
76
+
77
+ if use_multiprocessing == 'manager':
78
+ manager = multiprocessing.Manager()
79
+ return_dict = manager.dict()
80
+ jobs = []
81
+ n_processes = 7 # in addition to the main process, total = n_proc+1
82
+
83
+ def work(articles, return_dict):
84
+ sentences = {}
85
+ for i, article in enumerate(articles):
86
+ sentences[i] = segmenter.segment_string(articles[article])
87
+
88
+ if i % 5000 == 0:
89
+ print('Segmenting article', i)
90
+
91
+ return_dict.update(sentences)
92
+
93
+ for item in chunks(self.articles, len(self.articles)):
94
+ p = multiprocessing.Process(target=work, args=(item, return_dict))
95
+
96
+ # Busy wait
97
+ while len(jobs) >= n_processes:
98
+ pass
99
+
100
+ jobs.append(p)
101
+ p.start()
102
+
103
+ for proc in jobs:
104
+ proc.join()
105
+
106
+ elif use_multiprocessing == 'queue':
107
+ work_queue = multiprocessing.Queue()
108
+ jobs = []
109
+
110
+ for item in chunks(self.articles, len(self.articles)):
111
+ pass
112
+
113
+ else: # serial option
114
+ for i, article in enumerate(self.articles):
115
+ self.sentences[i] = segmenter.segment_string(self.articles[article])
116
+
117
+ if i % 5000 == 0:
118
+ print('Segmenting article', i)
119
+
120
+ print('End: Sentence Segmentation')
121
+
122
+
123
+ def init_output_files(self):
124
+ print('Start: Init Output Files')
125
+ assert len(self.output_training_files) is 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.'
126
+ assert len(self.output_test_files) is 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.'
127
+
128
+ for i in range(self.n_training_shards):
129
+ name = self.output_name_prefix + self.output_training_identifier + '_' + str(i) + self.output_file_extension
130
+ self.output_training_files[name] = []
131
+
132
+ for i in range(self.n_test_shards):
133
+ name = self.output_name_prefix + self.output_test_identifier + '_' + str(i) + self.output_file_extension
134
+ self.output_test_files[name] = []
135
+
136
+ print('End: Init Output Files')
137
+
138
+
139
+ def get_sentences_per_shard(self, shard):
140
+ result = 0
141
+ for article_id in shard:
142
+ result += len(self.sentences[article_id])
143
+
144
+ return result
145
+
146
+
147
+ def distribute_articles_over_shards(self):
148
+ print('Start: Distribute Articles Over Shards')
149
+ assert len(self.articles) >= self.n_training_shards + self.n_test_shards, 'There are fewer articles than shards. Please add more data or reduce the number of shards requested.'
150
+
151
+ # Create dictionary with - key: sentence count per article, value: article id number
152
+ sentence_counts = defaultdict(lambda: [])
153
+
154
+ max_sentences = 0
155
+ total_sentences = 0
156
+
157
+ for article_id in self.sentences:
158
+ current_length = len(self.sentences[article_id])
159
+ sentence_counts[current_length].append(article_id)
160
+ max_sentences = max(max_sentences, current_length)
161
+ total_sentences += current_length
162
+
163
+ n_sentences_assigned_to_training = int((1 - self.fraction_test_set) * total_sentences)
164
+ nominal_sentences_per_training_shard = n_sentences_assigned_to_training // self.n_training_shards
165
+ nominal_sentences_per_test_shard = (total_sentences - n_sentences_assigned_to_training) // self.n_test_shards
166
+
167
+ consumed_article_set = set({})
168
+ unused_article_set = set(self.articles.keys())
169
+
170
+ # Make first pass and add one article worth of lines per file
171
+ for file in self.output_training_files:
172
+ current_article_id = sentence_counts[max_sentences][-1]
173
+ sentence_counts[max_sentences].pop(-1)
174
+ self.output_training_files[file].append(current_article_id)
175
+ consumed_article_set.add(current_article_id)
176
+ unused_article_set.remove(current_article_id)
177
+
178
+ # Maintain the max sentence count
179
+ while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
180
+ max_sentences -= 1
181
+
182
+ if len(self.sentences[current_article_id]) > nominal_sentences_per_training_shard:
183
+ nominal_sentences_per_training_shard = len(self.sentences[current_article_id])
184
+ print('Warning: A single article contains more than the nominal number of sentences per training shard.')
185
+
186
+ for file in self.output_test_files:
187
+ current_article_id = sentence_counts[max_sentences][-1]
188
+ sentence_counts[max_sentences].pop(-1)
189
+ self.output_test_files[file].append(current_article_id)
190
+ consumed_article_set.add(current_article_id)
191
+ unused_article_set.remove(current_article_id)
192
+
193
+ # Maintain the max sentence count
194
+ while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
195
+ max_sentences -= 1
196
+
197
+ if len(self.sentences[current_article_id]) > nominal_sentences_per_test_shard:
198
+ nominal_sentences_per_test_shard = len(self.sentences[current_article_id])
199
+ print('Warning: A single article contains more than the nominal number of sentences per test shard.')
200
+
201
+ training_counts = []
202
+ test_counts = []
203
+
204
+ for shard in self.output_training_files:
205
+ training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard]))
206
+
207
+ for shard in self.output_test_files:
208
+ test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard]))
209
+
210
+ training_median = statistics.median(training_counts)
211
+ test_median = statistics.median(test_counts)
212
+
213
+ # Make subsequent passes over files to find articles to add without going over limit
214
+ history_remaining = []
215
+ n_history_remaining = 4
216
+
217
+ while len(consumed_article_set) < len(self.articles):
218
+ for fidx, file in enumerate(self.output_training_files):
219
+ nominal_next_article_size = min(nominal_sentences_per_training_shard - training_counts[fidx], max_sentences)
220
+
221
+ # Maintain the max sentence count
222
+ while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
223
+ max_sentences -= 1
224
+
225
+ while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0:
226
+ nominal_next_article_size -= 1
227
+
228
+ if nominal_next_article_size not in sentence_counts or nominal_next_article_size is 0 or training_counts[fidx] > training_median:
229
+ continue # skip adding to this file, will come back later if no file can accept unused articles
230
+
231
+ current_article_id = sentence_counts[nominal_next_article_size][-1]
232
+ sentence_counts[nominal_next_article_size].pop(-1)
233
+
234
+ self.output_training_files[file].append(current_article_id)
235
+ consumed_article_set.add(current_article_id)
236
+ unused_article_set.remove(current_article_id)
237
+
238
+ for fidx, file in enumerate(self.output_test_files):
239
+ nominal_next_article_size = min(nominal_sentences_per_test_shard - test_counts[fidx], max_sentences)
240
+
241
+ # Maintain the max sentence count
242
+ while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
243
+ max_sentences -= 1
244
+
245
+ while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0:
246
+ nominal_next_article_size -= 1
247
+
248
+ if nominal_next_article_size not in sentence_counts or nominal_next_article_size is 0 or test_counts[fidx] > test_median:
249
+ continue # skip adding to this file, will come back later if no file can accept unused articles
250
+
251
+ current_article_id = sentence_counts[nominal_next_article_size][-1]
252
+ sentence_counts[nominal_next_article_size].pop(-1)
253
+
254
+ self.output_test_files[file].append(current_article_id)
255
+ consumed_article_set.add(current_article_id)
256
+ unused_article_set.remove(current_article_id)
257
+
258
+ # If unable to place articles a few times, bump up nominal sizes by fraction until articles get placed
259
+ if len(history_remaining) == n_history_remaining:
260
+ history_remaining.pop(0)
261
+ history_remaining.append(len(unused_article_set))
262
+
263
+ history_same = True
264
+ for i in range(1, len(history_remaining)):
265
+ history_same = history_same and (history_remaining[i-1] == history_remaining[i])
266
+
267
+ if history_same:
268
+ nominal_sentences_per_training_shard += 1
269
+ # nominal_sentences_per_test_shard += 1
270
+
271
+ training_counts = []
272
+ test_counts = []
273
+ for shard in self.output_training_files:
274
+ training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard]))
275
+
276
+ for shard in self.output_test_files:
277
+ test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard]))
278
+
279
+ training_median = statistics.median(training_counts)
280
+ test_median = statistics.median(test_counts)
281
+
282
+ print('Distributing data over shards:', len(unused_article_set), 'articles remaining.')
283
+
284
+
285
+ if len(unused_article_set) != 0:
286
+ print('Warning: Some articles did not make it into output files.')
287
+
288
+
289
+ for shard in self.output_training_files:
290
+ print('Training shard:', self.get_sentences_per_shard(self.output_training_files[shard]))
291
+
292
+ for shard in self.output_test_files:
293
+ print('Test shard:', self.get_sentences_per_shard(self.output_test_files[shard]))
294
+
295
+ print('End: Distribute Articles Over Shards')
296
+
297
+
298
+ def write_shards_to_disk(self):
299
+ print('Start: Write Shards to Disk')
300
+ for shard in self.output_training_files:
301
+ self.write_single_shard(shard, self.output_training_files[shard], 'training')
302
+
303
+ for shard in self.output_test_files:
304
+ self.write_single_shard(shard, self.output_test_files[shard], 'test')
305
+
306
+ print('End: Write Shards to Disk')
307
+
308
+
309
+ def write_single_shard(self, shard_name, shard, split):
310
+ shard_split = os.path.split(shard_name)
311
+ shard_name = shard_split[0] + '/' + split + '/' + shard_split[1]
312
+
313
+ with open(shard_name, mode='w', newline='\n') as f:
314
+ for article_id in shard:
315
+ for line in self.sentences[article_id]:
316
+ f.write(line + '\n')
317
+
318
+ f.write('\n') # Line break between articles
319
+
320
+
321
+ import nltk
322
+
323
+ nltk.download('punkt')
324
+
325
+ class NLTKSegmenter:
326
+ def __init(self):
327
+ pass
328
+
329
+ def segment_string(self, article):
330
+ return nltk.tokenize.sent_tokenize(article)
331
+
docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/WikiDownloader.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ ###############################################################################
14
+ # Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
15
+ #
16
+ # Changes:
17
+ # - Replaced bzip2 with lbzip2 that uses multiprocessing during decompression
18
+ # and provides linear speedup.
19
+ # - Added timing code to measure the decompression process duration.
20
+ # - Removed unused imports.
21
+ ###############################################################################
22
+
23
+ import os
24
+ import subprocess
25
+ import time
26
+
27
+ class WikiDownloader:
28
+ def __init__(self, language, save_path):
29
+ self.save_path = save_path + '/wikicorpus_' + language
30
+
31
+ if not os.path.exists(self.save_path):
32
+ os.makedirs(self.save_path)
33
+
34
+ self.language = language
35
+ self.download_urls = {
36
+ 'en' : 'https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2',
37
+ 'zh' : 'https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2'
38
+ }
39
+
40
+ self.output_files = {
41
+ 'en' : 'wikicorpus_en.xml.bz2',
42
+ 'zh' : 'wikicorpus_zh.xml.bz2'
43
+ }
44
+
45
+
46
+ def download(self):
47
+ if self.language in self.download_urls:
48
+ url = self.download_urls[self.language]
49
+ filename = self.output_files[self.language]
50
+
51
+ print('Downloading:', url)
52
+ if os.path.isfile(self.save_path + '/' + filename):
53
+ print('** Download file already exists, skipping download')
54
+ else:
55
+ cmd = ['wget', url, '--output-document={}'.format(self.save_path + '/' + filename)]
56
+ print('Running:', cmd)
57
+ status = subprocess.run(cmd)
58
+ if status.returncode != 0:
59
+ raise RuntimeError('Wiki download not successful')
60
+
61
+ # Always unzipping since this is relatively fast and will overwrite
62
+ start = time.time()
63
+ print('Unzipping:', self.output_files[self.language])
64
+ subprocess.run('lbzip2 -dk ' + self.save_path + '/' + filename, shell=True, check=True)
65
+ print("Unzip time:", time.time() - start)
66
+
67
+ else:
68
+ assert False, 'WikiDownloader not implemented for this language yet.'
docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/WikicorpusTextFormatting.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+
14
+ import glob
15
+ import os
16
+
17
+ class WikicorpusTextFormatting:
18
+ def __init__(self, wiki_path, output_filename, recursive = False):
19
+ self.wiki_path = wiki_path
20
+ self.recursive = recursive
21
+ self.output_filename = output_filename
22
+
23
+
24
+ # This puts one article per line
25
+ def merge(self):
26
+ with open(self.output_filename, mode='w', newline='\n') as ofile:
27
+ for dirname in glob.glob(self.wiki_path + '/*/', recursive=False):
28
+ for filename in glob.glob(dirname + 'wiki_*', recursive=self.recursive):
29
+ print(filename)
30
+ article_lines = []
31
+ article_open = False
32
+
33
+ with open(filename, mode='r', newline='\n') as file:
34
+ for line in file:
35
+ if '<doc id=' in line:
36
+ article_open = True
37
+ elif '</doc>' in line:
38
+ article_open = False
39
+ for oline in article_lines[1:]:
40
+ if oline != '\n':
41
+ ofile.write(oline.rstrip() + " ")
42
+ ofile.write("\n\n")
43
+ article_lines = []
44
+ else:
45
+ if article_open:
46
+ article_lines.append(line)
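As a rough illustration of what merge() does with WikiExtractor output (the paths below are placeholders and the snippet is not part of the repository): each <doc id=...> ... </doc> block becomes a single line of text, the first line inside the block (the article title) is dropped via article_lines[1:], and articles are separated by a blank line.

# Illustrative only; assumes the class above is importable under this name.
from WikicorpusTextFormatting import WikicorpusTextFormatting

formatter = WikicorpusTextFormatting('/data/wikicorpus_en/extracted',  # dir holding WikiExtractor shards (placeholder)
                                     '/data/wikicorpus_en/wikicorpus_en.formatted.txt',
                                     recursive=True)
formatter.merge()  # writes one article per line, with a blank line between articles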
docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/create_datasets_from_start.sh ADDED
@@ -0,0 +1,68 @@
1
+ #!/bin/bash
2
+
3
+ # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ ###############################################################################
17
+ # Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
18
+ #
19
+ # Changes:
20
+ # - Removed downloading and preprocessing of datasets that are not related to BERT pretraining
21
+ # - Modified the file structure originally designed for the NVIDIA container
22
+ # - Added downloading of the WikiExtractor and bookcorpus repositories
23
+ ###############################################################################
24
+
25
+
26
+ to_download=${1:-"wiki_only"} # By default, we don't download BooksCorpus dataset due to recent issues with the host server
27
+
28
+ data_dir=$(pwd)
29
+ BERT_PREP_WORKING_DIR=${2:-"/data/tensorflow/bert/books_wiki_en_corpus"}
30
+ export BERT_PREP_WORKING_DIR="${BERT_PREP_WORKING_DIR}"
31
+
32
+ echo "Checkout WikiExtractor repository"
33
+ # checkout WikiExtractor scripts
34
+ git clone https://github.com/attardi/wikiextractor.git && cd wikiextractor && git checkout 6408a430fc504a38b04d37ce5e7fc740191dee16 && cd ..
35
+
36
+ # Download Wikipedia dataset and/or Bookscorpus dataset
37
+ echo "Download dataset ${to_download}"
38
+ if [ "$to_download" = "wiki_books" ] ; then
39
+ # checkout BookCorpus download scripts
40
+ git clone https://github.com/soskek/bookcorpus.git
41
+ $PYTHON ${data_dir}/bertPrep.py --action download --dataset bookscorpus
42
+ fi
43
+ $PYTHON ${data_dir}/bertPrep.py --action download --dataset wikicorpus_en
44
+
45
+ echo "Download pretrained weights"
46
+ echo "${data_dir}"
47
+ $PYTHON ${data_dir}/bertPrep.py --action download --dataset google_pretrained_weights # Includes vocab
48
+
49
+ DATASET="wikicorpus_en"
50
+
51
+ # Properly format the text files
52
+ if [ "$to_download" = "wiki_books" ] ; then
53
+ $PYTHON ${data_dir}/bertPrep.py --action text_formatting --dataset bookscorpus
54
+ DATASET="books_wiki_en_corpus"
55
+ fi
56
+ $PYTHON ${data_dir}/bertPrep.py --action text_formatting --dataset wikicorpus_en
57
+
58
+ # Shard the text files
59
+ $PYTHON ${data_dir}/bertPrep.py --action sharding --dataset ${DATASET}
60
+
61
+ # Create TFRecord files Phase 1
62
+ $PYTHON ${data_dir}/bertPrep.py --action create_tfrecord_files --dataset ${DATASET} --max_seq_length 128 \
63
+ --max_predictions_per_seq 20 --vocab_file ${BERT_PREP_WORKING_DIR}/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt
64
+
65
+
66
+ # Create TFRecord files Phase 2
67
+ $PYTHON ${data_dir}/bertPrep.py --action create_tfrecord_files --dataset ${DATASET} --max_seq_length 512 \
68
+ --max_predictions_per_seq 80 --vocab_file ${BERT_PREP_WORKING_DIR}/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt
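For readers who prefer to drive the same wiki-only pipeline from Python instead of bash, a hedged sketch follows. The flags mirror the calls in the script above; the location of bertPrep.py and the vocab path are placeholders, not values mandated by the repository.

# Rough Python equivalent of the wiki-only path of the script above.
import subprocess
import sys

VOCAB = "/data/tensorflow/bert/books_wiki_en_corpus/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt"  # placeholder
STEPS = [
    ["--action", "download", "--dataset", "wikicorpus_en"],
    ["--action", "download", "--dataset", "google_pretrained_weights"],
    ["--action", "text_formatting", "--dataset", "wikicorpus_en"],
    ["--action", "sharding", "--dataset", "wikicorpus_en"],
    ["--action", "create_tfrecord_files", "--dataset", "wikicorpus_en",
     "--max_seq_length", "128", "--max_predictions_per_seq", "20", "--vocab_file", VOCAB],
    ["--action", "create_tfrecord_files", "--dataset", "wikicorpus_en",
     "--max_seq_length", "512", "--max_predictions_per_seq", "80", "--vocab_file", VOCAB],
]
for step in STEPS:
    subprocess.run([sys.executable, "bertPrep.py", *step], check=True)  # assumes bertPrep.py is in the CWD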
docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/create_pretraining_data.py ADDED
@@ -0,0 +1,512 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
3
+ # Copyright 2018 The Google AI Language Team Authors.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ ###############################################################################
18
+ # Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
19
+ #
20
+ # Changes:
21
+ # - Fixed compatibility issues with TensorFlow 2
22
+ ###############################################################################
23
+
24
+ """Create masked LM/next sentence masked_lm TF examples for BERT."""
25
+
26
+ from __future__ import absolute_import, division, print_function, unicode_literals
27
+
28
+ import argparse
29
+ import logging
30
+ import os
31
+ import random
32
+ from io import open
33
+ import h5py
34
+ import tensorflow as tf
35
+ import numpy as np
36
+ from tqdm import tqdm, trange
37
+
38
+ from TensorFlow.nlp.bert.data_preprocessing.tokenization import BertTokenizer
39
+ import tokenization
40
+
41
+ import random
42
+ import collections
43
+
44
+ class TrainingInstance(object):
45
+ """A single training instance (sentence pair)."""
46
+
47
+ def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
48
+ is_random_next):
49
+ self.tokens = tokens
50
+ self.segment_ids = segment_ids
51
+ self.is_random_next = is_random_next
52
+ self.masked_lm_positions = masked_lm_positions
53
+ self.masked_lm_labels = masked_lm_labels
54
+
55
+ def __str__(self):
56
+ s = ""
57
+ s += "tokens: %s\n" % (" ".join(
58
+ [tokenization.printable_text(x) for x in self.tokens]))
59
+ s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
60
+ s += "is_random_next: %s\n" % self.is_random_next
61
+ s += "masked_lm_positions: %s\n" % (" ".join(
62
+ [str(x) for x in self.masked_lm_positions]))
63
+ s += "masked_lm_labels: %s\n" % (" ".join(
64
+ [tokenization.printable_text(x) for x in self.masked_lm_labels]))
65
+ s += "\n"
66
+ return s
67
+
68
+ def __repr__(self):
69
+ return self.__str__()
70
+
71
+
72
+ def write_instance_to_example_files(instances, tokenizer, max_seq_length,
73
+ max_predictions_per_seq, output_files, output_formats="tfrecord"):
74
+ """Create TF example files from `TrainingInstance`s."""
75
+ writers = []
76
+ for output_file in output_files:
77
+ writers.append(tf.compat.v1.python_io.TFRecordWriter(output_file))
78
+
79
+ writer_index = 0
80
+
81
+ total_written = 0
82
+ if 'hdf5' in output_formats:
83
+ features_hdf5 = collections.OrderedDict()
84
+ num_instances = len(instances)
85
+ features_hdf5["input_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
86
+ features_hdf5["input_mask"] = np.zeros([num_instances, max_seq_length], dtype="int32")
87
+ features_hdf5["segment_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
88
+ features_hdf5["masked_lm_positions"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
89
+ features_hdf5["masked_lm_ids"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
90
+ features_hdf5["next_sentence_labels"] = np.zeros(num_instances, dtype="int32")
91
+
92
+ for (inst_index, instance) in enumerate(instances):
93
+ input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
94
+ input_mask = [1] * len(input_ids)
95
+ segment_ids = list(instance.segment_ids)
96
+ assert len(input_ids) <= max_seq_length
97
+
98
+ while len(input_ids) < max_seq_length:
99
+ input_ids.append(0)
100
+ input_mask.append(0)
101
+ segment_ids.append(0)
102
+
103
+ assert len(input_ids) == max_seq_length
104
+ assert len(input_mask) == max_seq_length
105
+ assert len(segment_ids) == max_seq_length
106
+
107
+ masked_lm_positions = list(instance.masked_lm_positions)
108
+ masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
109
+ masked_lm_weights = [1.0] * len(masked_lm_ids)
110
+
111
+ while len(masked_lm_positions) < max_predictions_per_seq:
112
+ masked_lm_positions.append(0)
113
+ masked_lm_ids.append(0)
114
+ masked_lm_weights.append(0.0)
115
+
116
+ next_sentence_label = 1 if instance.is_random_next else 0
117
+
118
+ features = collections.OrderedDict()
119
+ features["input_ids"] = create_int_feature(input_ids)
120
+ features["input_mask"] = create_int_feature(input_mask)
121
+ features["segment_ids"] = create_int_feature(segment_ids)
122
+ features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
123
+ features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
124
+ features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
125
+ features["next_sentence_labels"] = create_int_feature([next_sentence_label])
126
+
127
+ if 'tfrecord' in output_formats:
128
+ tf_example = tf.train.Example(features=tf.train.Features(feature=features))
129
+ writers[writer_index].write(tf_example.SerializeToString())
130
+ if 'hdf5' in output_formats:
131
+ features_hdf5["input_ids"][inst_index] = input_ids
132
+ features_hdf5["input_mask"][inst_index] = input_mask
133
+ features_hdf5["segment_ids"][inst_index] = segment_ids
134
+ features_hdf5["masked_lm_positions"][inst_index] = masked_lm_positions
135
+ features_hdf5["masked_lm_ids"][inst_index] = masked_lm_ids
136
+ features_hdf5["next_sentence_labels"][inst_index] = next_sentence_label
137
+ if 'tfrecord' not in output_formats and 'hdf5' not in output_formats:
138
+ assert False, 'Either empty output_formats list or unsupported type specified. Try: tfrecord or hdf5'
139
+
140
+ writer_index = (writer_index + 1) % len(writers)
141
+
142
+ total_written += 1
143
+
144
+ if inst_index < 20:
145
+ tf.compat.v1.logging.info("*** Example ***")
146
+ tf.compat.v1.logging.info("tokens: %s" % " ".join(
147
+ [tokenization.printable_text(x) for x in instance.tokens]))
148
+
149
+ for feature_name in features.keys():
150
+ feature = features[feature_name]
151
+ values = []
152
+ if feature.int64_list.value:
153
+ values = feature.int64_list.value
154
+ elif feature.float_list.value:
155
+ values = feature.float_list.value
156
+ tf.compat.v1.logging.info(
157
+ "%s: %s" % (feature_name, " ".join([str(x) for x in values])))
158
+
159
+ for writer in writers:
160
+ writer.close()
161
+
162
+ if 'hdf5' in output_formats:
163
+ f = h5py.File(output_file, 'w')
164
+ f.create_dataset("input_ids", data=features_hdf5["input_ids"], dtype='i4', compression='gzip')
165
+ f.create_dataset("input_mask", data=features_hdf5["input_mask"], dtype='i1', compression='gzip')
166
+ f.create_dataset("segment_ids", data=features_hdf5["segment_ids"], dtype='i1', compression='gzip')
167
+ f.create_dataset("masked_lm_positions", data=features_hdf5["masked_lm_positions"], dtype='i4', compression='gzip')
168
+ f.create_dataset("masked_lm_ids", data=features_hdf5["masked_lm_ids"], dtype='i4', compression='gzip')
169
+ f.create_dataset("next_sentence_labels", data=features_hdf5["next_sentence_labels"], dtype='i1', compression='gzip')
170
+ f.flush()
171
+ f.close()
172
+
173
+ tf.compat.v1.logging.info("Wrote %d total instances", total_written)
174
+
175
+
176
+ def create_int_feature(values):
177
+ feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
178
+ return feature
179
+
180
+
181
+ def create_float_feature(values):
182
+ feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
183
+ return feature
184
+
185
+
186
+ def create_training_instances(input_files, tokenizer, max_seq_length,
187
+ dupe_factor, short_seq_prob, masked_lm_prob,
188
+ max_predictions_per_seq, rng):
189
+ """Create `TrainingInstance`s from raw text."""
190
+ all_documents = [[]]
191
+
192
+ # Input file format:
193
+ # (1) One sentence per line. These should ideally be actual sentences, not
194
+ # entire paragraphs or arbitrary spans of text. (Because we use the
195
+ # sentence boundaries for the "next sentence prediction" task).
196
+ # (2) Blank lines between documents. Document boundaries are needed so
197
+ # that the "next sentence prediction" task doesn't span between documents.
198
+ for input_file in input_files:
199
+ print("creating instance from {}".format(input_file))
200
+ with open(input_file, "r") as reader:
201
+ while True:
202
+ line = tokenization.convert_to_unicode(reader.readline())
203
+ if not line:
204
+ break
205
+ line = line.strip()
206
+
207
+ # Empty lines are used as document delimiters
208
+ if not line:
209
+ all_documents.append([])
210
+ tokens = tokenizer.tokenize(line)
211
+ if tokens:
212
+ all_documents[-1].append(tokens)
213
+
214
+ # Remove empty documents
215
+ all_documents = [x for x in all_documents if x]
216
+ rng.shuffle(all_documents)
217
+
218
+ vocab_words = list(tokenizer.vocab.keys())
219
+ instances = []
220
+ for _ in range(dupe_factor):
221
+ for document_index in range(len(all_documents)):
222
+ instances.extend(
223
+ create_instances_from_document(
224
+ all_documents, document_index, max_seq_length, short_seq_prob,
225
+ masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
226
+
227
+ rng.shuffle(instances)
228
+ return instances
229
+
230
+
231
+ def create_instances_from_document(
232
+ all_documents, document_index, max_seq_length, short_seq_prob,
233
+ masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
234
+ """Creates `TrainingInstance`s for a single document."""
235
+ document = all_documents[document_index]
236
+
237
+ # Account for [CLS], [SEP], [SEP]
238
+ max_num_tokens = max_seq_length - 3
239
+
240
+ # We *usually* want to fill up the entire sequence since we are padding
241
+ # to `max_seq_length` anyways, so short sequences are generally wasted
242
+ # computation. However, we *sometimes*
243
+ # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
244
+ # sequences to minimize the mismatch between pre-training and fine-tuning.
245
+ # The `target_seq_length` is just a rough target however, whereas
246
+ # `max_seq_length` is a hard limit.
247
+ target_seq_length = max_num_tokens
248
+ if rng.random() < short_seq_prob:
249
+ target_seq_length = rng.randint(2, max_num_tokens)
250
+
251
+ # We DON'T just concatenate all of the tokens from a document into a long
252
+ # sequence and choose an arbitrary split point because this would make the
253
+ # next sentence prediction task too easy. Instead, we split the input into
254
+ # segments "A" and "B" based on the actual "sentences" provided by the user
255
+ # input.
256
+ instances = []
257
+ current_chunk = []
258
+ current_length = 0
259
+ i = 0
260
+ while i < len(document):
261
+ segment = document[i]
262
+ current_chunk.append(segment)
263
+ current_length += len(segment)
264
+ if i == len(document) - 1 or current_length >= target_seq_length:
265
+ if current_chunk:
266
+ # `a_end` is how many segments from `current_chunk` go into the `A`
267
+ # (first) sentence.
268
+ a_end = 1
269
+ if len(current_chunk) >= 2:
270
+ a_end = rng.randint(1, len(current_chunk) - 1)
271
+
272
+ tokens_a = []
273
+ for j in range(a_end):
274
+ tokens_a.extend(current_chunk[j])
275
+
276
+ tokens_b = []
277
+ # Random next
278
+ is_random_next = False
279
+ if len(current_chunk) == 1 or rng.random() < 0.5:
280
+ is_random_next = True
281
+ target_b_length = target_seq_length - len(tokens_a)
282
+
283
+ # This should rarely go for more than one iteration for large
284
+ # corpora. However, just to be careful, we try to make sure that
285
+ # the random document is not the same as the document
286
+ # we're processing.
287
+ for _ in range(10):
288
+ random_document_index = rng.randint(0, len(all_documents) - 1)
289
+ if random_document_index != document_index:
290
+ break
291
+
292
+ #If picked random document is the same as the current document
293
+ if random_document_index == document_index:
294
+ is_random_next = False
295
+
296
+ random_document = all_documents[random_document_index]
297
+ random_start = rng.randint(0, len(random_document) - 1)
298
+ for j in range(random_start, len(random_document)):
299
+ tokens_b.extend(random_document[j])
300
+ if len(tokens_b) >= target_b_length:
301
+ break
302
+ # We didn't actually use these segments so we "put them back" so
303
+ # they don't go to waste.
304
+ num_unused_segments = len(current_chunk) - a_end
305
+ i -= num_unused_segments
306
+ # Actual next
307
+ else:
308
+ is_random_next = False
309
+ for j in range(a_end, len(current_chunk)):
310
+ tokens_b.extend(current_chunk[j])
311
+ truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
312
+
313
+ assert len(tokens_a) >= 1
314
+ assert len(tokens_b) >= 1
315
+
316
+ tokens = []
317
+ segment_ids = []
318
+ tokens.append("[CLS]")
319
+ segment_ids.append(0)
320
+ for token in tokens_a:
321
+ tokens.append(token)
322
+ segment_ids.append(0)
323
+
324
+ tokens.append("[SEP]")
325
+ segment_ids.append(0)
326
+
327
+ for token in tokens_b:
328
+ tokens.append(token)
329
+ segment_ids.append(1)
330
+ tokens.append("[SEP]")
331
+ segment_ids.append(1)
332
+
333
+ (tokens, masked_lm_positions,
334
+ masked_lm_labels) = create_masked_lm_predictions(
335
+ tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
336
+ instance = TrainingInstance(
337
+ tokens=tokens,
338
+ segment_ids=segment_ids,
339
+ is_random_next=is_random_next,
340
+ masked_lm_positions=masked_lm_positions,
341
+ masked_lm_labels=masked_lm_labels)
342
+ instances.append(instance)
343
+ current_chunk = []
344
+ current_length = 0
345
+ i += 1
346
+
347
+ return instances
348
+
349
+
350
+ MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
351
+ ["index", "label"])
352
+
353
+
354
+ def create_masked_lm_predictions(tokens, masked_lm_prob,
355
+ max_predictions_per_seq, vocab_words, rng):
356
+ """Creates the predictions for the masked LM objective."""
357
+
358
+ cand_indexes = []
359
+ for (i, token) in enumerate(tokens):
360
+ if token == "[CLS]" or token == "[SEP]":
361
+ continue
362
+ cand_indexes.append(i)
363
+
364
+ rng.shuffle(cand_indexes)
365
+
366
+ output_tokens = list(tokens)
367
+
368
+ num_to_predict = min(max_predictions_per_seq,
369
+ max(1, int(round(len(tokens) * masked_lm_prob))))
370
+
371
+ masked_lms = []
372
+ covered_indexes = set()
373
+ for index in cand_indexes:
374
+ if len(masked_lms) >= num_to_predict:
375
+ break
376
+ if index in covered_indexes:
377
+ continue
378
+ covered_indexes.add(index)
379
+
380
+ masked_token = None
381
+ # 80% of the time, replace with [MASK]
382
+ if rng.random() < 0.8:
383
+ masked_token = "[MASK]"
384
+ else:
385
+ # 10% of the time, keep original
386
+ if rng.random() < 0.5:
387
+ masked_token = tokens[index]
388
+ # 10% of the time, replace with random word
389
+ else:
390
+ masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
391
+
392
+ output_tokens[index] = masked_token
393
+
394
+ masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
395
+
396
+ masked_lms = sorted(masked_lms, key=lambda x: x.index)
397
+
398
+ masked_lm_positions = []
399
+ masked_lm_labels = []
400
+ for p in masked_lms:
401
+ masked_lm_positions.append(p.index)
402
+ masked_lm_labels.append(p.label)
403
+
404
+ return (output_tokens, masked_lm_positions, masked_lm_labels)
405
+
406
+
407
+ def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
408
+ """Truncates a pair of sequences to a maximum sequence length."""
409
+ while True:
410
+ total_length = len(tokens_a) + len(tokens_b)
411
+ if total_length <= max_num_tokens:
412
+ break
413
+
414
+ trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
415
+ assert len(trunc_tokens) >= 1
416
+
417
+ # We want to sometimes truncate from the front and sometimes from the
418
+ # back to add more randomness and avoid biases.
419
+ if rng.random() < 0.5:
420
+ del trunc_tokens[0]
421
+ else:
422
+ trunc_tokens.pop()
423
+
424
+
425
+ def main():
426
+ parser = argparse.ArgumentParser()
427
+ ## Required parameters
428
+ parser.add_argument("--vocab_file",
429
+ default=None,
430
+ type=str,
431
+ required=True,
432
+ help="The vocabulary the BERT model will train on.")
433
+ parser.add_argument("--input_file",
434
+ default=None,
435
+ type=str,
436
+ required=True,
437
+ help="The input train corpus. can be directory with .txt files or a path to a single file")
438
+ parser.add_argument("--output_file",
439
+ default=None,
440
+ type=str,
441
+ required=True,
442
+ help="The output file where the model checkpoints will be written.")
443
+
444
+ ## Other parameters
445
+ # int
446
+ parser.add_argument("--max_seq_length",
447
+ default=128,
448
+ type=int,
449
+ help="The maximum total input sequence length after WordPiece tokenization. \n"
450
+ "Sequences longer than this will be truncated, and sequences shorter \n"
451
+ "than this will be padded.")
452
+ parser.add_argument("--dupe_factor",
453
+ default=10,
454
+ type=int,
455
+ help="Number of times to duplicate the input data (with different masks).")
456
+ parser.add_argument("--max_predictions_per_seq",
457
+ default=20,
458
+ type=int,
459
+ help="Maximum sequence length.")
460
+
461
+ # floats
462
+
463
+ parser.add_argument("--masked_lm_prob",
464
+ default=0.15,
465
+ type=float,
466
+ help="Masked LM probability.")
467
+
468
+ parser.add_argument("--short_seq_prob",
469
+ default=0.1,
470
+ type=float,
471
+ help="Probability to create a sequence shorter than maximum sequence length")
472
+
473
+ parser.add_argument("--do_lower_case",
474
+ action='store_true',
475
+ default=True,
476
+ help="Whether to lower case the input text. True for uncased models, False for cased models.")
477
+ parser.add_argument('--random_seed',
478
+ type=int,
479
+ default=12345,
480
+ help="random seed for initialization")
481
+
482
+ args = parser.parse_args()
483
+
484
+ tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case)
485
+
486
+ input_files = []
487
+ if os.path.isfile(args.input_file):
488
+ input_files.append(args.input_file)
489
+ elif os.path.isdir(args.input_file):
490
+ input_files = [os.path.join(args.input_file, f) for f in os.listdir(args.input_file) if
491
+ (os.path.isfile(os.path.join(args.input_file, f)) and f.endswith('.txt'))]
492
+ else:
493
+ raise ValueError("{} is not a valid path".format(args.input_file))
494
+
495
+ rng = random.Random(args.random_seed)
496
+ instances = create_training_instances(
497
+ input_files, tokenizer, args.max_seq_length, args.dupe_factor,
498
+ args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq,
499
+ rng)
500
+
501
+ output_files = args.output_file.split(",")
502
+ print("*** Writing to output files ***")
503
+ for output_file in output_files:
504
+ print(output_file)
505
+
506
+
507
+ write_instance_to_example_files(instances, tokenizer, args.max_seq_length,
508
+ args.max_predictions_per_seq, output_files)
509
+
510
+
511
+ if __name__ == "__main__":
512
+ main()
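The shards written by write_instance_to_example_files can be inspected with standard tf.data tooling. The sketch below is not part of the file; it assumes the default max_seq_length=128 and max_predictions_per_seq=20, and the shard name is a placeholder.

# Minimal sketch for reading back one output shard of this script.
import tensorflow as tf

MAX_SEQ_LENGTH = 128          # must match --max_seq_length used at creation time
MAX_PREDICTIONS_PER_SEQ = 20  # must match --max_predictions_per_seq

feature_spec = {
    "input_ids": tf.io.FixedLenFeature([MAX_SEQ_LENGTH], tf.int64),
    "input_mask": tf.io.FixedLenFeature([MAX_SEQ_LENGTH], tf.int64),
    "segment_ids": tf.io.FixedLenFeature([MAX_SEQ_LENGTH], tf.int64),
    "masked_lm_positions": tf.io.FixedLenFeature([MAX_PREDICTIONS_PER_SEQ], tf.int64),
    "masked_lm_ids": tf.io.FixedLenFeature([MAX_PREDICTIONS_PER_SEQ], tf.int64),
    "masked_lm_weights": tf.io.FixedLenFeature([MAX_PREDICTIONS_PER_SEQ], tf.float32),
    "next_sentence_labels": tf.io.FixedLenFeature([1], tf.int64),
}

dataset = tf.data.TFRecordDataset("wiki_shard_0000.tfrecord").map(   # placeholder filename
    lambda record: tf.io.parse_single_example(record, feature_spec))
for example in dataset.take(1):
    print({name: tensor.shape for name, tensor in example.items()})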
docker/bloom13b/Model-References/TensorFlow/nlp/bert/data_preprocessing/pack_pretraining_data_tfrec.py ADDED
@@ -0,0 +1,531 @@
1
+ # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ ###############################################################################
15
+ # Copyright (C) 2022 Habana Labs, Ltd. an Intel Company
16
+ ###############################################################################
17
+ # Changes:
18
+ # - Added functionality for saving the packing algorithm's parameters to a metadata file.
19
+ # - Added checks for the output_dir parameter. It will be created automatically if the given location does not exist.
20
+
21
+
22
+ import argparse
23
+ import gc
24
+ import json
25
+ import os
26
+ import random
27
+ import time
28
+ from collections import OrderedDict, defaultdict, deque
29
+ from concurrent.futures import ProcessPoolExecutor
30
+ from functools import lru_cache
31
+ from itertools import chain, repeat
32
+ from sys import getsizeof, stderr
33
+
34
+ import numpy as np
35
+ import tensorflow as tf
36
+ from scipy import optimize
37
+
38
+ @lru_cache(maxsize=None)
39
+ def packing_strategies(start, previous, target, depth):
40
+ gap = target - start
41
+
42
+ # The collection of possible strategies given the
43
+ # starting sum, the target sum, and the available depth
44
+ # strategy search is limited to increments greater or equal to previous
45
+ strategies = []
46
+ # Complete the packing with exactly 1 number
47
+ if depth == 1:
48
+ if gap >= previous:
49
+ strategies.append([gap])
50
+
51
+ # Complete the sample in "depth" steps, recursively
52
+ else:
53
+ for new in range(previous, gap + 1):
54
+
55
+ new_gap = target - start - new
56
+ if new_gap == 0:
57
+ strategies.append([new])
58
+ else:
59
+ options = packing_strategies(start + new, new, target, depth - 1)
60
+
61
+ for option in options:
62
+ if len(option) > 0:
63
+ strategies.append([new] + option)
64
+ return strategies
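In other words, packing_strategies enumerates the non-decreasing integer partitions of the target length into at most depth parts. A toy check, assuming it runs in the same module as the function above:

# For a toy target length of 8 and at most 3 sequences per pack, the result is
# every non-decreasing partition of 8 into <= 3 parts (10 strategies in total),
# e.g. [1, 1, 6], [2, 3, 3], [4, 4] and [8].
toy_strategies = packing_strategies(0, 1, 8, 3)
print(len(toy_strategies), toy_strategies)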
65
+
66
+
67
+ def get_metadata_file_path(output_dir):
68
+ """Returns path for metadata file one direcotry above output_dir.
69
+ File will be called the same way as directory with training dataset
70
+ with appended metadata.json as below:
71
+ ├── training
72
+ └── training_metadata.json"""
73
+ norm_path = os.path.normpath(output_dir)
74
+ base_path, metadata_file_name = os.path.split(norm_path)
75
+ metadata_file_name = metadata_file_name + '_metadata.json'
76
+ return os.path.join(base_path, metadata_file_name)
77
+
78
+ def get_packing_recipe(output_dir, sequence_lengths, max_sequence_length, max_sequences_per_pack=3):
79
+ # Histogram of sequence lengths
80
+ histogram, bins = np.histogram(sequence_lengths, bins=np.arange(1, max_sequence_length + 2))
81
+ print("Begin packing pass".center(80, "_"))
82
+ print(f"Unpacked mean sequence length: {sequence_lengths.mean():3.2f}")
83
+
84
+ # Make sure all strategies are recipes to pack to the correct sequence length
85
+ strategy_set = packing_strategies(0, 1, max_sequence_length, max_sequences_per_pack)
86
+ for strategy in strategy_set:
87
+ assert(sum(strategy) == max_sequence_length)
88
+ num_strategies = len(strategy_set)
89
+ print(f"Found {num_strategies} unique packing strategies.")
90
+
91
+ # Solve the packing equation A@mixture = histogram
92
+ A = np.zeros((max_sequence_length, num_strategies), dtype=np.int32)
93
+ for i in range(num_strategies):
94
+ strategy = strategy_set[i]
95
+ for seq_len in strategy:
96
+ A[seq_len - 1, i] += 1
97
+
98
+ # short sequences are inexpensive to add, so should have low residual weights
99
+ # to exactly minimize padding use w0 = np.arange(1, max_sequence_length + 1)
100
+ # in practice the difference is negligible, but this converges faster
101
+ padding_cutoff = 8
102
+ w0 = np.ones([max_sequence_length])
103
+ # w0 = np.linspace(1, max_sequence_length+1, max_sequence_length)/max_sequence_length # padding minimization weight
104
+ w0[:padding_cutoff] = padding_cutoff / (2 * max_sequence_length)
105
+ w0 = np.sqrt(w0)
106
+
107
+ # Starting values for the padding and the mixture
108
+ padding = np.zeros([max_sequence_length], dtype=np.int32)
109
+ mixture = np.zeros([num_strategies], dtype=np.int32)
110
+ b = histogram + padding
111
+
112
+ # Pack sequences as best as possible, then increase padding accordingly and repeat
113
+ for i in range(0, 20):
114
+ print(f"\nIteration: {i}: sequences still to pack: ", b.sum())
115
+ start = time.time()
116
+ partial_mixture, rnorm = optimize.nnls(np.expand_dims(w0, -1) * A, w0 * b)
117
+ print(f"Solving nnls took {time.time() - start:3.2f} seconds.")
118
+ print(f"Residual norm: {rnorm:3.5e}")
119
+
120
+ # Update mixture (round the floating point solution to integers)
121
+ partial_mixture = np.where(partial_mixture < 2, np.rint(partial_mixture), np.floor(partial_mixture))
122
+
123
+ # If partial mixture is empty (due to rounding) we follow the gradient
124
+ # this usually happens when the number of examples is small i.e. ~100
125
+ if partial_mixture.max() == 0:
126
+ grad = A.T @ (b * np.arange(1, max_sequence_length + 1))
127
+ k = int(b.sum() // 2) + 1
128
+ topk = np.argsort(-grad)[:k]
129
+ partial_mixture[topk] += 1
130
+
131
+ # Update mixture
132
+ mixture = mixture + partial_mixture
133
+
134
+ # Compute the residuals
135
+ residual = b - A @ partial_mixture
136
+ print(f"Max residual: {abs(residual).max()}")
137
+ print(f"Residual on first 8 categories: {np.around(residual[:8], 4)}")
138
+ print(f"Residual on last 8 categories: {np.around(residual[-8:], 4)}")
139
+
140
+ # Add padding based on deficit (negative residual)
141
+ partial_padding = np.where(residual < 0, -residual, 0)
142
+ print(f"Added {(partial_padding*np.arange(1,max_sequence_length+1)).sum():3.2e} tokens of padding.")
143
+ padding = padding + partial_padding
144
+
145
+ # Update the rhs vector (remaining surplus sequences)
146
+ b = histogram + padding - A @ mixture
147
+ assert np.all(b >= 0), b
148
+
149
+ # Done iterating
150
+ if b.sum() < 100:
151
+ break
152
+
153
+ # Make sure there is no remainder
154
+ unpacked_seqlen = np.arange(1, args.max_sequence_length + 1)[b > 0]
155
+ # Update the mixture to also cover the unpacked sequences
156
+ for l in unpacked_seqlen:
157
+ # Get the depth 1 strategy
158
+ strategy = sorted([l, args.max_sequence_length - l])
159
+ strategy_index = strategy_set.index(strategy)
160
+ mixture[strategy_index] += b[l-1]
161
+ b = histogram - A @ mixture
162
+ padding = np.where(b < 0, -b, 0)
163
+ b = histogram + padding - A @ mixture
164
+ assert b.sum() == 0
165
+
166
+ # Analyze result
167
+ print("Done solving for packing order".center(80, "_"))
168
+ num_padding_tokens = (np.arange(1, max_sequence_length + 1) * padding).sum()
169
+ num_padding_tokens_original = (max_sequence_length - sequence_lengths).sum()
170
+ number_of_sequences_dropped = b.sum()
171
+ print(f"Number of sequences dropped: {number_of_sequences_dropped}")
172
+ number_of_strategies_utilized = np.count_nonzero(mixture)
173
+ print(f"Number of strategies utilized: {number_of_strategies_utilized}")
174
+ new_number_of_samples = int(mixture.sum())
175
+ original_number_of_samples = len(sequence_lengths)
176
+ compression = 1 - new_number_of_samples / original_number_of_samples
177
+ print(f"New number of samples: {new_number_of_samples:3.2f}, original {original_number_of_samples}. A compression ratio of {compression:3.3f}")
178
+ expected_speedup_from_packing = 1 / (1 - compression)
179
+ print(f"The expected speed-up from packing: {expected_speedup_from_packing}")
180
+ upper_bound = 1.0 / (1 - ((1 - sequence_lengths / max_sequence_length).mean()))
181
+ print(f"Theoretical upper bound on speed-up: {upper_bound:3.3f}")
182
+ avg_sequences_per_sample = ((A.sum(0) * mixture).sum() - padding.sum()) / new_number_of_samples
183
+ print(f"Average sequences/sample {avg_sequences_per_sample:3.5f}")
184
+ print(f"Added {num_padding_tokens:3.2e} padding tokens. Original dataset used {num_padding_tokens_original:3.2e} padding tokens")
185
+ efficiency = (new_number_of_samples*max_sequence_length - num_padding_tokens)/(new_number_of_samples*max_sequence_length)
186
+ print(f"Packing efficiency (fraction of real tokens): {efficiency:3.4f}")
187
+
188
+ print(f"Top 8 strategies")
189
+ topK = np.argsort(-mixture)[:8]
190
+ for i in topK:
191
+ print(f"Strategy {strategy_set[i]} which is used {int(mixture[i])} times")
192
+ print("".center(80, "_"))
193
+
194
+ # Figure out the slicing that each strategy should use
195
+ slicing = np.zeros_like(A)
196
+ slicing[:, 1:] = np.cumsum(A * mixture, axis=1)[:, :-1]
197
+ slicing = slicing.T
198
+
199
+ mixture = mixture.astype(np.int64)
200
+
201
+ # Save packing parameters to metadata file
202
+ metadata_file_path = get_metadata_file_path(output_dir)
203
+ print(f"Saving metadata to file: {metadata_file_path}")
204
+
205
+ packing_metadata = {
206
+ "sequences_dropped": int(number_of_sequences_dropped),
207
+ "num_strategies_utilized": number_of_strategies_utilized,
208
+ "new_number_of_samples": new_number_of_samples,
209
+ "original_number_of_samples": original_number_of_samples,
210
+ "compression_ratio": compression,
211
+ "expected_speedup": expected_speedup_from_packing,
212
+ "theoretical_speedup": float(upper_bound),
213
+ "avg_seq_per_sample": float(avg_sequences_per_sample),
214
+ "padding_tokens_original_dataset": int(num_padding_tokens_original),
215
+ "padding_tokens_packed_dataset": float(num_padding_tokens),
216
+ "packing_efficiency": float(efficiency),
217
+ "top_8_strategies": topK.tolist()
218
+ }
219
+ with open(metadata_file_path, mode='w') as json_file:
220
+ json_file.write(json.dumps(packing_metadata, sort_keys=True, indent=2))
221
+ return strategy_set, mixture, padding, slicing
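The heart of the recipe above is the non-negative least-squares step on A @ mixture ~= histogram mentioned in the comments. A self-contained toy version with made-up numbers, purely for illustration:

# Toy version of the packing equation solved in get_packing_recipe.
# Assumptions: max sequence length 8 and a hand-picked subset of strategies.
import numpy as np
from scipy import optimize

strategies = [[1, 1, 6], [2, 6], [3, 5], [4, 4], [8]]
histogram = np.array([4.0, 1, 1, 2, 1, 3, 0, 1])  # counts of sequence lengths 1..8

A = np.zeros((8, len(strategies)))
for col, strategy in enumerate(strategies):
    for seq_len in strategy:
        A[seq_len - 1, col] += 1          # how many length-l sequences this strategy consumes

mixture, residual_norm = optimize.nnls(A, histogram)  # packs to build per strategy
print(np.round(mixture, 2), round(residual_norm, 3))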
222
+
223
+
224
+ def slice_examples(examples_by_length, slicing, strategy_set, repeat_counts):
225
+ # Divide the work, first between the strategies and then into fixed-size chunks of num_sample_per_slice examples
226
+ slices = []
227
+ strategies = []
228
+ part_idx = []
229
+ for strategy, slice_offsets, repeat_count in zip(strategy_set, slicing, repeat_counts):
230
+ if repeat_count == 0:
231
+ continue
232
+ # Slice out the sequences allocated to this strategy in increments of num_sample_per_slice
233
+ num_sample_per_slice=4480
234
+ num_parts = repeat_count // num_sample_per_slice
235
+ num_parts = num_parts + int(repeat_count != num_parts * num_sample_per_slice)
236
+ subcounts = (min(num_sample_per_slice, repeat_count - num_sample_per_slice * (i - 1)) for i in range(1, num_parts + 1))
237
+ for part_id, part_count in enumerate(subcounts):
238
+ examples = []
239
+ for k, seq_len in enumerate(strategy):
240
+ slice_start = int(slice_offsets[seq_len - 1])
241
+ slice_end = slice_start + int(part_count)
242
+ slice_offsets[seq_len - 1] = slice_end
243
+ examples.append(examples_by_length[seq_len][slice_start:slice_end])
244
+ slices.append(examples)
245
+ strategies.append(strategy)
246
+ part_idx.append(part_id)
247
+ examples_by_length = None
248
+ return slices, strategies, part_idx
249
+
250
+
251
+ def parallel_pack_according_to_strategy(args, part_idx, strategy, examples):
252
+ # Pack the sequences according to the strategy and write them to disk
253
+ try:
254
+ base_filename = os.path.join(args.output_dir, "strategy_" + "_".join(map(str, strategy)))
255
+ filename = base_filename + f"_part_{part_idx}"
256
+ print(filename)
257
+ writer = tf.compat.v1.python_io.TFRecordWriter(filename)
258
+ for i, multi_sequence in enumerate(zip(*examples)):
259
+ features = create_multi_sequence_example(multi_sequence, args.max_predictions_per_sequence,
260
+ args.max_sequence_length, args.max_sequences_per_pack)
261
+ # Write to file
262
+ tf_example = tf.train.Example(features=tf.train.Features(feature=features))
263
+ writer.write(tf_example.SerializeToString())
264
+ writer.close()
265
+ except:
266
+ print('failed to write: ',strategy,part_idx)
267
+ base_filename = os.path.join(args.output_dir, "FAIL_strategy_" + "_".join(map(str, strategy)))
268
+ filename = base_filename + f"_part_{part_idx}"
269
+ print('saved failed examples to: ', filename)
270
+
271
+
272
+
273
+ def create_multi_sequence_example(multi_sequence, max_predictions_per_sequence, max_sequence_length, max_sequences_per_pack):
274
+ # SEQ
275
+ packed_input_ids = np.zeros(max_sequence_length, dtype=np.int32)
276
+ packed_input_mask = np.zeros(max_sequence_length, dtype=np.int32)
277
+ packed_segment_ids = np.zeros(max_sequence_length, dtype=np.int32)
278
+ packed_positions = np.zeros(max_sequence_length, dtype=np.int32)
279
+
280
+ # MLM
281
+ # we are packing up to max_sequences_per_pack, each with a certain percentage of masked tokens
282
+ # in case that percentage is rounded up for all sequences in the pack, we need to add an extra token for
283
+ # each sequence in the pack
284
+ packed_masked_lm_positions = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
285
+ packed_masked_lm_ids = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
286
+ packed_masked_lm_weights = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
287
+
288
+ # NSP
289
+ packed_next_sentence_positions = np.zeros(max_sequences_per_pack, dtype=np.int32)
290
+ packed_next_sentence_labels = np.zeros(max_sequences_per_pack, dtype=np.int32)
291
+ packed_next_sentence_weights = np.zeros(max_sequences_per_pack, dtype=np.int32)
292
+
293
+ offset = 0
294
+ mlm_offset = 0
295
+ sequence_index = 1 # used in the input mask
296
+ for sequence in multi_sequence:
297
+ # Padding sequences are denoted with None
298
+ if sequence is not None:
299
+ example = tf.train.Example()
300
+ example.ParseFromString(sequence.numpy())
301
+
302
+ input_ids = np.array(example.features.feature['input_ids'].int64_list.value)
303
+ input_mask = np.array(example.features.feature['input_mask'].int64_list.value)
304
+ segment_ids = np.array(example.features.feature['segment_ids'].int64_list.value)
305
+ masked_lm_positions = np.array(example.features.feature['masked_lm_positions'].int64_list.value)
306
+ masked_lm_ids = np.array(example.features.feature['masked_lm_ids'].int64_list.value)
307
+ masked_lm_weights = np.array(example.features.feature['masked_lm_weights'].float_list.value)
308
+ next_sentence_labels = np.array(example.features.feature['next_sentence_labels'].int64_list.value)
309
+ seq_len = input_mask.sum()
310
+
311
+ del example
312
+
313
+ # SEQ
314
+ packed_input_ids[offset:offset + seq_len] = input_ids[:seq_len]
315
+ packed_input_mask[offset:offset + seq_len] = sequence_index
316
+ packed_segment_ids[offset:offset + seq_len] = segment_ids[:seq_len]
317
+ packed_positions[offset:offset + seq_len] = np.arange(0, seq_len)
318
+
319
+ # MLM
320
+ mlm_len = int(masked_lm_weights.sum())
321
+ assert mlm_offset + mlm_len < max_predictions_per_sequence + max_sequences_per_pack, "Too many LM predictions per sequence"
322
+ max_mlm = mlm_offset + mlm_len
323
+ packed_masked_lm_positions[mlm_offset:max_mlm] = offset + masked_lm_positions[:mlm_len]
324
+ packed_masked_lm_ids[mlm_offset:max_mlm] = masked_lm_ids[:mlm_len]
325
+ packed_masked_lm_weights[mlm_offset:max_mlm] = sequence_index
326
+ # NSP
327
+ packed_next_sentence_positions[sequence_index - 1] = offset
328
+ packed_next_sentence_labels[sequence_index - 1] = next_sentence_labels
329
+ packed_next_sentence_weights[sequence_index - 1] = 1
330
+
331
+ # Update offsets
332
+ sequence_index += 1
333
+ offset += seq_len
334
+ mlm_offset = max_mlm
335
+ input_ids = None; input_mask = None; segment_ids = None; masked_lm_positions = None;
336
+ masked_lm_ids = None; masked_lm_weights = None; next_sentence_labels = None; seq_len = None;
337
+ # Pack into binary format and write it
338
+
339
+ features = OrderedDict()
340
+
341
+ features["input_ids"] = create_int_feature(packed_input_ids)
342
+ features["input_mask"] = create_int_feature(packed_input_mask)
343
+ features["segment_ids"] = create_int_feature(packed_segment_ids)
344
+ features["positions"] = create_int_feature(packed_positions)
345
+ features["masked_lm_positions"] = create_int_feature(packed_masked_lm_positions)
346
+ features["masked_lm_ids"] = create_int_feature(packed_masked_lm_ids)
347
+ features["masked_lm_weights"] = create_float_feature(packed_masked_lm_weights)
348
+ features["next_sentence_positions"] = create_int_feature(packed_next_sentence_positions)
349
+ features["next_sentence_labels"] = create_int_feature(packed_next_sentence_labels)
350
+ features["next_sentence_weights"] = create_float_feature(packed_next_sentence_weights)
351
+ del packed_input_ids; del packed_input_mask; del packed_segment_ids; del packed_positions; del packed_masked_lm_positions
352
+ del packed_masked_lm_weights; del packed_next_sentence_positions; del packed_next_sentence_labels; del packed_next_sentence_weights
353
+
354
+ return features
355
+
356
+ def create_bytes_feature(value):
357
+ """Returns a bytes_list from a string / byte."""
358
+ if isinstance(value, type(tf.constant(0))):
359
+ value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
360
+ return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
361
+
362
+ def create_int_feature(values):
363
+ feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
364
+ return feature
365
+
366
+ def create_float_feature(values):
367
+ feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
368
+ return feature
369
+
370
+ def total_size(o, handlers={}, verbose=False):
371
+ """ Returns the approximate memory footprint an object and all of its contents.
372
+
373
+ Automatically finds the contents of the following builtin containers and
374
+ their subclasses: tuple, list, deque, dict, set and frozenset.
375
+ To search other containers, add handlers to iterate over their contents:
376
+
377
+ handlers = {SomeContainerClass: iter,
378
+ OtherContainerClass: OtherContainerClass.get_elements}
379
+
380
+ """
381
+ dict_handler = lambda d: chain.from_iterable(d.items())
382
+ all_handlers = {tuple: iter,
383
+ list: iter,
384
+ deque: iter,
385
+ dict: dict_handler,
386
+ set: iter,
387
+ frozenset: iter,
388
+ }
389
+ all_handlers.update(handlers) # user handlers take precedence
390
+ seen = set() # track which object id's have already been seen
391
+ default_size = getsizeof(0) # estimate sizeof object without __sizeof__
392
+
393
+ def sizeof(o):
394
+ if id(o) in seen: # do not double count the same object
395
+ return 0
396
+ seen.add(id(o))
397
+ s = getsizeof(o, default_size)
398
+
399
+ if verbose:
400
+ print(s, type(o), repr(o), file=stderr)
401
+
402
+ for typ, handler in all_handlers.items():
403
+ if isinstance(o, typ):
404
+ s += sum(map(sizeof, handler(o)))
405
+ break
406
+ return s
407
+
408
+ return sizeof(o)
409
+ def compress_zeros(input):
410
+ return input[0:np.where(input)[0][-1]+1]
411
+
412
+ def decompress_zeros(input,list_size):
413
+ output = np.zeros(list_size)
414
+ output[0:len(input)]=input
415
+ return output
416
+
417
+ def compress_seg_ids(segment_ids):
418
+ tmp=np.where(segment_ids)[0]
419
+ return np.array([tmp[0],tmp[-1]-tmp[0]])
420
+
421
+ def decompress_seg_ids(segment_ids):
422
+ output = np.zeros(512)
423
+ output[segment_ids[0],segment_ids[0]+segment_ids[1]]=1
424
+ return output
425
+
426
+ def getCurrentMemoryUsage():
427
+ # Getting all memory using os.popen()
428
+ total_memory, used_memory, free_memory = map(
429
+ int, os.popen('free -t -m').readlines()[-1].split()[1:])
430
+
431
+ # Memory usage
432
+ print("RAM memory % used:", round((used_memory/total_memory) * 100, 2))
433
+ return used_memory/total_memory
434
+
435
+ def parallel_record_loader(record):
436
+ example = tf.train.Example()
437
+ example.ParseFromString(record.numpy())
438
+ im_length = sum(example.features.feature['input_mask'].int64_list.value)
439
+ return record, im_length
440
+
441
+
442
+ def parallel_data_loader(path,filename):
443
+ sequence_lengths_part = []
444
+ examples_by_length_part = defaultdict(list)
445
+ for record in tf.data.TFRecordDataset(path+filename):
446
+ example = tf.train.Example()
447
+ example.ParseFromString(record.numpy())
448
+ im_length = sum(example.features.feature['input_mask'].int64_list.value)
449
+ examples_by_length_part[im_length].append(record)
450
+ sequence_lengths_part.append(im_length)
451
+ del example
452
+ return sequence_lengths_part,examples_by_length_part
453
+
454
+ if __name__ == "__main__":
455
+ tf.compat.v1.enable_eager_execution()
456
+ parser = argparse.ArgumentParser()
457
+ parser.add_argument("--input-glob", help="A glob expression for the input files to read in and pack", required=True, type=str)
458
+ parser.add_argument("--output-dir", help="The destination folder for the output files", required=True)
459
+ parser.add_argument("--max-files", help="At most how many files to process (limited by RAM)", default=100,type=int)
460
+ parser.add_argument("--duplication-factor", help="Same as the one passed to create input data", default=1, type=int)
461
+ parser.add_argument("--max-sequence-length", help="The maximum number of tokens in an example", default=512, type=int)
462
+ parser.add_argument("--max-predictions-per-sequence", help="The maximum number of masked tokens in an un-packed example", default=76, type=int)
463
+ parser.add_argument("--max-sequences-per-pack", help="The maximum number of sequences per packed example.", choices=[2, 3], default=3, type=int)
464
+ args = parser.parse_args()
465
+
466
+ logger = tf.get_logger()
467
+ logger.propagate = False
468
+
469
+ if not os.path.exists(args.output_dir):
470
+ logger.warning(
471
+ f"Output directory: {args.output_dir} does not exists, creating..."
472
+ )
473
+ try:
474
+ os.makedirs(args.output_dir, exist_ok=True)
475
+ except IOError as error:
476
+ logger.error(error)
477
+ raise
478
+
479
+ # Input files
480
+ print("Looping through dataset to collect sequence length information...")
481
+ input_files = np.random.choice(os.listdir(args.input_glob), size=args.max_files, replace=False)
482
+ sequence_lengths = []
483
+ examples_by_length = defaultdict(list)
484
+
485
+ with ProcessPoolExecutor(25) as executor:
486
+ work = repeat(args.input_glob), input_files.tolist()
487
+ for sequence_lengths_part,examples_by_length_part in executor.map(parallel_data_loader, *work):
488
+ pass
489
+ sequence_lengths += sequence_lengths_part
490
+ examples_by_length = { key:examples_by_length.get(key,[])+examples_by_length_part.get(key,[]) for key in set(list(examples_by_length.keys())+list(examples_by_length_part.keys())) }
491
+ del examples_by_length_part
492
+ sequence_lengths_part=None; examples_by_length_part=None
493
+ sequence_lengths = np.array(sequence_lengths)
494
+ print('Done extracting sequence lengths!')
495
+ del executor
496
+ gc.collect()
497
+ # Pass the array of sequence lengths to the packing algorithm
498
+ strategy_set, mixture, padding, slicing = get_packing_recipe(args.output_dir, sequence_lengths, args.max_sequence_length, args.max_sequences_per_pack)
499
+ print('Done get_packing_recipe !!!')
500
+ # Add the calculated padding
501
+ for i in range(1, args.max_sequence_length + 1):
502
+ if i not in examples_by_length.keys():
503
+ examples_by_length[i]=[]
504
+ examples_by_length[i].extend([None] * int(padding[i - 1]))
505
+
506
+ # Shuffle the data
507
+ for key in examples_by_length:
508
+ random.shuffle(examples_by_length[key])
509
+
510
+ # Pack and store the data
511
+ print(f"\nPacking and writing packed dataset to {args.output_dir}.")
512
+
513
+ # Slice the data into chunks of at most num_sample_per_slice packed examples
514
+ example_slices, strategies, part_idx = slice_examples(examples_by_length, slicing, strategy_set, mixture)
515
+ gc.collect()
516
+ print('Done slice_examples !!!')
517
+ del examples_by_length; del slicing; del strategy_set; del mixture
518
+ gc.collect()
519
+ start = time.time()
520
+ print(f"Splitting work into {len(part_idx)} parts.")
521
+ for rr in range(1+len(strategies)//500):
522
+ str_idx,stp_idx=rr*500,min((rr+1)*500,len(strategies))
523
+ part_idx_prt, strategies_prt, example_slices_prt = part_idx[str_idx:stp_idx], strategies[str_idx:stp_idx], example_slices[str_idx:stp_idx]
524
+ with ProcessPoolExecutor(25) as executor:
525
+ work = repeat(args), part_idx_prt, strategies_prt, example_slices_prt
526
+ for partial_result in executor.map(parallel_pack_according_to_strategy, *work):
527
+ pass
528
+ del work
529
+ print(f"\nDone. Took: {time.time() - start:3.2f} seconds to pack and write dataset.")
530
+ print('-------------',str_idx,stp_idx)
531
+ print('Done Cleaning')
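Packed shards carry a different feature set than the unpacked ones: an extra positions vector plus per-pack NSP slots. A reading sketch, assuming the default --max-sequence-length 512, --max-predictions-per-sequence 76 and --max-sequences-per-pack 3; the filename is a placeholder.

# Minimal sketch for parsing one packed record produced by this script.
import tensorflow as tf

MAX_SEQ = 512
MAX_PRED = 76 + 3   # max_predictions_per_sequence + max_sequences_per_pack
MAX_PACK = 3

packed_spec = {
    "input_ids": tf.io.FixedLenFeature([MAX_SEQ], tf.int64),
    "input_mask": tf.io.FixedLenFeature([MAX_SEQ], tf.int64),
    "segment_ids": tf.io.FixedLenFeature([MAX_SEQ], tf.int64),
    "positions": tf.io.FixedLenFeature([MAX_SEQ], tf.int64),
    "masked_lm_positions": tf.io.FixedLenFeature([MAX_PRED], tf.int64),
    "masked_lm_ids": tf.io.FixedLenFeature([MAX_PRED], tf.int64),
    "masked_lm_weights": tf.io.FixedLenFeature([MAX_PRED], tf.float32),
    "next_sentence_positions": tf.io.FixedLenFeature([MAX_PACK], tf.int64),
    "next_sentence_labels": tf.io.FixedLenFeature([MAX_PACK], tf.int64),
    "next_sentence_weights": tf.io.FixedLenFeature([MAX_PACK], tf.float32),
}

dataset = tf.data.TFRecordDataset("strategy_512_part_0").map(  # placeholder filename
    lambda record: tf.io.parse_single_example(record, packed_spec))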
docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_dataset.py ADDED
@@ -0,0 +1,31 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+
5
+ import os
6
+ from pathlib import Path
7
+ import sys
8
+ import socket
9
+ import subprocess
10
+
11
+ def download_dataset_r(dataset_path):
12
+ host_name = socket.gethostname()
13
+ try:
14
+ if not os.path.isdir(dataset_path):
15
+ print(f"{host_name}: *** Downloading dataset...\n\n")
16
+ os.makedirs(dataset_path, exist_ok=True)
17
+ download_script = Path(__file__).parent.joinpath("download_glue_data.py")
18
+ sys.stdout.flush()
19
+ sys.stderr.flush()
20
+ with subprocess.Popen(f"{sys.executable} {str(download_script)} --data_dir {dataset_path} --tasks MRPC", shell=True, executable='/bin/bash') as proc:
21
+ proc.wait()
22
+ except Exception as exc:
23
+ raise Exception(f"{host_name}: Error in {__file__} download_dataset_r({dataset_path})") from exc
24
+
25
+ if __name__ == "__main__":
26
+ host_name = socket.gethostname()
27
+ print(f"{host_name}: In {sys.argv[0]}")
28
+ print(f"{host_name}: called with arguments: \"{sys.argv[1]}\"")
29
+ dataset_path = sys.argv[1]
30
+ print(f"{host_name}: MULTI_HLS_IPS = {os.environ.get('MULTI_HLS_IPS')}")
31
+ download_dataset_r(dataset_path)
docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_glue_data.py ADDED
@@ -0,0 +1,141 @@
1
+ ''' Script for downloading all GLUE data.
2
+
3
+ Note: for legal reasons, we are unable to host MRPC.
4
+ You can either use the version hosted by the SentEval team, which is already tokenized,
5
+ or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually.
6
+ For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example).
7
+ You should then rename and place specific files in a folder (see below for an example).
8
+
9
+ mkdir MRPC
10
+ cabextract MSRParaphraseCorpus.msi -d MRPC
11
+ cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt
12
+ cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt
13
+ rm MRPC/_*
14
+ rm MSRParaphraseCorpus.msi
15
+
16
+ 1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now.
17
+ 2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray!
18
+ '''
19
+
20
+ import os
21
+ import sys
22
+ import shutil
23
+ import argparse
24
+ import tempfile
25
+ import urllib.request
26
+ import zipfile
27
+
28
+ TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"]
29
+ TASK2PATH = {"CoLA":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4',
30
+ "SST":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8',
31
+ "MRPC":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc',
32
+ "QQP":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5',
33
+ "STS":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5',
34
+ "MNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce',
35
+ "SNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df',
36
+ "QNLI": 'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601',
37
+ "RTE":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb',
38
+ "WNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf',
39
+ "diagnostic":'https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D'}
40
+
41
+ MRPC_TRAIN = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt'
42
+ MRPC_TEST = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt'
43
+
44
+ def download_and_extract(task, data_dir):
45
+ print("Downloading and extracting %s..." % task)
46
+ data_file = os.path.join(tempfile.gettempdir(), "%s.zip" % task)
47
+ urllib.request.urlretrieve(TASK2PATH[task], data_file)
48
+ with zipfile.ZipFile(data_file) as zip_ref:
49
+ zip_ref.extractall(data_dir)
50
+ os.remove(data_file)
51
+ print("\tCompleted!")
52
+
53
+ def format_mrpc(data_dir, path_to_data):
54
+ print("Processing MRPC...")
55
+ mrpc_dir = os.path.join(data_dir, "MRPC")
56
+ if not os.path.isdir(mrpc_dir):
57
+ os.mkdir(mrpc_dir)
58
+ if path_to_data:
59
+ mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt")
60
+ mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt")
61
+ else:
62
+ print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN)
63
+ mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
64
+ mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
65
+ urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file)
66
+ urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file)
67
+ assert os.path.isfile(mrpc_train_file), "Train data not found at %s" % mrpc_train_file
68
+ assert os.path.isfile(mrpc_test_file), "Test data not found at %s" % mrpc_test_file
69
+ urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv"))
70
+
71
+ dev_ids = []
72
+ with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh:
73
+ for row in ids_fh:
74
+ dev_ids.append(row.strip().split('\t'))
75
+
76
+ with open(mrpc_train_file, encoding="utf8") as data_fh, \
77
+ open(os.path.join(mrpc_dir, "train.tsv"), 'w', encoding="utf8") as train_fh, \
78
+ open(os.path.join(mrpc_dir, "dev.tsv"), 'w', encoding="utf8") as dev_fh:
79
+ header = data_fh.readline()
80
+ train_fh.write(header)
81
+ dev_fh.write(header)
82
+ for row in data_fh:
83
+ label, id1, id2, s1, s2 = row.strip().split('\t')
84
+ if [id1, id2] in dev_ids:
85
+ dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
86
+ else:
87
+ train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
88
+
89
+ with open(mrpc_test_file, encoding="utf8") as data_fh, \
90
+ open(os.path.join(mrpc_dir, "test.tsv"), 'w', encoding="utf8") as test_fh:
91
+ header = data_fh.readline()
92
+ test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n")
93
+ for idx, row in enumerate(data_fh):
94
+ label, id1, id2, s1, s2 = row.strip().split('\t')
95
+ test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2))
96
+ print("\tCompleted!")
97
+
98
+ def download_diagnostic(data_dir):
99
+ print("Downloading and extracting diagnostic...")
100
+ if not os.path.isdir(os.path.join(data_dir, "diagnostic")):
101
+ os.mkdir(os.path.join(data_dir, "diagnostic"))
102
+ data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv")
103
+ urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file)
104
+ print("\tCompleted!")
105
+ return
106
+
107
+ def get_tasks(task_names):
108
+ task_names = task_names.split(',')
109
+ if "all" in task_names:
110
+ tasks = TASKS
111
+ else:
112
+ tasks = []
113
+ for task_name in task_names:
114
+ assert task_name in TASKS, "Task %s not found!" % task_name
115
+ tasks.append(task_name)
116
+ return tasks
117
+
118
+ def main(arguments):
119
+ parser = argparse.ArgumentParser()
120
+ parser.add_argument('--data_dir', help='directory to save data to', type=str, default='glue_data')
121
+ parser.add_argument('--tasks', help='tasks to download data for as a comma separated string',
122
+ type=str, default='all')
123
+ parser.add_argument('--path_to_mrpc', help='path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_test.txt',
124
+ type=str, default='')
125
+ args = parser.parse_args(arguments)
126
+
127
+ if not os.path.isdir(args.data_dir):
128
+ os.mkdir(args.data_dir)
129
+ tasks = get_tasks(args.tasks)
130
+
131
+ for task in tasks:
132
+ if task == 'MRPC':
133
+ format_mrpc(args.data_dir, args.path_to_mrpc)
134
+ elif task == 'diagnostic':
135
+ download_diagnostic(args.data_dir)
136
+ else:
137
+ download_and_extract(task, args.data_dir)
138
+
139
+
140
+ if __name__ == '__main__':
141
+ sys.exit(main(sys.argv[1:]))
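For quick reference, a minimal sketch of driving the downloader above from Python rather than the command line. It assumes the script is importable as `download_glue_data` and simply reuses its `main()` entry point; the flag names come from the argparse block, while the directory values are only examples.

```python
# Hedged usage sketch for the GLUE downloader above; flag names come from the
# argparse definitions in the script, the chosen values are only examples.
from download_glue_data import main

# Download everything into ./glue_data (MRPC falls back to the SentEval mirror).
main(['--data_dir', 'glue_data', '--tasks', 'all'])

# Or fetch just MRPC and the diagnostic set, pointing at a locally extracted
# copy of the MSR Paraphrase Corpus (placeholder path).
main(['--data_dir', 'glue_data',
      '--tasks', 'MRPC,diagnostic',
      '--path_to_mrpc', '/path/to/MRPC'])
```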
docker/bloom13b/Model-References/TensorFlow/nlp/bert/download/download_pretrained_model.py ADDED
@@ -0,0 +1,75 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+
5
+ import os
6
+ from pathlib import Path
7
+ import sys
8
+ import socket
9
+ import urllib.request
10
+ import zipfile
11
+ import subprocess
12
+
13
+
14
+ def run_cmd_as_subprocess(cmd: str):
15
+ print(cmd)
16
+ sys.stdout.flush()
17
+ sys.stderr.flush()
18
+ with subprocess.Popen(cmd, shell=True, executable='/bin/bash') as proc:
19
+ proc.wait()
20
+
21
+
22
+ def download_pretrained_model_r(pretrained_url, pretrained_model, flatten_archive=False):
23
+ host_name = socket.gethostname()
24
+ this_dir = os.getcwd()
25
+ try:
26
+ os.chdir(Path(__file__).parent.parent)
27
+ if not os.path.isdir(pretrained_model):
28
+ _wget = False
29
+ if os.path.exists(pretrained_model + ".zip") == False:
30
+ _wget = True
31
+ else:
32
+ if os.path.getsize(pretrained_model + ".zip") == 0:
33
+ print(f"{host_name}: *** Broken file, needs download ...\n\n")
34
+ _wget = True
35
+ if _wget == True:
36
+ print(f"{host_name}: *** Downloading pre-trained model...\n\n")
37
+ inf = urllib.request.urlopen(pretrained_url + pretrained_model + ".zip")
38
+ with open(pretrained_model + ".zip", "wb") as outf:
39
+ outf.write(inf.read())
40
+
41
+ print(f"{host_name}: *** Extracting pre-trained model...\n\n")
42
+ with zipfile.ZipFile(pretrained_model + ".zip", 'r') as zip_ref:
43
+ if flatten_archive:
44
+ # large model is zipped with subdirectory, flatten archive tree structure
45
+ for member in zip_ref.infolist():
46
+ # skip directories
47
+ if member.is_dir():
48
+ continue
49
+ zip_ref.extract(member)
50
+ else:
51
+ zip_ref.extractall(pretrained_model)
52
+
53
+ if _wget == True:
54
+ cmd = f"rm -f {pretrained_model}.zip"
55
+ run_cmd_as_subprocess(cmd)
56
+ else:
57
+ print(f"{host_name}: Reusing existing pre-trained model directory \'{pretrained_model}\'")
58
+ os.chdir(this_dir)
59
+ except Exception as exc:
60
+ os.chdir(this_dir)
61
+ raise Exception(f"{host_name}: Error in {__file__} download_pretrained_model()") from exc
62
+
63
+ if __name__ == "__main__":
64
+ host_name = socket.gethostname()
65
+ print(f"{host_name}: In {sys.argv[0]}")
66
+ print(f"{host_name}: called with arguments: \"{sys.argv[1]} {sys.argv[2]} {sys.argv[3]}\"")
67
+ pretrained_url = str(sys.argv[1])
68
+ pretrained_model = str(sys.argv[2])
69
+ flatten_archive_str = str(sys.argv[3])
70
+ if flatten_archive_str == "True":
71
+ flatten_archive = True
72
+ else:
73
+ flatten_archive = False
74
+ print(f"{host_name}: MULTI_HLS_IPS = {os.environ.get('MULTI_HLS_IPS')}")
75
+ download_pretrained_model_r(pretrained_url, pretrained_model, flatten_archive)
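For context, a hedged sketch of calling the helper above directly instead of via `sys.argv`. The URL and archive name are placeholders (the real values are normally supplied as command-line arguments by the calling script), and the function expects `<pretrained_url><pretrained_model>.zip` to be downloadable.

```python
# Illustrative call only; the URL and model name below are placeholders.
from download_pretrained_model import download_pretrained_model_r

download_pretrained_model_r(
    pretrained_url="https://example.com/bert-checkpoints/",  # must end with '/'
    pretrained_model="uncased_L-12_H-768_A-12",              # fetched as <name>.zip
    flatten_archive=False)
```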
docker/bloom13b/Model-References/TensorFlow/nlp/bert/optimization.py ADDED
@@ -0,0 +1,458 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
3
+ # Copyright 2018 The Google AI Language Team Authors.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Functions and classes related to optimization (weight updates)."""
18
+
19
+ from __future__ import absolute_import
20
+ from __future__ import division
21
+ from __future__ import print_function
22
+
23
+ import re
24
+ import tensorflow as tf
25
+ from tensorflow.python.ops import array_ops
26
+ from tensorflow.python.ops import linalg_ops
27
+ from tensorflow.python.ops import math_ops
28
+
29
+ try:
30
+ import horovod.tensorflow as hvd
31
+ except ImportError:
32
+ hvd = None
33
+
34
+ def horovod_enabled():
35
+ return hvd is not None and hvd.is_initialized()
36
+
37
+ def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, manual_fp16=False, use_fp16=False, num_accumulation_steps=1,
38
+ optimizer_type="adam", allreduce_post_accumulation=False, init_loss_scale=2**32, use_tpu=False):
39
+ """Creates an optimizer training op."""
40
+ global_step = tf.compat.v1.train.get_or_create_global_step()
41
+
42
+ # avoid step change in learning rate at end of warmup phase
43
+ if optimizer_type == "adam":
44
+ power = 1.0
45
+ decayed_learning_rate_at_crossover_point = init_lr * (
46
+ (1.0 - float(num_warmup_steps) / float(num_train_steps)) ** power)
47
+ else:
48
+ power = 0.5
49
+ decayed_learning_rate_at_crossover_point = init_lr
50
+
51
+ adjusted_init_lr = init_lr * (init_lr / decayed_learning_rate_at_crossover_point)
52
+ print('decayed_learning_rate_at_crossover_point = %e, adjusted_init_lr = %e' %
53
+ (decayed_learning_rate_at_crossover_point, adjusted_init_lr))
54
+
55
+ learning_rate = tf.constant(value=adjusted_init_lr, shape=[], dtype=tf.float32)
56
+
57
+ # Implements linear decay of the learning rate.
58
+ learning_rate = tf.compat.v1.train.polynomial_decay(
59
+ learning_rate,
60
+ global_step,
61
+ num_train_steps,
62
+ end_learning_rate=0.0,
63
+ power=power,
64
+ cycle=False)
65
+
66
+ # Implements linear warmup. I.e., if global_step < num_warmup_steps, the
67
+ # learning rate will be `global_step/num_warmup_steps * init_lr`.
68
+ if num_warmup_steps:
69
+ global_steps_int = tf.cast(global_step, tf.int32)
70
+ warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
71
+
72
+ global_steps_float = tf.cast(global_steps_int, tf.float32)
73
+ warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
74
+
75
+ warmup_percent_done = global_steps_float / warmup_steps_float
76
+ warmup_learning_rate = init_lr * warmup_percent_done
77
+
78
+ is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
79
+ learning_rate = (
80
+ (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
81
+
82
+ if optimizer_type == "lamb":
83
+ print("Initializing LAMB Optimizer")
84
+ optimizer = LAMBOptimizer(
85
+ learning_rate=learning_rate,
86
+ weight_decay_rate=0.01,
87
+ beta_1=0.9,
88
+ beta_2=0.999,
89
+ epsilon=1e-6,
90
+ exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
91
+ else:
92
+ print("Initializing ADAM Weight Decay Optimizer")
93
+ # It is recommended that you use this optimizer for fine tuning, since this
94
+ # is how the model was trained (note that the Adam m/v variables are NOT
95
+ # loaded from init_checkpoint.)
96
+ optimizer = AdamWeightDecayOptimizer(
97
+ learning_rate=learning_rate,
98
+ weight_decay_rate=0.01,
99
+ beta_1=0.9,
100
+ beta_2=0.999,
101
+ epsilon=1e-6,
102
+ exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
103
+
104
+ if horovod_enabled() and (num_accumulation_steps == 1 or (not allreduce_post_accumulation)):
105
+ optimizer = hvd.DistributedOptimizer(optimizer, sparse_as_dense=True)
106
+ if use_fp16:
107
+ loss_scaler = tf.train.experimental.DynamicLossScale(
108
+ initial_loss_scale=init_loss_scale, increment_period=1000, multiplier=2.0)
109
+ optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer, loss_scaler)
110
+ loss_scale_value = tf.identity(loss_scaler(), name="loss_scale")
111
+ if manual_fp16:
112
+ assert False, "No support for ExponentialUpdateLossScaleManager and LossScaleOptimizer in TF2.0"
113
+ loss_scale_manager = tf.contrib.mixed_precision.ExponentialUpdateLossScaleManager(init_loss_scale=init_loss_scale,
114
+ incr_every_n_steps=1000,
115
+ decr_every_n_nan_or_inf=2,
116
+ decr_ratio=0.5)
117
+ optimizer = tf.contrib.mixed_precision.LossScaleOptimizer(optimizer, loss_scale_manager)
118
+ if use_tpu:
119
+ optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)
120
+ tvars = tf.compat.v1.trainable_variables()
121
+
122
+ if num_accumulation_steps > 1:
123
+ grads_and_vars = optimizer.compute_gradients(loss * 1.0 / num_accumulation_steps, tvars)
124
+ local_step = tf.compat.v1.get_variable(name="local_step", shape=[], dtype=tf.int32, trainable=False,
125
+ initializer=tf.compat.v1.zeros_initializer)
126
+ batch_finite = tf.compat.v1.get_variable(name="batch_finite", shape=[], dtype=tf.bool, trainable=False,
127
+ initializer=tf.compat.v1.ones_initializer)
128
+ accum_vars = [tf.compat.v1.get_variable(
129
+ name=tvar.name.split(":")[0] + "/accum",
130
+ shape=tvar.shape.as_list(),
131
+ dtype=tf.float32,
132
+ trainable=False,
133
+ initializer=tf.compat.v1.zeros_initializer()) for tvar in tf.compat.v1.trainable_variables()]
134
+
135
+ reset_step = tf.cast(tf.math.equal(local_step % num_accumulation_steps, 0), dtype=tf.bool)
136
+ local_step = tf.cond(pred=reset_step, true_fn=lambda: local_step.assign(
137
+ tf.ones_like(local_step)), false_fn=lambda: local_step.assign_add(1))
138
+
139
+ grads_and_vars_and_accums = [(gv[0], gv[1], accum_vars[i])
140
+ for i, gv in enumerate(grads_and_vars) if gv[0] is not None]
141
+ grads, tvars, accum_vars = list(zip(*grads_and_vars_and_accums))
142
+
143
+ all_are_finite = tf.reduce_all(input_tensor=[tf.reduce_all(input_tensor=tf.math.is_finite(
144
+ g)) for g in grads]) if manual_fp16 or use_fp16 else tf.constant(True, dtype=tf.bool)
145
+ batch_finite = tf.cond(pred=reset_step,
146
+ true_fn=lambda: batch_finite.assign(tf.math.logical_and(
147
+ tf.constant(True, dtype=tf.bool), all_are_finite)),
148
+ false_fn=lambda: batch_finite.assign(tf.math.logical_and(batch_finite, all_are_finite)))
149
+
150
+ # This is how the model was pre-trained.
151
+ # ensure global norm is a finite number
152
+ # to prevent clip_by_global_norm from having a hissy fit.
153
+ (clipped_grads, _) = tf.clip_by_global_norm(
154
+ grads, clip_norm=1.0,
155
+ use_norm=tf.cond(
156
+ pred=all_are_finite,
157
+ true_fn=lambda: tf.linalg.global_norm(grads),
158
+ false_fn=lambda: tf.constant(1.0)))
159
+
160
+ accum_vars = tf.cond(pred=reset_step,
161
+ true_fn=lambda: [accum_vars[i].assign(grad) for i, grad in enumerate(clipped_grads)],
162
+ false_fn=lambda: [accum_vars[i].assign_add(grad) for i, grad in enumerate(clipped_grads)])
163
+
164
+ update_step = tf.identity(tf.cast(tf.math.equal(local_step % num_accumulation_steps, 0),
165
+ dtype=tf.bool), name="update_step")
166
+
167
+ def allreduce_of_batch_finite_required():
168
+ # In the bf16 and fp32 cases, batch_finite is tf.constant(True, dtype=tf.bool)
169
+ return horovod_enabled() and manual_fp16 and use_fp16
170
+
171
+ # TODO: if we want to enable skipping non-finite batch iterations in the future, this allreduce will need to change.
172
+ new_global_step = tf.cond(pred=tf.math.logical_and(update_step,
173
+ tf.cast(hvd.allreduce(tf.cast(batch_finite, tf.int32)), tf.bool) if allreduce_of_batch_finite_required() else batch_finite),
174
+ true_fn=lambda: global_step + 1,
175
+ false_fn=lambda: global_step)
176
+ new_global_step = tf.identity(new_global_step, name='step_update')
177
+
178
+ def update(accum_vars):
179
+ with tf.control_dependencies([global_step.assign(new_global_step)]):
180
+ if allreduce_post_accumulation and horovod_enabled():
181
+ accum_vars = [hvd.allreduce(tf.convert_to_tensor(value=accum_var)) if isinstance(accum_var, tf.IndexedSlices)
182
+ else hvd.allreduce(accum_var) for accum_var in accum_vars]
183
+
184
+ return optimizer.apply_gradients(list(zip(accum_vars, tvars)), global_step=global_step)
185
+
186
+ train_op = tf.cond(pred=update_step,
187
+ true_fn=lambda: update(accum_vars), false_fn=lambda: tf.no_op())
188
+ else:
189
+ grads_and_vars = optimizer.compute_gradients(loss, tvars)
190
+ if horovod_enabled():
191
+ grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
192
+ grads, tvars = list(zip(*grads_and_vars))
193
+ else:
194
+ grads = tf.gradients(ys=loss, xs=tvars)
195
+ all_are_finite = tf.reduce_all(
196
+ input_tensor=[tf.reduce_all(input_tensor=tf.math.is_finite(g)) for g in grads]) if use_fp16 or manual_fp16 else tf.constant(True, dtype=tf.bool)
197
+
198
+ # This is how the model was pre-trained.
199
+ # ensure global norm is a finite number
200
+ # to prevent clip_by_global_norm from having a hissy fit.
201
+ (clipped_grads, _) = tf.clip_by_global_norm(
202
+ grads, clip_norm=1.0,
203
+ use_norm=tf.cond(
204
+ pred=all_are_finite,
205
+ true_fn=lambda: tf.linalg.global_norm(grads),
206
+ false_fn=lambda: tf.constant(1.0)))
207
+
208
+ new_global_step = tf.cond(pred=all_are_finite, true_fn=lambda: global_step + 1, false_fn=lambda: global_step)
209
+ new_global_step = tf.identity(new_global_step, name='step_update')
210
+
211
+ with tf.control_dependencies([global_step.assign(new_global_step)]):
212
+ train_op = optimizer.apply_gradients(
213
+ list(zip(clipped_grads, tvars)), global_step=global_step)
214
+ return train_op
215
+
216
+
217
+ class AdamWeightDecayOptimizer(tf.compat.v1.train.Optimizer):
218
+ """A basic Adam optimizer that includes "correct" L2 weight decay."""
219
+
220
+ def __init__(self,
221
+ learning_rate,
222
+ weight_decay_rate=0.0,
223
+ beta_1=0.9,
224
+ beta_2=0.999,
225
+ epsilon=1e-6,
226
+ exclude_from_weight_decay=None,
227
+ name="AdamWeightDecayOptimizer"):
228
+ """Constructs a AdamWeightDecayOptimizer."""
229
+ super(AdamWeightDecayOptimizer, self).__init__(False, name)
230
+
231
+ self.learning_rate = tf.identity(learning_rate, name='learning_rate')
232
+ self.weight_decay_rate = weight_decay_rate
233
+ self.beta_1 = beta_1
234
+ self.beta_2 = beta_2
235
+ self.epsilon = epsilon
236
+ self.exclude_from_weight_decay = exclude_from_weight_decay
237
+
238
+ def apply_gradients(self, grads_and_vars, global_step=None, name=None,
239
+ manual_fp16=False):
240
+ """See base class."""
241
+ assignments = []
242
+ for (grad, param) in grads_and_vars:
243
+ if grad is None or param is None:
244
+ continue
245
+
246
+ param_name = self._get_variable_name(param.name)
247
+ has_shadow = manual_fp16 and param.dtype.base_dtype != tf.float32
248
+ if has_shadow:
249
+ # create shadow fp32 weights for fp16 variable
250
+ param_fp32 = tf.compat.v1.get_variable(
251
+ name=param_name + "/shadow",
252
+ dtype=tf.float32,
253
+ trainable=False,
254
+ initializer=tf.cast(param.initialized_value(), tf.float32))
255
+ else:
256
+ param_fp32 = param
257
+
258
+ m = tf.compat.v1.get_variable(
259
+ name=param_name + "/adam_m",
260
+ shape=param.shape.as_list(),
261
+ dtype=tf.float32,
262
+ trainable=False,
263
+ initializer=tf.compat.v1.zeros_initializer())
264
+ v = tf.compat.v1.get_variable(
265
+ name=param_name + "/adam_v",
266
+ shape=param.shape.as_list(),
267
+ dtype=tf.float32,
268
+ trainable=False,
269
+ initializer=tf.compat.v1.zeros_initializer())
270
+
271
+ # Standard Adam update.
272
+ next_m = (
273
+ tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
274
+ next_v = (
275
+ tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
276
+ tf.square(grad)))
277
+
278
+ update = next_m * tf.math.rsqrt(next_v + self.epsilon * self.epsilon)
279
+
280
+ # Just adding the square of the weights to the loss function is *not*
281
+ # the correct way of using L2 regularization/weight decay with Adam,
282
+ # since that will interact with the m and v parameters in strange ways.
283
+ #
284
+ # Instead we want to decay the weights in a manner that doesn't interact
285
+ # with the m/v parameters. This is equivalent to adding the square
286
+ # of the weights to the loss with plain (non-momentum) SGD.
287
+ if self._do_use_weight_decay(param_name):
288
+ update += self.weight_decay_rate * param_fp32
289
+
290
+ update_with_lr = self.learning_rate * update
291
+
292
+ next_param = param_fp32 - update_with_lr
293
+
294
+ if has_shadow:
295
+ # cast shadow fp32 weights to fp16 and assign to trainable variable
296
+ param.assign(tf.cast(next_param, param.dtype.base_dtype))
297
+ assignments.extend(
298
+ [param_fp32.assign(next_param),
299
+ m.assign(next_m),
300
+ v.assign(next_v)])
301
+ return tf.group(*assignments, name=name)
302
+
303
+ def _do_use_weight_decay(self, param_name):
304
+ """Whether to use L2 weight decay for `param_name`."""
305
+ if not self.weight_decay_rate:
306
+ return False
307
+ if self.exclude_from_weight_decay:
308
+ for r in self.exclude_from_weight_decay:
309
+ if re.search(r, param_name) is not None:
310
+ return False
311
+ return True
312
+
313
+ def _get_variable_name(self, param_name):
314
+ """Get the variable name from the tensor name."""
315
+ m = re.match("^(.*):\\d+$", param_name)
316
+ if m is not None:
317
+ param_name = m.group(1)
318
+ return param_name
319
+
320
+ # This code was originally a workaround (WA) for the following issue:
321
+ # See: https://jira.habana-labs.com/browse/SW-19371
322
+ # However, the root issue has been fixed and the workaround is no longer required.
323
+ #
324
+ # It turned out that this function needs to be uncommented to speed up the BERT finetuning training.
325
+ # See: https://jira.habana-labs.com/browse/SW-19126
326
+ #
327
+ # At the moment, enabling SAO leads to an immediate crash:
328
+ # See: https://jira.habana-labs.com/browse/SW-19688
329
+ #
330
+ def compute_gradients(self, loss, var_list=None,
331
+ gate_gradients=tf.compat.v1.train.Optimizer.GATE_OP,
332
+ aggregation_method=None,
333
+ colocate_gradients_with_ops=False,
334
+ grad_loss=None):
335
+ assert gate_gradients == tf.compat.v1.train.Optimizer.GATE_OP
336
+ assert aggregation_method is None
337
+ assert not colocate_gradients_with_ops
338
+ assert grad_loss is None
339
+
340
+ grads = tf.gradients(ys=loss, xs=var_list)
341
+ grads_and_vars = list(zip(grads, var_list))
342
+ return grads_and_vars
343
+
344
+
345
+ class LAMBOptimizer(tf.compat.v1.train.Optimizer):
346
+ """A LAMB optimizer that includes "correct" L2 weight decay."""
347
+
348
+ def __init__(self,
349
+ learning_rate,
350
+ weight_decay_rate=0.0,
351
+ beta_1=0.9,
352
+ beta_2=0.999,
353
+ epsilon=1e-6,
354
+ exclude_from_weight_decay=None,
355
+ name="LAMBOptimizer"):
356
+ """Constructs a LAMBOptimizer."""
357
+ super(LAMBOptimizer, self).__init__(False, name)
358
+
359
+ self.learning_rate = tf.identity(learning_rate, name='learning_rate')
360
+ self.weight_decay_rate = weight_decay_rate
361
+ self.beta_1 = beta_1
362
+ self.beta_2 = beta_2
363
+ self.epsilon = epsilon
364
+ self.exclude_from_weight_decay = exclude_from_weight_decay
365
+
366
+ def apply_gradients(self, grads_and_vars, global_step, name=None,
367
+ manual_fp16=False):
368
+ """See base class."""
369
+ assignments = []
370
+ steps = tf.cast(global_step, tf.float32)
371
+ for (grad, param) in grads_and_vars:
372
+ if grad is None or param is None:
373
+ continue
374
+
375
+ param_name = self._get_variable_name(param.name)
376
+ has_shadow = manual_fp16 and param.dtype.base_dtype != tf.float32
377
+ if has_shadow:
378
+ # create shadow fp32 weights for fp16 variable
379
+ param_fp32 = tf.compat.v1.get_variable(
380
+ name=param_name + "/shadow",
381
+ dtype=tf.float32,
382
+ trainable=False,
383
+ initializer=tf.cast(param.initialized_value(), tf.float32))
384
+ else:
385
+ param_fp32 = param
386
+
387
+ m = tf.compat.v1.get_variable(
388
+ name=param_name + "/adam_m",
389
+ shape=param.shape.as_list(),
390
+ dtype=tf.float32,
391
+ trainable=False,
392
+ initializer=tf.compat.v1.zeros_initializer())
393
+ v = tf.compat.v1.get_variable(
394
+ name=param_name + "/adam_v",
395
+ shape=param.shape.as_list(),
396
+ dtype=tf.float32,
397
+ trainable=False,
398
+ initializer=tf.compat.v1.zeros_initializer())
399
+
400
+ # LAMB update
401
+ next_m = (
402
+ tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
403
+ next_v = (
404
+ tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
405
+ tf.square(grad)))
406
+
407
+ beta1_correction = (1 - self.beta_1 ** steps)
408
+ beta2_correction = (1 - self.beta_2 ** steps)
409
+
410
+ next_m_unbiased = next_m / beta1_correction
411
+ next_v_unbiased = next_v / beta2_correction
412
+
413
+ update = next_m_unbiased / (tf.sqrt(next_v_unbiased) + self.epsilon)
414
+
415
+ # Just adding the square of the weights to the loss function is *not*
416
+ # the correct way of using L2 regularization/weight decay with Adam,
417
+ # since that will interact with the m and v parameters in strange ways.
418
+ #
419
+ # Instead we want to decay the weights in a manner that doesn't interact
420
+ # with the m/v parameters. This is equivalent to adding the square
421
+ # of the weights to the loss with plain (non-momentum) SGD.
422
+ if self._do_use_weight_decay(param_name):
423
+ update += self.weight_decay_rate * param_fp32
424
+
425
+ w_norm = linalg_ops.norm(param, ord=2)
426
+ g_norm = linalg_ops.norm(update, ord=2)
427
+ ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(
428
+ math_ops.greater(g_norm, 0), (w_norm / g_norm), 1.0), 1.0)
429
+
430
+ update_with_lr = ratio * self.learning_rate * update
431
+
432
+ next_param = param_fp32 - update_with_lr
433
+
434
+ if has_shadow:
435
+ # cast shadow fp32 weights to fp16 and assign to trainable variable
436
+ param.assign(tf.cast(next_param, param.dtype.base_dtype))
437
+ assignments.extend(
438
+ [param_fp32.assign(next_param),
439
+ m.assign(next_m),
440
+ v.assign(next_v)])
441
+ return tf.group(*assignments, name=name)
442
+
443
+ def _do_use_weight_decay(self, param_name):
444
+ """Whether to use L2 weight decay for `param_name`."""
445
+ if not self.weight_decay_rate:
446
+ return False
447
+ if self.exclude_from_weight_decay:
448
+ for r in self.exclude_from_weight_decay:
449
+ if re.search(r, param_name) is not None:
450
+ return False
451
+ return True
452
+
453
+ def _get_variable_name(self, param_name):
454
+ """Get the variable name from the tensor name."""
455
+ m = re.match("^(.*):\\d+$", param_name)
456
+ if m is not None:
457
+ param_name = m.group(1)
458
+ return param_name
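To show how the pieces above fit together, here is a minimal, hedged sketch of `create_optimizer` driving a toy graph-mode training loop. It assumes this file is importable as `optimization` and that a TF2 installation with the `tf.compat.v1` APIs is available; it is not the repository's actual training script.

```python
# Minimal sketch: a single scalar variable optimized via the
# AdamWeightDecayOptimizer path above (no Horovod, no accumulation).
import tensorflow as tf
from optimization import create_optimizer

tf.compat.v1.disable_eager_execution()

x = tf.compat.v1.get_variable("x", shape=[], initializer=tf.compat.v1.ones_initializer())
loss = tf.square(x - 3.0)

# num_accumulation_steps=1 and no Horovod selects the plain (non-accumulating) branch.
train_op = create_optimizer(loss, init_lr=0.1, num_train_steps=100,
                            num_warmup_steps=10, optimizer_type="adam")

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    for _ in range(100):
        sess.run(train_op)
    print("final x:", sess.run(x))
```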
docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/__init__.py ADDED
File without changes
docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/fused_layer_norm.py ADDED
@@ -0,0 +1,141 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import collections
18
+ import copy
19
+ import json
20
+ import math
21
+ import re
22
+ import six
23
+ import tensorflow as tf
24
+
25
+ from tensorflow.python.framework import ops
26
+ from tensorflow.contrib.layers.python.layers import utils
27
+ from tensorflow.contrib.framework.python.ops import variables
28
+ from tensorflow.python.ops import init_ops
29
+ import numpy
30
+ from tensorflow.python.ops import array_ops
31
+ from tensorflow.python.framework import dtypes
32
+ from tensorflow.python.ops import nn
33
+
34
+ def fused_layer_norm(inputs,
35
+ center=True,
36
+ scale=True,
37
+ activation_fn=None,
38
+ reuse=None,
39
+ variables_collections=None,
40
+ outputs_collections=None,
41
+ trainable=True,
42
+ begin_norm_axis=1,
43
+ begin_params_axis=-1,
44
+ scope=None,
45
+ use_fused_batch_norm=False):
46
+ with tf.compat.v1.variable_scope(
47
+ scope, 'LayerNorm', [inputs], reuse=reuse) as sc:
48
+ inputs = ops.convert_to_tensor(inputs)
49
+ inputs_shape = inputs.shape
50
+ inputs_rank = inputs_shape.ndims
51
+ if inputs_rank is None:
52
+ raise ValueError('Inputs %s has undefined rank.' % inputs.name)
53
+ dtype = inputs.dtype.base_dtype
54
+ if begin_norm_axis < 0:
55
+ begin_norm_axis = inputs_rank + begin_norm_axis
56
+ if begin_params_axis >= inputs_rank or begin_norm_axis >= inputs_rank:
57
+ raise ValueError('begin_params_axis (%d) and begin_norm_axis (%d) '
58
+ 'must be < rank(inputs) (%d)' %
59
+ (begin_params_axis, begin_norm_axis, inputs_rank))
60
+ params_shape = inputs_shape[begin_params_axis:]
61
+ if not params_shape.is_fully_defined():
62
+ raise ValueError(
63
+ 'Inputs %s: shape(inputs)[%s:] is not fully defined: %s' %
64
+ (inputs.name, begin_params_axis, inputs_shape))
65
+ # Allocate parameters for the beta and gamma of the normalization.
66
+ beta, gamma = None, None
67
+ if center:
68
+ beta_collections = utils.get_variable_collections(variables_collections,
69
+ 'beta')
70
+ beta = variables.model_variable(
71
+ 'beta',
72
+ shape=params_shape,
73
+ dtype=dtype,
74
+ initializer=init_ops.zeros_initializer(),
75
+ collections=beta_collections,
76
+ trainable=trainable)
77
+ if scale:
78
+ gamma_collections = utils.get_variable_collections(
79
+ variables_collections, 'gamma')
80
+ gamma = variables.model_variable(
81
+ 'gamma',
82
+ shape=params_shape,
83
+ dtype=dtype,
84
+ initializer=init_ops.ones_initializer(),
85
+ collections=gamma_collections,
86
+ trainable=trainable)
87
+ if use_fused_batch_norm:
88
+ # get static TensorShape if fully defined,
89
+ # otherwise retrieve shape tensor
90
+ norm_shape = inputs.shape[begin_norm_axis:]
91
+ if norm_shape.is_fully_defined():
92
+ bn_shape = [1, -1, 1, numpy.prod(norm_shape.as_list())]
93
+ else:
94
+ norm_shape = tf.shape(input=inputs)[begin_norm_axis:]
95
+ bn_shape = [1, -1, 1, tf.reduce_prod(input_tensor=norm_shape)]
96
+ if inputs.get_shape().is_fully_defined():
97
+ outputs_shape = inputs.get_shape()
98
+ else:
99
+ outputs_shape = tf.shape(input=inputs)
100
+ inputs = array_ops.reshape(inputs, bn_shape)
101
+ if inputs.get_shape().is_fully_defined():
102
+ # static inputs TensorShape fully defined after reshape.
103
+ ones = array_ops.ones(inputs.get_shape()[1], dtype=dtypes.float32)
104
+ zeros = array_ops.zeros(inputs.get_shape()[1], dtype=dtypes.float32)
105
+ else:
106
+ # static inputs TensorShape NOT fully defined after reshape.
107
+ # must use dynamic shape, which means these input tensors
108
+ # have to be created at runtime, which causes a slowdown.
109
+ scale_shape = tf.shape(input=inputs)[1]
110
+ ones = array_ops.ones(scale_shape, dtype=dtypes.float32)
111
+ zeros = array_ops.zeros(scale_shape, dtype=dtypes.float32)
112
+ outputs, mean, variance = nn.fused_batch_norm(
113
+ inputs,
114
+ ones, zeros,
115
+ epsilon=1e-4,
116
+ data_format="NCHW")
117
+ outputs = array_ops.reshape(outputs, outputs_shape)
118
+ if center and scale:
119
+ outputs = outputs * gamma + beta
120
+ elif center:
121
+ outputs = outputs + beta
122
+ elif scale:
123
+ outputs = outputs * gamma
124
+ else:
125
+ # Calculate the moments on the last axis (layer activations).
126
+ norm_axes = list(range(begin_norm_axis, inputs_rank))
127
+ mean, variance = nn.moments(inputs, norm_axes, keep_dims=True)
128
+ # Compute layer normalization using the batch_normalization function.
129
+ variance_epsilon = 1e-4
130
+ outputs = nn.batch_normalization(
131
+ inputs,
132
+ mean,
133
+ variance,
134
+ offset=beta,
135
+ scale=gamma,
136
+ variance_epsilon=variance_epsilon)
137
+ outputs.set_shape(inputs_shape)
138
+ if activation_fn is not None:
139
+ outputs = activation_fn(outputs)
140
+ return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
141
+
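A hedged call sketch for the function above. Note that the `tensorflow.contrib` imports at the top of this file mean it only runs under TF 1.x, and the module path `utils.fused_layer_norm` is an assumption about how it is imported.

```python
# Illustrative only; requires TF 1.x because of the tensorflow.contrib imports above.
import tensorflow as tf
from utils.fused_layer_norm import fused_layer_norm

# [batch, sequence, hidden]; normalization over the last axis with learnable
# beta/gamma parameters of shape [hidden].
x = tf.compat.v1.placeholder(tf.float32, shape=[8, 128, 768])
y = fused_layer_norm(x, begin_norm_axis=-1, begin_params_axis=-1,
                     use_fused_batch_norm=False)
```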
docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/gpu_environment.py ADDED
@@ -0,0 +1,36 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import tensorflow as tf
17
+ import numpy as np
18
+
19
+ def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
20
+ initializer=None, regularizer=None,
21
+ trainable=True,
22
+ *args, **kwargs):
23
+ """Custom variable getter that forces trainable variables to be stored in
24
+ float32 precision and then casts them to the training precision.
25
+ """
26
+ storage_dtype = tf.float32 if trainable else dtype
27
+ variable = getter(name, shape, dtype=storage_dtype,
28
+ initializer=initializer, regularizer=regularizer,
29
+ trainable=trainable,
30
+ *args, **kwargs)
31
+ if trainable and dtype != tf.float32:
32
+ variable = tf.cast(variable, dtype)
33
+ return variable
34
+
35
+ def get_custom_getter(compute_type):
36
+ return float32_variable_storage_getter if compute_type == tf.float16 else None
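A hedged sketch of how this getter is typically attached to a variable scope so that fp16 compute reads from fp32 master weights. The scope name and shapes are illustrative, and the module path `utils.gpu_environment` is an assumption.

```python
# Illustrative usage; TF1-style graph mode assumed.
import tensorflow as tf
from utils.gpu_environment import get_custom_getter

tf.compat.v1.disable_eager_execution()
compute_type = tf.float16

with tf.compat.v1.variable_scope("model", custom_getter=get_custom_getter(compute_type)):
    # Stored in float32 by the custom getter, returned cast to float16.
    w = tf.compat.v1.get_variable("w", shape=[1024, 1024], dtype=compute_type)

print(w.dtype)  # float16 view of an fp32 master variable
```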
docker/bloom13b/Model-References/TensorFlow/nlp/bert/utils/utils.py ADDED
@@ -0,0 +1,64 @@
1
+ # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+
14
+ import tensorflow as tf
15
+ import time
16
+
17
+ # report latency and throughput during eval
18
+ class LogEvalRunHook(tf.estimator.SessionRunHook):
19
+ def __init__(self, global_batch_size, hvd_rank=-1):
20
+ self.global_batch_size = global_batch_size
21
+ self.hvd_rank = hvd_rank
22
+ self.count = 0
23
+ self.time_list = []
24
+
25
+ def before_run(self, run_context):
26
+ self.t0 = time.time()
27
+
28
+ def after_run(self, run_context, run_values):
29
+ elapsed_secs = time.time() - self.t0
30
+ self.count += 1
31
+ self.time_list.append(elapsed_secs)
32
+
33
+ # report throughput during training
34
+ class LogTrainRunHook(tf.estimator.SessionRunHook):
35
+ def __init__(self, global_batch_size, hvd_rank=-1, save_checkpoints_steps=1000, num_steps_ignore_xla=100):
36
+ self.global_batch_size = global_batch_size
37
+ self.hvd_rank = hvd_rank
38
+ self.save_checkpoints_steps = save_checkpoints_steps
39
+
40
+ self.total_time = 0.0
41
+ self.count = 0 # Holds number of iterations, including skipped iterations for fp16 loss scaling
42
+ self.skipped = 0
43
+ self.num_steps_ignore_xla = num_steps_ignore_xla
44
+ # initial steps while XLA is still compiling need to be ignored in the throughput computation
45
+
46
+ def after_create_session(self, session, coord):
47
+ self.init_global_step = session.run(tf.compat.v1.train.get_global_step())
48
+
49
+ def before_run(self, run_context):
50
+ self.t0 = time.time()
51
+ return tf.estimator.SessionRunArgs(
52
+ fetches=['step_update:0'])
53
+
54
+ def after_run(self, run_context, run_values):
55
+ elapsed_secs = time.time() - self.t0
56
+ self.global_step = run_values.results[0]
57
+ self.count += 1
58
+
59
+ # Removing the first 100 steps + the first five steps after every checkpoint save
60
+ if (self.global_step - self.init_global_step) <= self.num_steps_ignore_xla or (self.global_step - self.init_global_step) % self.save_checkpoints_steps < 5:
61
+ print("Skipping time record for ", self.global_step, " due to checkpoint-saving/warmup overhead")
62
+ self.skipped += 1
63
+ else:
64
+ self.total_time += elapsed_secs
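A hedged sketch of how these hooks might be attached to an Estimator run. Here `estimator`, `train_input_fn` and `eval_input_fn` are placeholders defined elsewhere, and `LogTrainRunHook` additionally assumes the training graph exposes a `step_update:0` tensor (as created by `optimization.create_optimizer`).

```python
# Illustrative sketch only; `estimator`, `train_input_fn` and `eval_input_fn`
# are assumed to exist elsewhere, so the Estimator calls are shown commented out.
from utils.utils import LogTrainRunHook, LogEvalRunHook

global_batch_size = 32
train_hook = LogTrainRunHook(global_batch_size, save_checkpoints_steps=1000)
eval_hook = LogEvalRunHook(global_batch_size)

# estimator.train(input_fn=train_input_fn, max_steps=10000, hooks=[train_hook])
# estimator.evaluate(input_fn=eval_input_fn, hooks=[eval_hook])

# Approximate training throughput in examples/sec after the run:
# (train_hook.count - train_hook.skipped) * global_batch_size / train_hook.total_time
```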
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/LICENSE ADDED
@@ -0,0 +1,203 @@
1
+ Copyright (c) 2021 Habana Labs, Ltd. an Intel Company
2
+
3
+ Apache License
4
+ Version 2.0, January 2004
5
+ http://www.apache.org/licenses/
6
+
7
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8
+
9
+ 1. Definitions.
10
+
11
+ "License" shall mean the terms and conditions for use, reproduction,
12
+ and distribution as defined by Sections 1 through 9 of this document.
13
+
14
+ "Licensor" shall mean the copyright owner or entity authorized by
15
+ the copyright owner that is granting the License.
16
+
17
+ "Legal Entity" shall mean the union of the acting entity and all
18
+ other entities that control, are controlled by, or are under common
19
+ control with that entity. For the purposes of this definition,
20
+ "control" means (i) the power, direct or indirect, to cause the
21
+ direction or management of such entity, whether by contract or
22
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
23
+ outstanding shares, or (iii) beneficial ownership of such entity.
24
+
25
+ "You" (or "Your") shall mean an individual or Legal Entity
26
+ exercising permissions granted by this License.
27
+
28
+ "Source" form shall mean the preferred form for making modifications,
29
+ including but not limited to software source code, documentation
30
+ source, and configuration files.
31
+
32
+ "Object" form shall mean any form resulting from mechanical
33
+ transformation or translation of a Source form, including but
34
+ not limited to compiled object code, generated documentation,
35
+ and conversions to other media types.
36
+
37
+ "Work" shall mean the work of authorship, whether in Source or
38
+ Object form, made available under the License, as indicated by a
39
+ copyright notice that is included in or attached to the work
40
+ (an example is provided in the Appendix below).
41
+
42
+ "Derivative Works" shall mean any work, whether in Source or Object
43
+ form, that is based on (or derived from) the Work and for which the
44
+ editorial revisions, annotations, elaborations, or other modifications
45
+ represent, as a whole, an original work of authorship. For the purposes
46
+ of this License, Derivative Works shall not include works that remain
47
+ separable from, or merely link (or bind by name) to the interfaces of,
48
+ the Work and Derivative Works thereof.
49
+
50
+ "Contribution" shall mean any work of authorship, including
51
+ the original version of the Work and any modifications or additions
52
+ to that Work or Derivative Works thereof, that is intentionally
53
+ submitted to Licensor for inclusion in the Work by the copyright owner
54
+ or by an individual or Legal Entity authorized to submit on behalf of
55
+ the copyright owner. For the purposes of this definition, "submitted"
56
+ means any form of electronic, verbal, or written communication sent
57
+ to the Licensor or its representatives, including but not limited to
58
+ communication on electronic mailing lists, source code control systems,
59
+ and issue tracking systems that are managed by, or on behalf of, the
60
+ Licensor for the purpose of discussing and improving the Work, but
61
+ excluding communication that is conspicuously marked or otherwise
62
+ designated in writing by the copyright owner as "Not a Contribution."
63
+
64
+ "Contributor" shall mean Licensor and any individual or Legal Entity
65
+ on behalf of whom a Contribution has been received by Licensor and
66
+ subsequently incorporated within the Work.
67
+
68
+ 2. Grant of Copyright License. Subject to the terms and conditions of
69
+ this License, each Contributor hereby grants to You a perpetual,
70
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71
+ copyright license to reproduce, prepare Derivative Works of,
72
+ publicly display, publicly perform, sublicense, and distribute the
73
+ Work and such Derivative Works in Source or Object form.
74
+
75
+ 3. Grant of Patent License. Subject to the terms and conditions of
76
+ this License, each Contributor hereby grants to You a perpetual,
77
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78
+ (except as stated in this section) patent license to make, have made,
79
+ use, offer to sell, sell, import, and otherwise transfer the Work,
80
+ where such license applies only to those patent claims licensable
81
+ by such Contributor that are necessarily infringed by their
82
+ Contribution(s) alone or by combination of their Contribution(s)
83
+ with the Work to which such Contribution(s) was submitted. If You
84
+ institute patent litigation against any entity (including a
85
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
86
+ or a Contribution incorporated within the Work constitutes direct
87
+ or contributory patent infringement, then any patent licenses
88
+ granted to You under this License for that Work shall terminate
89
+ as of the date such litigation is filed.
90
+
91
+ 4. Redistribution. You may reproduce and distribute copies of the
92
+ Work or Derivative Works thereof in any medium, with or without
93
+ modifications, and in Source or Object form, provided that You
94
+ meet the following conditions:
95
+
96
+ (a) You must give any other recipients of the Work or
97
+ Derivative Works a copy of this License; and
98
+
99
+ (b) You must cause any modified files to carry prominent notices
100
+ stating that You changed the files; and
101
+
102
+ (c) You must retain, in the Source form of any Derivative Works
103
+ that You distribute, all copyright, patent, trademark, and
104
+ attribution notices from the Source form of the Work,
105
+ excluding those notices that do not pertain to any part of
106
+ the Derivative Works; and
107
+
108
+ (d) If the Work includes a "NOTICE" text file as part of its
109
+ distribution, then any Derivative Works that You distribute must
110
+ include a readable copy of the attribution notices contained
111
+ within such NOTICE file, excluding those notices that do not
112
+ pertain to any part of the Derivative Works, in at least one
113
+ of the following places: within a NOTICE text file distributed
114
+ as part of the Derivative Works; within the Source form or
115
+ documentation, if provided along with the Derivative Works; or,
116
+ within a display generated by the Derivative Works, if and
117
+ wherever such third-party notices normally appear. The contents
118
+ of the NOTICE file are for informational purposes only and
119
+ do not modify the License. You may add Your own attribution
120
+ notices within Derivative Works that You distribute, alongside
121
+ or as an addendum to the NOTICE text from the Work, provided
122
+ that such additional attribution notices cannot be construed
123
+ as modifying the License.
124
+
125
+ You may add Your own copyright statement to Your modifications and
126
+ may provide additional or different license terms and conditions
127
+ for use, reproduction, or distribution of Your modifications, or
128
+ for any such Derivative Works as a whole, provided Your use,
129
+ reproduction, and distribution of the Work otherwise complies with
130
+ the conditions stated in this License.
131
+
132
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
133
+ any Contribution intentionally submitted for inclusion in the Work
134
+ by You to the Licensor shall be under the terms and conditions of
135
+ this License, without any additional terms or conditions.
136
+ Notwithstanding the above, nothing herein shall supersede or modify
137
+ the terms of any separate license agreement you may have executed
138
+ with Licensor regarding such Contributions.
139
+
140
+ 6. Trademarks. This License does not grant permission to use the trade
141
+ names, trademarks, service marks, or product names of the Licensor,
142
+ except as required for reasonable and customary use in describing the
143
+ origin of the Work and reproducing the content of the NOTICE file.
144
+
145
+ 7. Disclaimer of Warranty. Unless required by applicable law or
146
+ agreed to in writing, Licensor provides the Work (and each
147
+ Contributor provides its Contributions) on an "AS IS" BASIS,
148
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149
+ implied, including, without limitation, any warranties or conditions
150
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151
+ PARTICULAR PURPOSE. You are solely responsible for determining the
152
+ appropriateness of using or redistributing the Work and assume any
153
+ risks associated with Your exercise of permissions under this License.
154
+
155
+ 8. Limitation of Liability. In no event and under no legal theory,
156
+ whether in tort (including negligence), contract, or otherwise,
157
+ unless required by applicable law (such as deliberate and grossly
158
+ negligent acts) or agreed to in writing, shall any Contributor be
159
+ liable to You for damages, including any direct, indirect, special,
160
+ incidental, or consequential damages of any character arising as a
161
+ result of this License or out of the use or inability to use the
162
+ Work (including but not limited to damages for loss of goodwill,
163
+ work stoppage, computer failure or malfunction, or any and all
164
+ other commercial damages or losses), even if such Contributor
165
+ has been advised of the possibility of such damages.
166
+
167
+ 9. Accepting Warranty or Additional Liability. While redistributing
168
+ the Work or Derivative Works thereof, You may choose to offer,
169
+ and charge a fee for, acceptance of support, warranty, indemnity,
170
+ or other liability obligations and/or rights consistent with this
171
+ License. However, in accepting such obligations, You may act only
172
+ on Your own behalf and on Your sole responsibility, not on behalf
173
+ of any other Contributor, and only if You agree to indemnify,
174
+ defend, and hold each Contributor harmless for any liability
175
+ incurred by, or claims asserted against, such Contributor by reason
176
+ of your accepting any such warranty or additional liability.
177
+
178
+ END OF TERMS AND CONDITIONS
179
+
180
+ APPENDIX: How to apply the Apache License to your work.
181
+
182
+ To apply the Apache License to your work, attach the following
183
+ boilerplate notice, with the fields enclosed by brackets "[]"
184
+ replaced with your own identifying information. (Don't include
185
+ the brackets!) The text should be enclosed in the appropriate
186
+ comment syntax for the file format. We also recommend that a
187
+ file or class name and description of purpose be included on the
188
+ same "printed page" as the copyright notice for easier
189
+ identification within third-party archives.
190
+
191
+ Copyright [yyyy] [name of copyright owner]
192
+
193
+ Licensed under the Apache License, Version 2.0 (the "License");
194
+ you may not use this file except in compliance with the License.
195
+ You may obtain a copy of the License at
196
+
197
+ http://www.apache.org/licenses/LICENSE-2.0
198
+
199
+ Unless required by applicable law or agreed to in writing, software
200
+ distributed under the License is distributed on an "AS IS" BASIS,
201
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202
+ See the License for the specific language governing permissions and
203
+ limitations under the License.
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/NOTICE ADDED
@@ -0,0 +1,5 @@
1
+ Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
2
+
3
+ This repository includes software from:
4
+ * Tensor2Tensor, (https://github.com/tensorflow/tensor2tensor) licensed
5
+ under the Apache License, Version 2.0
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/README.md ADDED
@@ -0,0 +1,338 @@
1
+ # Transformer for TensorFlow
2
+
3
+ This repository provides a script and recipe to train the Transformer model for TensorFlow on the Intel® Gaudi® AI Accelerator. For further information on performance, refer to the [Habana Model Performance Data page](https://developer.habana.ai/resources/habana-training-models/#performance).
4
+
5
+ For more information on training deep learning models using Gaudi, refer to [developer.habana.ai](https://developer.habana.ai/resources/).
6
+
7
+ ## Table of Contents
8
+
9
+ * [Model-References](../../../README.md)
10
+ * [Model Overview](#model-overview)
11
+ * [Setup](#setup)
12
+ * [Training and Examples](#training-and-examples)
13
+ * [Evaluating BLEU Score](#evaluating-bleu-score)
14
+ * [Profile](#profile)
15
+ * [Supported Configuration](#supported-configuration)
16
+ * [Changelog](#changelog)
17
+ * [Known Issues](#known-issues)
18
+
19
+ ## Model Overview
20
+ The Transformer is a Neural Machine Translation (NMT) model which uses an attention mechanism to boost training speed and overall accuracy.
21
+ The model was initially introduced in [Attention Is All You Need](https://arxiv.org/abs/1706.03762).
22
+ This implementation is based on [Tensor2Tensor](https://github.com/tensorflow/tensor2tensor) implementation (authors: Google Inc., Artit Wangperawong).
23
+
24
+ There are three model variants available: tiny, base and big.
25
+
26
+ ### Model Architecture
27
+ The Transformer model uses standard NMT encoder-decoder architecture. Unlike other NMT models, Transformer model does not use recurrent connections and operates on fixed size context window.
28
+ The encoder stack is made up of N identical layers. Each layer is composed of the following sub-layers:
29
+ - Self-attention layer
30
+ - Feedforward network (which is 2 fully-connected layers)
31
+
32
+ The decoder stack is also made up of N identical layers. Each layer is composed of the sub-layers:
33
+ - Self-attention layer
34
+ - Multi-headed attention layer combining encoder outputs with results from the previous self-attention layer.
35
+ - Feedforward network (2 fully-connected layers)
36
+
37
+ The encoder uses self-attention to compute a representation of the input sequence. The decoder generates the output sequence one token at a time, taking the encoder output and previous decoder-outputted tokens as inputs.
38
+ The model also applies embeddings on the input and output tokens, and adds a constant positional encoding. The positional encoding adds information about the position of each token.
39
+
40
+ The complete description of the Transformer architecture can be found in [Attention Is All You Need](https://arxiv.org/abs/1706.03762) paper.
41
+
42
+ ## Setup
43
+ Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/GAUDI_Installation_Guide.html) to set up the environment, including the `$PYTHON` environment variable. To achieve the best performance, please follow the methods outlined in the [Optimizing Training Platform guide](https://docs.habana.ai/en/latest/TensorFlow/Model_Optimization_TensorFlow/Optimization_Training_Platform.html).
44
+ The guides will walk you through the process of setting up your system to run the model on Gaudi.
45
+
46
+ ### Clone Habana Model-References
47
+ In the docker container, clone this repository and switch to the branch that matches your SynapseAI version. You can run the [`hl-smi`](https://docs.habana.ai/en/latest/Management_and_Monitoring/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options) utility to determine the SynapseAI version.
48
+ ```bash
49
+ git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References /root/Model-References
50
+ ```
51
+
52
+ **Note:** If the Model-References repository path is not in PYTHONPATH, make sure you update it:
53
+ ```bash
54
+ export PYTHONPATH=$PYTHONPATH:/root/Model-References
55
+ ```
56
+ ### Download and Generate the Dataset
57
+
58
+ Go to the Transformer directory and generate the dataset. The following script will save the dataset to `/data/tensorflow/wmt32k_packed/train`:
59
+ ```bash
60
+ cd Model-References/TensorFlow/nlp/transformer/
61
+ $PYTHON datagen.py \
62
+ --data_dir=/data/tensorflow/wmt32k_packed/train \
63
+ --tmp_dir=/tmp/transformer_datagen \
64
+ --problem=translate_ende_wmt32k_packed \
65
+ --random_seed=429459
66
+ ```
67
+
68
+ ### Install Model Requirements
69
+
70
+ 1. In the docker container, go to the Transformer directory:
71
+ ```bash
72
+ cd /root/Model-References/TensorFlow/nlp/transformer
73
+ ```
74
+
75
+ 2. Install the required packages using pip:
76
+ ```bash
77
+ $PYTHON -m pip install -r requirements.txt
78
+ ```
79
+
80
+ ## Training and Examples
81
+
82
+ ### Single card and Multi-Card Training Examples
83
+
84
+ **NOTE:** All training examples for 1 HPU and 8 HPUs are valid for both first-gen Gaudi and Gaudi2.
85
+
86
+ **Run training on 1 HPU:**
87
+
88
+ ```bash
89
+ $PYTHON trainer.py \
90
+ --data_dir=<path_to_dataset>/train \
91
+ --problem=translate_ende_wmt32k_packed \
92
+ --model=transformer \
93
+ --hparams_set=transformer_<model_size> \
94
+ --hparams=batch_size=<batch_size> \
95
+ --output_dir=<path_to_output_dir> \
96
+ --local_eval_frequency=<eval_frequency> \
97
+ --train_steps=<train_steps> \
98
+ --schedule=train \
99
+ --use_hpu=True \
100
+ --use_bf16=<use_bf16>
101
+ ```
102
+
103
+ Run training on 1 HPU, batch size 4096, bfloat16, transformer_big, 300k steps with a checkpoint saved every 10k steps, last 10 checkpoints kept:
104
+
105
+ ```bash
106
+ $PYTHON trainer.py \
107
+ --data_dir=/data/tensorflow/wmt32k_packed/train/ \
108
+ --problem=translate_ende_wmt32k_packed \
109
+ --model=transformer \
110
+ --hparams_set=transformer_big \
111
+ --hparams=batch_size=4096 \
112
+ --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs4096 \
113
+ --local_eval_frequency=10000 \
114
+ --keep_checkpoint_max=10 \
115
+ --train_steps=300000 \
116
+ --schedule=train \
117
+ --use_hpu=True \
118
+ --use_bf16=True
119
+ ```
120
+
121
+ For Gaudi2, the training batch size can be increased for better performance:
122
+ ```bash
123
+ $PYTHON trainer.py \
124
+ --data_dir=/data/tensorflow/wmt32k_packed/train/ \
125
+ --problem=translate_ende_wmt32k_packed \
126
+ --model=transformer \
127
+ --hparams_set=transformer_big \
128
+ --hparams=batch_size=16384,learning_rate_constant=5.0,learning_rate_warmup_steps=5000 \
129
+ --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs16384 \
130
+ --local_eval_frequency=2500 \
131
+ --keep_checkpoint_max=10 \
132
+ --train_steps=75000 \
133
+ --schedule=train \
134
+ --use_hpu=True \
135
+ --use_bf16=True
136
+ ```
137
+
138
+ **Run training on 8 HPUs:**
139
+
140
+ **NOTE:** The mpirun `--map-by` PE attribute value may vary depending on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/TensorFlow/Tensorflow_Scaling_Guide/Horovod_Scaling/index.html#mpirun-configuration).
141
+
142
+ Run training on 8 HPUs with a global batch size of 8 * 4096, bfloat16 precision, the transformer_big model, 300k steps, checkpoints saved every 10k steps, the last 10 checkpoints kept, and a learning rate constant of 2.5:
143
+
144
+ ```bash
145
+ mpirun \
146
+ --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 \
147
+ --tag-output --merge-stderr-to-stdout \
148
+ $PYTHON trainer.py \
149
+ --data_dir=/data/tensorflow/wmt32k_packed/train/ \
150
+ --problem=translate_ende_wmt32k_packed \
151
+ --model=transformer \
152
+ --hparams_set=transformer_big \
153
+ --hparams=batch_size=4096,learning_rate_constant=2.5 \
154
+ --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs4096 \
155
+ --local_eval_frequency=10000 \
156
+ --keep_checkpoint_max=10 \
157
+ --train_steps=300000 \
158
+ --schedule=train \
159
+ --use_horovod=True \
160
+ --use_hpu=True \
161
+ --use_bf16=True
162
+ ```
163
+
164
+ For Gaudi2, the training batch size can be increased for better performance:
165
+
166
+ ```bash
167
+ mpirun \
168
+ --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 \
169
+ --tag-output --merge-stderr-to-stdout \
170
+ $PYTHON trainer.py \
171
+ --data_dir=/data/tensorflow/wmt32k_packed/train/ \
172
+ --problem=translate_ende_wmt32k_packed \
173
+ --model=transformer \
174
+ --hparams_set=transformer_big \
175
+ --hparams=batch_size=16384,learning_rate_constant=5.0,learning_rate_warmup_steps=5000 \
176
+ --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs16384 \
177
+ --local_eval_frequency=2500 \
178
+ --keep_checkpoint_max=10 \
179
+ --train_steps=75000 \
180
+ --schedule=train \
181
+ --use_horovod=True \
182
+ --use_hpu=True \
183
+ --use_bf16=True
184
+ ```
185
+
186
+ ### Multi-Server Training and Examples
187
+ To run training on multiple servers, make sure to set the `MULTI_HLS_IPS` environment
188
+ variable to the IP addresses of the servers being used.
189
+
190
+ **NOTE:** Multi-server training is supported only on first-gen Gaudi.
191
+
192
+ **Run training on 16 HPUs:**
193
+ ```bash
194
+ export MULTI_HLS_IPS=192.10.100.174,10.10.100.101
195
+ mpirun \
196
+ --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 \
197
+ --tag-output --merge-stderr-to-stdout \
198
+ $PYTHON trainer.py \
199
+ --data_dir=/data/tensorflow/wmt32k_packed/train/ \
200
+ --problem=translate_ende_wmt32k_packed \
201
+ --model=transformer \
202
+ --hparams_set=transformer_big \
203
+ --hparams=batch_size=4096,learning_rate_constant=3.0 \
204
+ --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs4096 \
205
+ --local_eval_frequency=50000 \
206
+ --train_steps=150000 \
207
+ --schedule=train \
208
+ --use_horovod=True \
209
+ --use_hpu=True \
210
+ --use_bf16=True
211
+ ```
212
+
213
+ **Run training on 32 HPUs:**
214
+
215
+ **NOTE:** It is recommended to set `learning_rate_constant` to 3.5 and `train_steps` to 75000.
216
+
217
+ ```bash
218
+ export MULTI_HLS_IPS=192.10.100.174,10.10.100.101,10.10.100.102,10.10.100.103
219
+ mpirun \
220
+ --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 \
221
+ --tag-output --merge-stderr-to-stdout \
222
+ $PYTHON trainer.py \
223
+ --data_dir=/data/tensorflow/wmt32k_packed/train/ \
224
+ --problem=translate_ende_wmt32k_packed \
225
+ --model=transformer \
226
+ --hparams_set=transformer_big \
227
+ --hparams=batch_size=4096,learning_rate_constant=3.5 \
228
+ --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs4096 \
229
+ --local_eval_frequency=50000 \
230
+ --train_steps=75000 \
231
+ --schedule=train \
232
+ --use_horovod=True \
233
+ --use_hpu=True \
234
+ --use_bf16=True
235
+ ```
236
+
237
+ ## Evaluating BLEU Score
238
+ After training the model, you can evaluate the achieved BLEU score:
239
+ 1. Download and tokenize the validation file:
240
+ ```bash
241
+ sacrebleu -t wmt14 -l en-de --echo src > wmt14.src
242
+ cat wmt14.src | sacremoses tokenize -l en > wmt14.src.tok
243
+ ```
244
+
245
+ 2. Compute the BLEU score of a single checkpoint:
246
+ ```bash
247
+ $PYTHON decoder.py \
248
+ --problem=translate_ende_wmt32k_packed \
249
+ --model=transformer \
250
+ --hparams_set=transformer_big \
251
+ --data_dir=<path_to_dataset>/train \
252
+ --output_dir=<path_to_output_dir> \
253
+ --checkpoint_path=<path_to_checkpoint> \
254
+ --use_hpu=True \
255
+ --decode_from_file=./wmt14.src.tok \
256
+ --decode_to_file=./wmt14.tgt.tok \
257
+ --decode_hparams=log_results=False
258
+ cat wmt14.tgt.tok | sacremoses detokenize -l de | sacrebleu -t wmt14 -l en-de
259
+ ```
260
+
261
+ 3. Optional: To split the BLEU calculation across multiple cards, run `decoder.py` through `mpirun`. For example:
262
+ ```bash
263
+ mpirun \
264
+ --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 \
265
+ --tag-output --merge-stderr-to-stdout \
266
+ $PYTHON decoder.py \
267
+ --problem=translate_ende_wmt32k_packed \
268
+ --model=transformer \
269
+ --hparams_set=transformer_big \
270
+ --data_dir=<path_to_dataset>/train \
271
+ --output_dir=<path_to_output_dir> \
272
+ --checkpoint_path=<path_to_checkpoint> \
273
+ --decode_from_file=./wmt14.src.tok \
274
+ --decode_to_file=./wmt14.tgt.tok \
275
+ --use_hpu=True \
276
+ --use_horovod=True \
277
+ --decode_hparams=log_results=False
278
+ cat wmt14.tgt.tok | sacremoses detokenize -l de | sacrebleu -t wmt14 -l en-de
279
+ ```
280
+ **NOTE:** The mpirun `--map-by` PE attribute value may vary depending on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/TensorFlow/Tensorflow_Scaling_Guide/Horovod_Scaling/index.html#mpirun-configuration).
281
+
282
+ ## Profile
283
+ To run with profiling enabled, pass the `--profile_steps` flag. It takes a comma-separated pair of numbers specifying the steps on which profiling starts and ends.
284
+
285
+ Profiler steps are counted individually for each run. Thus, if you run training for 100 steps with `--profile_steps 99,100`, profiling is always enabled for the last two steps, regardless of `global_step_count`.
286
+
287
+ **Run training on 1 HPU with profiler:**
288
+
289
+ ```bash
290
+ $PYTHON trainer.py \
291
+ --data_dir=/data/tensorflow/wmt32k_packed/train/ \
292
+ --problem=translate_ende_wmt32k_packed \
293
+ --model=transformer \
294
+ --hparams_set=transformer_big \
295
+ --hparams=batch_size=4096 \
296
+ --output_dir=./translate_ende_wmt32k_packed/transformer_big/bs4096 \
297
+ --local_eval_frequency=10000 \
298
+ --train_steps=100 \
299
+ --schedule=train \
300
+ --use_hpu=True \
301
+ --profile_steps 50,53
302
+ ```
303
+ The above example produces a profile trace for 4 steps (50, 51, 52, 53).
304
+
305
+ ## Supported Configuration
306
+ | Validated on | SynapseAI Version | TensorFlow Version(s) | Mode |
307
+ |:------:|:-----------------:|:-----:|:----------:|
308
+ | Gaudi | 1.14.0 | 2.15.0 | Training |
309
+ | Gaudi2 | 1.14.0 | 2.15.0 | Training |
310
+
311
+ ## Changelog
312
+ ### 1.6.0
313
+ * Model enabled on Gaudi2, with the same config as first-gen Gaudi.
314
+ * Added profiling support.
315
+ * Enabled experimental variable clustering to improve performance.
316
+ * Removed advanced parameters section from README.
317
+
318
+ ### 1.4.0
319
+ * Replaced references to the custom demo script with community entry points in the README.
320
+ * Added support for importing the horovod-fork package directly instead of using Model-References' TensorFlow.common.horovod_helpers; wrapped the horovod import in a try-except block so that the user is not required to install the library when running the model on a single card.
321
+ * Updated requirements.txt.
322
+ * Changed the default value of the log_step_count_steps flag.
323
+
324
+ ### 1.3.0
325
+ * Enabled multi-HPU BLEU calculation.
326
+ * Updated requirements.txt.
327
+
328
+ ### 1.2.0
329
+ * Added support for the recipe cache; see `TF_RECIPE_CACHE_PATH` in the HabanaAI documentation for details.
330
+ * Enabled multi-server training.
331
+
332
+ ### Training Script Modifications
333
+ * Support for models other than Transformer was removed.
334
+ * Added support for Horovod, together with some adjustments in the topology script to simplify the computational graph.
335
+
336
+ ## Known Issues
337
+
338
+ Only FP32 precision is supported when calculating BLEU on HPU.
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/bf16_config/transformer.json ADDED
@@ -0,0 +1,153 @@
1
+ {
2
+ "allowlist": [
3
+ "_ScopedAllocatorSplit",
4
+ "_ScopedAllocatorConcat",
5
+ "_ScopedAllocator",
6
+ "BatchMatMul",
7
+ "BatchMatMulV2",
8
+ "BiasAdd",
9
+ "CollectiveReduceV2",
10
+ "CollectiveReduceV3",
11
+ "Conv2D",
12
+ "Conv2DBackpropFilter",
13
+ "Conv2DBackpropInput",
14
+ "Cumprod",
15
+ "Cumsum",
16
+ "EuclideanNorm",
17
+ "Exp",
18
+ "FloorDiv",
19
+ "FusedBatchNormV2",
20
+ "FusedBatchNormV3",
21
+ "FusedBatchNormGradV2",
22
+ "FusedBatchNormGradV3",
23
+ "GatherNd",
24
+ "GatherV2",
25
+ "Greater",
26
+ "GreaterEqual",
27
+ "HabanaConv2DWithPadding",
28
+ "HabanaConv2DWithPaddingBackpropFilter",
29
+ "HabanaConv2DWithPaddingBackpropInput",
30
+ "HabanaDropout",
31
+ "HabanaDropoutGrad",
32
+ "HabanaDropoutStateful",
33
+ "HabanaFusedBatchNormV3",
34
+ "HabanaGelu",
35
+ "HabanaGeluGrad",
36
+ "HabanaLayerNorm",
37
+ "HabanaLayerNormGrad",
38
+ "HabanaSoftmaxGrad",
39
+ "HabanaLogSoftmaxGrad",
40
+ "HorovodAllgather",
41
+ "HorovodAllreduce",
42
+ "HpuCollectiveReduce",
43
+ "Less",
44
+ "LessEqual",
45
+ "Log",
46
+ "Log1p",
47
+ "LogSoftmax",
48
+ "MatMul",
49
+ "MaxPool",
50
+ "MaxPoolV2",
51
+ "MaxPoolGrad",
52
+ "MaxPoolGradV2",
53
+ "Mul",
54
+ "PyramidRoiAlign",
55
+ "PyramidRoiAlignGradImages",
56
+ "Relu",
57
+ "Relu6",
58
+ "ReluGrad",
59
+ "Relu6Grad",
60
+ "Round",
61
+ "Rsqrt",
62
+ "RsqrtGrad",
63
+ "Sigmoid",
64
+ "SigmoidGrad",
65
+ "Softmax",
66
+ "SparseSoftmaxCrossEntropyWithLogits",
67
+ "Square",
68
+ "SquaredDifference",
69
+ "Sqrt",
70
+ "Tanh",
71
+ "TanhGrad",
72
+ "TensorScatterUpdate"
73
+ ],
74
+ "conditional_list": [
75
+ "Abs",
76
+ "Add",
77
+ "AddN",
78
+ "AddV2",
79
+ "ArgMax",
80
+ "ArgMin",
81
+ "BiasAddGrad",
82
+ "CollectiveReduceV2",
83
+ "CollectiveReduceV3",
84
+ "DynamicStitch",
85
+ "Equal",
86
+ "ExpandDims",
87
+ "Fill",
88
+ "HabanaClampFwd",
89
+ "HabanaClampBwd",
90
+ "HabanaMaxGrad",
91
+ "HabanaMinGrad",
92
+ "HabanaSparseSegmentSum",
93
+ "HabanaRandomUniformWithMaxval",
94
+ "HabanaRandomUniformWithScale",
95
+ "HabanaSize",
96
+ "HorovodAllgather",
97
+ "HorovodAllreduce",
98
+ "HpuCollectiveReduce",
99
+ "HpuCollectiveGather",
100
+ "HpuCollectiveGatherV2",
101
+ "Identity",
102
+ "IsFinite",
103
+ "MatrixBandPart",
104
+ "Neg",
105
+ "NotEqual",
106
+ "Pack",
107
+ "Pad",
108
+ "PadV2",
109
+ "RandomStandardNormal",
110
+ "RandomUniform",
111
+ "Rank",
112
+ "Reshape",
113
+ "ResizeNearestNeighbor",
114
+ "ResizeNearestNeighborGrad",
115
+ "Select",
116
+ "SelectV2",
117
+ "Shape",
118
+ "ShapeN",
119
+ "Sign",
120
+ "Size",
121
+ "Slice",
122
+ "SparseSegmentSumWithNumSegments",
123
+ "SplitV",
124
+ "Split",
125
+ "Snapshot",
126
+ "Squeeze",
127
+ "StridedSlice",
128
+ "StridedSliceGrad",
129
+ "Sub",
130
+ "Tile",
131
+ "Transpose",
132
+ "Unpack",
133
+ "ZerosLike"
134
+ ],
135
+ "strict_conditional_list": [
136
+ "Add",
137
+ "AddN",
138
+ "BiasAddGrad",
139
+ "Sub"
140
+ ],
141
+ "non_convertible_exceptions": [
142
+ [
143
+ ".*KEEP_FP32_PRECISION.*",
144
+ ""
145
+ ]
146
+ ],
147
+ "convertible_exceptions": [
148
+ [
149
+ ".*FORCE_BF16_PRECISION.*",
150
+ ""
151
+ ]
152
+ ]
153
+ }
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/build_vocab.py ADDED
@@ -0,0 +1,77 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ ###############################################################################
16
+ # Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
17
+ ###############################################################################
18
+ # Changes:
19
+ # - updated imports
20
+
21
+ r"""Build vocab for a subclass of Text2TextProblem.
22
+
23
+ build_vocab \
24
+ --problem=program_search_algolisp \
25
+ --data_dir=~/t2t_data \
26
+ --tmp_dir=~/t2t_data/tmp
27
+ """
28
+
29
+ from __future__ import absolute_import
30
+ from __future__ import division
31
+ from __future__ import print_function
32
+
33
+ import os
34
+
35
+ from TensorFlow.nlp.transformer.utils import problems as problems_lib # pylint: disable=unused-import
36
+ from TensorFlow.nlp.transformer.data_generators import text_problems
37
+ from TensorFlow.nlp.transformer.utils import registry
38
+ import tensorflow.compat.v1 as tf
39
+
40
+ flags = tf.flags
41
+ FLAGS = flags.FLAGS
42
+
43
+ flags.DEFINE_string("data_dir", "/tmp/t2t/data_dir",
44
+ "Directory to place the generated vocabulary file in.")
45
+
46
+ flags.DEFINE_string("tmp_dir", "/tmp/t2t/tmp_dir",
47
+ "Temporary storage directory.")
48
+
49
+ flags.DEFINE_string("problem", None,
50
+ "Problem to generate the vocabulary file for.")
51
+
52
+ flags.mark_flag_as_required("problem")
53
+
54
+
55
+ def main(_):
56
+ problem = registry.problem(FLAGS.problem)
57
+
58
+ # We make the assumption that the problem is a subclass of Text2TextProblem.
59
+ assert isinstance(problem, text_problems.Text2TextProblem)
60
+
61
+ data_dir = os.path.expanduser(FLAGS.data_dir)
62
+ tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
63
+
64
+ tf.gfile.MakeDirs(data_dir)
65
+ tf.gfile.MakeDirs(tmp_dir)
66
+
67
+ tf.logging.info("Saving vocabulary to data_dir: %s" % data_dir)
68
+
69
+ problem.get_or_create_vocab(data_dir, tmp_dir)
70
+
71
+ tf.logging.info("Saved vocabulary file: " +
72
+ os.path.join(data_dir, problem.vocab_filename))
73
+
74
+
75
+ if __name__ == "__main__":
76
+ tf.logging.set_verbosity(tf.logging.INFO)
77
+ tf.app.run()
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/compute_bleu.py ADDED
@@ -0,0 +1,42 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ import subprocess
5
+ from argparse import ArgumentParser
6
+ from TensorFlow.common.tb_utils import TBSummary
7
+
8
+
9
+ parser = ArgumentParser()
10
+ parser.add_argument('--decoded_file', '-df', type=str, default='wmt14.tgt.tok',
11
+ help='Decoded file produced by t2t-decode command.')
12
+ parser.add_argument('--log_dir', '-ld', type=str, default=None,
13
+ help='Where to store TensorBoard summary file, '
14
+ 'if None summary will not be saved.')
15
+ args = parser.parse_args()
16
+
17
+ def get_sacremoses_version():
18
+ ver_line = subprocess.run(['sacremoses', '--version'], stdout=subprocess.PIPE).stdout.decode()
19
+ return tuple(map(int, ver_line.split()[-1].split('.')))
20
+
21
+ def get_sacremoses_cmd(version):
22
+ if version >= (0, 0, 42):
23
+ return ['sacremoses', '-l', 'de', 'detokenize']
24
+ else:
25
+ return ['sacremoses', 'detokenize', '-l', 'de']
26
+
27
+ def main():
28
+ detok = subprocess.run(get_sacremoses_cmd(get_sacremoses_version()),
29
+ stdin=open(args.decoded_file, 'r'),
30
+ stdout=subprocess.PIPE)
31
+ bleu = subprocess.run(['sacrebleu', '-t', 'wmt14', '-l', 'en-de', '-b'],
32
+ input=detok.stdout, stdout=subprocess.PIPE)
33
+ score = bleu.stdout.decode()
34
+ print('BLEU:', score)
35
+
36
+ if args.log_dir is not None:
37
+ with TBSummary(args.log_dir) as tb:
38
+ tb.add_scalar('accuracy', float(score), 0)
39
+
40
+
41
+ if __name__ == '__main__':
42
+ main()
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/all_problems.py ADDED
@@ -0,0 +1,74 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Imports for problem modules."""
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import importlib
22
+ import six
23
+ from six.moves import range # pylint: disable=redefined-builtin
24
+
25
+ MODULES = [
26
+ "TensorFlow.nlp.transformer.data_generators.translate_encs_cubbitt",
27
+ "TensorFlow.nlp.transformer.data_generators.translate_encs",
28
+ "TensorFlow.nlp.transformer.data_generators.translate_ende",
29
+ "TensorFlow.nlp.transformer.data_generators.translate_enes",
30
+ "TensorFlow.nlp.transformer.data_generators.translate_enet",
31
+ "TensorFlow.nlp.transformer.data_generators.translate_enfr",
32
+ "TensorFlow.nlp.transformer.data_generators.translate_enid",
33
+ "TensorFlow.nlp.transformer.data_generators.translate_enmk",
34
+ "TensorFlow.nlp.transformer.data_generators.translate_envi",
35
+ "TensorFlow.nlp.transformer.data_generators.translate_enzh",
36
+ ]
37
+ ALL_MODULES = list(MODULES)
38
+
39
+
40
+
41
+ def _is_import_err_msg(err_str, module):
42
+ parts = module.split(".")
43
+ suffixes = [".".join(parts[i:]) for i in range(len(parts))]
44
+ prefixes = [".".join(parts[:i]) for i in range(len(parts))]
45
+ return err_str in (["No module named %s" % suffix for suffix in suffixes] +
46
+ ["No module named '%s'" % suffix for suffix in suffixes] +
47
+ ["No module named %s" % prefix for prefix in prefixes] +
48
+ ["No module named '%s'" % prefix for prefix in prefixes])
49
+
50
+
51
+ def _handle_errors(errors):
52
+ """Log out and possibly reraise errors during import."""
53
+ if not errors:
54
+ return
55
+ log_all = True # pylint: disable=unused-variable
56
+ err_msg = "T2T: skipped importing {num_missing} data_generators modules."
57
+ print(err_msg.format(num_missing=len(errors)))
58
+ for module, err in errors:
59
+ err_str = str(err)
60
+ if log_all:
61
+ print("Did not import module: %s; Cause: %s" % (module, err_str))
62
+ if not _is_import_err_msg(err_str, module):
63
+ print("From module %s" % module)
64
+ raise err
65
+
66
+
67
+ def import_modules(modules):
68
+ errors = []
69
+ for module in modules:
70
+ try:
71
+ importlib.import_module(module)
72
+ except ImportError as error:
73
+ errors.append((module, error))
74
+ _handle_errors(errors)
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/cleaner_en_xx.py ADDED
@@ -0,0 +1,176 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # encoding=UTF-8
17
+ """An unsophisticated data cleaner for en-.. sentence translation pairs.
18
+
19
+ This pattern-based English-... cleaner aims fairly aggressively for clean
20
+ sentence-like pairs. It discards pairs if the English member has signs of
21
+ non-sentence noise or origin, e.g., lacks expected punctuation or has suspicious
22
+ character sequences. It also simplistically detects and corrects some missing
23
+ sentence breaks. It makes minimal assumptions about the other language, mainly
24
+ that its sentences can end in one of '.!?' and that its sentences can start
25
+ with an ASCII capital letter.
26
+ """
27
+
28
+ from __future__ import absolute_import
29
+ from __future__ import division
30
+ from __future__ import print_function
31
+ from __future__ import unicode_literals
32
+
33
+
34
+ import itertools
35
+ import re
36
+
37
+ from TensorFlow.nlp.transformer.data_generators import text_encoder
38
+
39
+ import tensorflow.compat.v1 as tf
40
+
41
+
42
+ _RE_GOOD_S_START = re.compile(r'^["“”]?[A-Z]')
43
+ _RE_GOOD_S_END = re.compile(r'\w[.?!]["”]?$', re.UNICODE)
44
+
45
+ _RE_LABEL_COLON = re.compile(r'^\w+\.?( \w+)?: ', re.UNICODE)
46
+ _RE_DIGIT_SPACE_DIGIT = re.compile(r'\d +\d', re.UNICODE)
47
+ _RE_ALL_CAP_WORDS = re.compile(r'^[A-Z]\S*(\s+[A-Z]\S+)+\s*$')
48
+
49
+ _RE_DQ_ONE = re.compile(r'^[^"“”]*["“”][^"“”]*$')
50
+ _RE_DQ_INITIAL = re.compile(r'^["“”]([^"“”]+)$')
51
+ _RE_DQ_FINAL = re.compile(r'^[^"“”]+["“”]$')
52
+ _RE_DQ_LINE = re.compile(r'^["“”].*["“”]$')
53
+
54
+ _RE_DQ_MANY = re.compile(r'(["“”].*){3,}')
55
+ _RE_SQ_MANY = re.compile(r'''(['‘’][^st].*){3,}''')
56
+ _RE_CHARS_QQ = re.compile(r'''["“”'‘’]\s*["“”'‘’]''')
57
+ _RE_SPACE_PUNCT_SPACE = re.compile(r'''\s["“”'‘’,:;]\s''')
58
+
59
+ _RE_COPYRIGHT = re.compile(r'©|^Copyright|^\(C\)')
60
+ _RE_UNMATCHED_PAREN_LEFT = re.compile(r'[(][^)]*$')
61
+ _RE_UNMATCHED_PAREN_RIGHT = re.compile(r'^[^(]*[)]')
62
+ _RE_TAGLINE_CITY = re.compile(r'^[A-Z]{2,}(\s+[A-Z]+)*\s+-')
63
+ _RE_CHARS_UPPER_UNDERSCORE = re.compile(r'^[A-Z]+[a-z]*_')
64
+
65
+
66
+ def paracrawl_v3_pairs(paracrawl_file):
67
+ """Generates raw (English, other) pairs from a ParaCrawl V3.0 data file.
68
+
69
+ Args:
70
+ paracrawl_file: A ParaCrawl V3.0 en-.. data file.
71
+ Yields:
72
+ Pairs of (sentence_en, sentence_xx), as Unicode strings.
73
+ Raises:
74
+ StopIteration: If the file ends while this method is in the middle of
75
+ creating a translation pair.
76
+ """
77
+ raw_sentences = _raw_sentences(paracrawl_file)
78
+ for s_en in raw_sentences:
79
+ try:
80
+ s_xx = next(raw_sentences)
81
+ if s_en and s_xx: # Prevent empty string examples.
82
+ yield s_en, s_xx
83
+ except StopIteration:
84
+ tf.logging.error(
85
+ 'Unmatched final sentence while reading in sentence pairs: [%s]',
86
+ s_en)
87
+
88
+
89
+ def _raw_sentences(paracrawl_file):
90
+ """Generates Unicode strings, one for each <seg> in a ParaCrawl data file.
91
+
92
+ Also decodes some of the most common HTML entities found in ParaCrawl data.
93
+
94
+ Args:
95
+ paracrawl_file: A ParaCrawl V3.0 en-.. data file.
96
+ Yields:
97
+ One Unicode string for each <seg> element in the ParaCrawl data file.
98
+ """
99
+ for line_utf8 in paracrawl_file:
100
+ line_uni = line_utf8.decode('UTF-8')
101
+ text_match = re.match(r' +<seg>(.*)</seg>$', line_uni)
102
+ if text_match:
103
+ txt = text_match.group(1)
104
+ txt = re.sub(r'&amp;', r'&', txt)
105
+ txt = re.sub(r'& ?amp;', r'&', txt)
106
+ txt = re.sub(r'& ?apos;', r"'", txt)
107
+ txt = re.sub(r'& ?quot;', r'"', txt)
108
+ txt = re.sub(r'& ?lt;', r'<', txt)
109
+ txt = re.sub(r'& ?gt;', r'>', txt)
110
+ yield txt
111
+
112
+
113
+ def clean_en_xx_pairs(en_xx_pairs):
114
+ """Generates a cleaned-up stream of (English, other) translation pairs.
115
+
116
+ Cleaning includes both filtering and simplistic sentence splitting, with
117
+ minimal assumptions on the non-English pair member: (1) All filtering is
118
+ done based on the English member of the pair, and (2) sentence splitting
119
+ assumes only that sentences can end with one of '.!?' and begin with an
120
+ ASCII uppercase letter. Input pairs that would get split into different
121
+ numbers of sentences (e.g., three English sentences vs. two German ones) are
122
+ discarded.
123
+
124
+ Args:
125
+ en_xx_pairs: A stream (iterable) of Unicode string pairs. Each item in the
126
+ stream should be a (sentence_en, sentence_xx) pair.
127
+ Yields:
128
+ Cleaned-up (sentence_en, sentence_xx) pairs.
129
+ """
130
+ for s1, s2 in en_xx_pairs:
131
+ if _regex_filter(s1):
132
+ continue
133
+ s1_list, s2_list = _split_sentences(s1, s2)
134
+ if len(s1_list) != len(s2_list):
135
+ continue # discard this pair
136
+ elif len(s1_list) == 1:
137
+ yield s1, s2
138
+ else:
139
+ for s1_subsentence, s2_subsentence in zip(s1_list, s2_list):  # zip: itertools.izip does not exist on Python 3
140
+ if _regex_filter(s1_subsentence):
141
+ continue
142
+ yield s1_subsentence, s2_subsentence
143
+
144
+
145
+ def _regex_filter(sentence):
146
+ return (not _is_match(sentence, _RE_GOOD_S_START)
147
+ or not _is_match(sentence, _RE_GOOD_S_END)
148
+ or _is_match(sentence, _RE_LABEL_COLON)
149
+ or _is_match(sentence, _RE_DIGIT_SPACE_DIGIT)
150
+ or _is_match(sentence, _RE_DQ_ONE)
151
+ or _is_match(sentence, _RE_DQ_INITIAL)
152
+ or _is_match(sentence, _RE_DQ_FINAL)
153
+ or _is_match(sentence, _RE_DQ_LINE)
154
+ or _is_match(sentence, _RE_DQ_MANY)
155
+ or _is_match(sentence, _RE_SQ_MANY)
156
+ or _is_match(sentence, _RE_CHARS_QQ)
157
+ or _is_match(sentence, _RE_SPACE_PUNCT_SPACE)
158
+ or _is_match(sentence, _RE_COPYRIGHT)
159
+ or _is_match(sentence, _RE_UNMATCHED_PAREN_LEFT)
160
+ or _is_match(sentence, _RE_UNMATCHED_PAREN_RIGHT)
161
+ or _is_match(sentence, _RE_TAGLINE_CITY)
162
+ or _is_match(sentence, _RE_CHARS_UPPER_UNDERSCORE))
163
+
164
+
165
+ def _is_match(sentence, regex):
166
+ return regex.search(sentence)
167
+
168
+
169
+ def _split_sentences(s1, s2):
170
+ s1 = text_encoder.native_to_unicode(s1)
171
+ s2 = text_encoder.native_to_unicode(s2)
172
+ s1 = re.sub(r'(\w[A-Z]|[0-9a-z])([.!?]) ([A-Z])', r'\1\2__|__\3', s1)
173
+ s2 = re.sub(r'([^0-9][.!?]) ([A-Z])', r'\1__|__\2', s2)
174
+ s1_subsentences = s1.split('__|__')
175
+ s2_subsentences = s2.split('__|__')
176
+ return s1_subsentences, s2_subsentences
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/generator_utils.py ADDED
@@ -0,0 +1,1259 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Utilities for data generators."""
17
+
18
+ from __future__ import absolute_import
19
+ from __future__ import division
20
+ from __future__ import print_function
21
+
22
+ import functools
23
+ import gzip
24
+ import math
25
+ import multiprocessing
26
+ import os
27
+ import random
28
+ import stat
29
+ import tarfile
30
+ import tempfile
31
+ import numpy as np
32
+ import requests
33
+ import six
34
+ from six.moves import range # pylint: disable=redefined-builtin
35
+ # Imports urllib on Python2, urllib.request on Python3
36
+ import six.moves.urllib_request as urllib
37
+
38
+ from TensorFlow.nlp.transformer.data_generators import text_encoder
39
+
40
+ import tensorflow.compat.v1 as tf
41
+
42
+ UNSHUFFLED_SUFFIX = "-unshuffled"
43
+
44
+ flags = tf.flags
45
+ FLAGS = flags.FLAGS
46
+
47
+ def to_example(dictionary):
48
+ """Helper: build tf.Example from (string -> int/float/str list) dictionary."""
49
+ features = {}
50
+ for (k, v) in six.iteritems(dictionary):
51
+ if not v:
52
+ raise ValueError("Empty generated field: %s" % str((k, v)))
53
+ # Subtly in PY2 vs PY3, map is not scriptable in py3. As a result,
54
+ # map objects will fail with TypeError, unless converted to a list.
55
+ if six.PY3 and isinstance(v, map):
56
+ v = list(v)
57
+ if (isinstance(v[0], six.integer_types) or
58
+ np.issubdtype(type(v[0]), np.integer)):
59
+ features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))
60
+ elif isinstance(v[0], float):
61
+ features[k] = tf.train.Feature(float_list=tf.train.FloatList(value=v))
62
+ elif isinstance(v[0], six.string_types):
63
+ if not six.PY2: # Convert in python 3.
64
+ v = [bytes(x, "utf-8") for x in v]
65
+ features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
66
+ elif isinstance(v[0], bytes):
67
+ features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
68
+ else:
69
+ raise ValueError("Value for %s is not a recognized type; v: %s type: %s" %
70
+ (k, str(v[0]), str(type(v[0]))))
71
+ return tf.train.Example(features=tf.train.Features(feature=features))
72
+
73
+
74
+ def generate_files_distributed(generator,
75
+ output_name,
76
+ output_dir,
77
+ num_shards=1,
78
+ max_cases=None,
79
+ task_id=0):
80
+ """generate_files but with a single writer writing to shard task_id."""
81
+ assert task_id < num_shards
82
+ output_filename = sharded_name(output_name, task_id, num_shards)
83
+ output_file = os.path.join(output_dir, output_filename)
84
+ tf.logging.info("Writing to file %s", output_file)
85
+ writer = tf.python_io.TFRecordWriter(output_file)
86
+
87
+ counter = 0
88
+ for case in generator:
89
+ if counter % 100000 == 0:
90
+ tf.logging.info("Generating case %d for %s." % (counter, output_name))
91
+ counter += 1
92
+ if max_cases and counter > max_cases:
93
+ break
94
+ example = to_example(case)
95
+ writer.write(example.SerializeToString())
96
+
97
+ writer.close()
98
+ return output_file
99
+
100
+
101
+ def _data_filenames(output_name, output_dir, num_shards):
102
+ return [
103
+ os.path.join(output_dir, fname)
104
+ for fname in shard_filepath(output_name, num_shards)
105
+ ]
106
+
107
+
108
+ def train_data_filenames(problem, output_dir, num_shards):
109
+ return _data_filenames(problem + "-train", output_dir, num_shards)
110
+
111
+
112
+ def dev_data_filenames(problem, output_dir, num_shards):
113
+ return _data_filenames(problem + "-dev", output_dir, num_shards)
114
+
115
+
116
+ def test_data_filenames(problem, output_dir, num_shards):
117
+ return _data_filenames(problem + "-test", output_dir, num_shards)
118
+
119
+
120
+ def combined_data_filenames(problem, output_dir, num_training_shards):
121
+ return (train_data_filenames(problem, output_dir, num_training_shards) +
122
+ dev_data_filenames(problem, output_dir, 1) + test_data_filenames(
123
+ problem, output_dir, 1))
124
+
125
+
126
+ def sharded_name(base_name, shard, total_shards):
127
+ return "%s-%.5d-of-%.5d" % (base_name, shard, total_shards)
128
+
129
+
130
+ def shard_filepath(fname, num_shards):
131
+ return [
132
+ sharded_name(fname, shard, num_shards) for shard in range(num_shards)
133
+ ]
134
+
135
+
136
+ def outputs_exist(filenames):
137
+ for out_fname in filenames:
138
+ out_fname = out_fname.replace(UNSHUFFLED_SUFFIX, "")
139
+ if tf.gfile.Exists(out_fname):
140
+ return out_fname
141
+
142
+
143
+ def generate_files(generator, output_filenames,
144
+ max_cases=None, cycle_every_n=1):
145
+ """Generate cases from a generator and save as TFRecord files.
146
+
147
+ Generated cases are transformed to tf.Example protos and saved as TFRecords
148
+ in sharded files named output_dir/output_name-00..N-of-00..M=num_shards.
149
+
150
+ Args:
151
+ generator: a generator yielding (string -> int/float/str list) dictionaries.
152
+ output_filenames: List of output file paths.
153
+ max_cases: maximum number of cases to get from the generator;
154
+ if None (default), we use the generator until StopIteration is raised.
155
+ cycle_every_n: how many cases from the generator to take before
156
+ switching to the next shard; by default set to 1, switch every case.
157
+ """
158
+ if outputs_exist(output_filenames):
159
+ tf.logging.info("Skipping generator because outputs files exists at {}"
160
+ .format(output_filenames))
161
+ return
162
+ tmp_filenames = [fname + ".incomplete" for fname in output_filenames]
163
+ num_shards = len(output_filenames)
164
+ # Check if is training or eval, ref: train_data_filenames().
165
+ if num_shards > 0:
166
+ if "-train" in output_filenames[0]:
167
+ tag = "train"
168
+ elif "-dev" in output_filenames[0]:
169
+ tag = "eval"
170
+ else:
171
+ tag = "other"
172
+
173
+ writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames]
174
+ counter, shard = 0, 0
175
+ for case in generator:
176
+ if case is None:
177
+ continue
178
+ if counter % 100000 == 0:
179
+ tf.logging.info("Generating case %d." % counter)
180
+ counter += 1
181
+ if max_cases and counter > max_cases:
182
+ break
183
+ example = to_example(case)
184
+ writers[shard].write(example.SerializeToString())
185
+ if counter % cycle_every_n == 0:
186
+ shard = (shard + 1) % num_shards
187
+
188
+ for writer in writers:
189
+ writer.close()
190
+
191
+ for tmp_name, final_name in zip(tmp_filenames, output_filenames):
192
+ tf.gfile.Rename(tmp_name, final_name)
193
+
194
+ tf.logging.info("Generated %s Examples", counter)
195
+
196
+
197
+ def download_report_hook(count, block_size, total_size):
198
+ """Report hook for download progress.
199
+
200
+ Args:
201
+ count: current block number
202
+ block_size: block size
203
+ total_size: total size
204
+ """
205
+ percent = int(count * block_size * 100 / total_size)
206
+ print("\r%d%%" % percent + " completed", end="\r")
207
+
208
+
209
+ def maybe_download(directory, filename, uri):
210
+ """Download filename from uri unless it's already in directory.
211
+
212
+ Copies a remote file to local if that local file does not already exist. If
213
+ the local file pre-exists this function call, it does not check that the local
214
+ file is a copy of the remote.
215
+
216
+ Remote filenames can be filepaths, any URI readable by tensorflow.gfile, or a
217
+ URL.
218
+
219
+ Args:
220
+ directory: path to the directory that will be used.
221
+ filename: name of the file to download to (do nothing if it already exists).
222
+ uri: URI to copy (or download) from.
223
+
224
+ Returns:
225
+ The path to the downloaded file.
226
+ """
227
+ tf.gfile.MakeDirs(directory)
228
+ filepath = os.path.join(directory, filename)
229
+ if tf.gfile.Exists(filepath):
230
+ tf.logging.info("Not downloading, file already found: %s" % filepath)
231
+ return filepath
232
+
233
+ tf.logging.info("Downloading %s to %s" % (uri, filepath))
234
+ try:
235
+ tf.gfile.Copy(uri, filepath)
236
+ except tf.errors.UnimplementedError:
237
+ if uri.startswith("http"):
238
+ inprogress_filepath = filepath + ".incomplete"
239
+ inprogress_filepath, _ = urllib.urlretrieve(
240
+ uri, inprogress_filepath, reporthook=download_report_hook)
241
+ # Print newline to clear the carriage return from the download progress
242
+ print()
243
+ tf.gfile.Rename(inprogress_filepath, filepath)
244
+ else:
245
+ raise ValueError("Unrecognized URI: " + filepath)
246
+ statinfo = os.stat(filepath)
247
+ tf.logging.info("Successfully downloaded %s, %s bytes." %
248
+ (filename, statinfo.st_size))
249
+ return filepath
250
+
251
+
252
+ def maybe_download_from_drive(directory, filename, url):
253
+ """Download filename from Google drive unless it's already in directory.
254
+
255
+ Args:
256
+ directory: path to the directory that will be used.
257
+ filename: name of the file to download to (do nothing if it already exists).
258
+ url: URL to download from.
259
+
260
+ Returns:
261
+ The path to the downloaded file.
262
+ """
263
+ if not tf.gfile.Exists(directory):
264
+ tf.logging.info("Creating directory %s" % directory)
265
+ tf.gfile.MakeDirs(directory)
266
+ filepath = os.path.join(directory, filename)
267
+ confirm_token = None
268
+ if tf.gfile.Exists(filepath):
269
+ tf.logging.info("Not downloading, file already found: %s" % filepath)
270
+ return filepath
271
+
272
+ # Since the file is big, drive will scan it for virus and take it to a
273
+ # warning page. We find the confirm token on this page and append it to the
274
+ # URL to start the download process.
275
+ confirm_token = None
276
+ session = requests.Session()
277
+ response = session.get(url, stream=True)
278
+ for k, v in response.cookies.items():
279
+ if k.startswith("download_warning"):
280
+ confirm_token = v
281
+
282
+ if confirm_token:
283
+ url = url + "&confirm=" + confirm_token
284
+ tf.logging.info("Downloading %s to %s" % (url, filepath))
285
+
286
+ response = session.get(url, stream=True)
287
+ # Now begin the download.
288
+ chunk_size = 16 * 1024
289
+ with open(filepath, "wb") as f:
290
+ for chunk in response.iter_content(chunk_size):
291
+ if chunk:
292
+ f.write(chunk)
293
+
294
+ # Print newline to clear the carriage return from the download progress
295
+ print()
296
+ statinfo = os.stat(filepath)
297
+ tf.logging.info("Successfully downloaded %s, %s bytes." % (filename,
298
+ statinfo.st_size))
299
+ return filepath
300
+
301
+
302
+ def gunzip_file(gz_path, new_path):
303
+ """Unzips from gz_path into new_path.
304
+
305
+ Args:
306
+ gz_path: path to the zipped file.
307
+ new_path: path to where the file will be unzipped.
308
+ """
309
+ if tf.gfile.Exists(new_path):
310
+ tf.logging.info("File %s already exists, skipping unpacking" % new_path)
311
+ return
312
+ tf.logging.info("Unpacking %s to %s" % (gz_path, new_path))
313
+ # We may be unpacking into a newly created directory, add write mode.
314
+ mode = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IROTH  # bitwise OR; logical `or` would keep only S_IRWXU
315
+ os.chmod(os.path.dirname(new_path), mode)
316
+ with gzip.open(gz_path, "rb") as gz_file:
317
+ with tf.gfile.GFile(new_path, mode="wb") as new_file:
318
+ for line in gz_file:
319
+ new_file.write(line)
320
+
321
+
322
+ def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
323
+ generator, max_subtoken_length=None,
324
+ reserved_tokens=None):
325
+ """Inner implementation for vocab generators.
326
+
327
+ Args:
328
+ data_dir: The base directory where data and vocab files are stored. If None,
329
+ then do not save the vocab even if it doesn't exist.
330
+ vocab_filename: relative filename where vocab file is stored
331
+ vocab_size: target size of the vocabulary constructed by SubwordTextEncoder
332
+ generator: a generator that produces tokens from the vocabulary
333
+ max_subtoken_length: an optional integer. Set this to a finite value to
334
+ avoid quadratic costs during vocab building.
335
+ reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS`
336
+ should be a prefix of `reserved_tokens`. If `None`, defaults to
337
+ `RESERVED_TOKENS`.
338
+
339
+ Returns:
340
+ A SubwordTextEncoder vocabulary object.
341
+ """
342
+ if data_dir and vocab_filename:
343
+ vocab_filepath = os.path.join(data_dir, vocab_filename)
344
+ if tf.gfile.Exists(vocab_filepath):
345
+ tf.logging.info("Found vocab file: %s", vocab_filepath)
346
+ return text_encoder.SubwordTextEncoder(vocab_filepath)
347
+ else:
348
+ vocab_filepath = None
349
+
350
+ tf.logging.info("Generating vocab file: %s", vocab_filepath)
351
+ vocab = text_encoder.SubwordTextEncoder.build_from_generator(
352
+ generator, vocab_size, max_subtoken_length=max_subtoken_length,
353
+ reserved_tokens=reserved_tokens)
354
+
355
+ if vocab_filepath:
356
+ tf.gfile.MakeDirs(data_dir)
357
+ vocab.store_to_file(vocab_filepath)
358
+
359
+ return vocab
360
+
361
+
362
+ def get_or_generate_vocab(data_dir, tmp_dir, vocab_filename, vocab_size,
363
+ sources, file_byte_budget=1e6,
364
+ max_subtoken_length=None):
365
+ """Generate a vocabulary from the datasets in sources."""
366
+
367
+ vocab_generator = generate_lines_for_vocab(tmp_dir, sources, file_byte_budget)
368
+ return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
369
+ vocab_generator, max_subtoken_length)
370
+
371
+
372
+ def generate_lines_for_vocab(tmp_dir, sources, file_byte_budget=1e6):
373
+ """Generate lines for vocabulary generation."""
374
+ tf.logging.info("Generating vocab from: %s", str(sources))
375
+ for source in sources:
376
+ url = source[0]
377
+ filename = os.path.basename(url)
378
+ compressed_file = maybe_download(tmp_dir, filename, url)
379
+
380
+ for lang_file in source[1]:
381
+ tf.logging.info("Reading file: %s" % lang_file)
382
+ filepath = os.path.join(tmp_dir, lang_file)
383
+
384
+ # Extract from tar if needed.
385
+ if not tf.gfile.Exists(filepath):
386
+ read_type = "r:gz" if filename.endswith("tgz") else "r"
387
+ with tarfile.open(compressed_file, read_type) as corpus_tar:
388
+ corpus_tar.extractall(tmp_dir)
389
+
390
+ # For some datasets a second extraction is necessary.
391
+ if lang_file.endswith(".gz"):
392
+ new_filepath = os.path.join(tmp_dir, lang_file[:-3])
393
+ if tf.gfile.Exists(new_filepath):
394
+ tf.logging.info(
395
+ "Subdirectory %s already exists, skipping unpacking" % filepath)
396
+ else:
397
+ tf.logging.info("Unpacking subdirectory %s" % filepath)
398
+ gunzip_file(filepath, new_filepath)
399
+ filepath = new_filepath
400
+
401
+ with tf.gfile.GFile(filepath, mode="r") as source_file:
402
+ file_byte_budget_ = file_byte_budget
403
+ counter = 0
404
+ countermax = int(source_file.size() / file_byte_budget_ / 2)
405
+ for line in source_file:
406
+ if counter < countermax:
407
+ counter += 1
408
+ else:
409
+ if file_byte_budget_ <= 0:
410
+ break
411
+ line = line.strip()
412
+ file_byte_budget_ -= len(line)
413
+ counter = 0
414
+ yield line
415
+
416
+
417
+ def get_or_generate_tabbed_vocab(data_dir, tmp_dir, source_filename,
418
+ index, vocab_filename, vocab_size):
419
+ r"""Generate a vocabulary from a tabbed source file.
420
+
421
+ The source is a file of source, target pairs, where each line contains
422
+ a source string and a target string, separated by a tab ('\t') character.
423
+ The index parameter specifies 0 for the source or 1 for the target.
424
+
425
+ Args:
426
+ data_dir: path to the data directory.
427
+ tmp_dir: path to the temporary directory.
428
+ source_filename: the name of the tab-separated source file.
429
+ index: index.
430
+ vocab_filename: the name of the vocabulary file.
431
+ vocab_size: vocabulary size.
432
+
433
+ Returns:
434
+ The vocabulary.
435
+ """
436
+ def generate():
437
+ filepath = os.path.join(tmp_dir, source_filename)
438
+ tf.logging.info("Generating vocab from %s", filepath)
439
+ with tf.gfile.GFile(filepath, mode="r") as source_file:
440
+ for line in source_file:
441
+ line = line.strip()
442
+ if line and "\t" in line:
443
+ parts = line.split("\t", 1)
444
+ part = parts[index].strip()
445
+ yield part
446
+
447
+ return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
448
+ generate())
449
+
450
+
451
+ def get_or_generate_txt_vocab(data_dir, vocab_filename, vocab_size,
452
+ filepatterns):
453
+ """Generate a vocabulary from txt files with example-per-line."""
454
+ if isinstance(filepatterns, str):
455
+ filepatterns = [filepatterns]
456
+
457
+ def generate():
458
+ tf.logging.info("Generating vocab from %s", filepatterns)
459
+ for filepattern in filepatterns:
460
+ for filename in tf.gfile.Glob(filepattern):
461
+ with tf.gfile.GFile(filename, mode="r") as source_file:
462
+ for line in source_file:
463
+ yield line.strip()
464
+
465
+ return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
466
+ generate())
467
+
468
+
469
+ def read_records(filename):
470
+ reader = tf.python_io.tf_record_iterator(filename)
471
+ records = []
472
+ for record in reader:
473
+ records.append(record)
474
+ if len(records) % 100000 == 0:
475
+ tf.logging.info("read: %d", len(records))
476
+ return records
477
+
478
+
479
+ def write_records(records, out_filename):
480
+ writer = tf.python_io.TFRecordWriter(out_filename)
481
+ for count, record in enumerate(records):
482
+ writer.write(record)
483
+ if count > 0 and count % 100000 == 0:
484
+ tf.logging.info("write: %d", count)
485
+ writer.close()
486
+
487
+
488
+ def generate_dataset_and_shuffle(train_gen,
489
+ train_paths,
490
+ dev_gen,
491
+ dev_paths,
492
+ shuffle=True):
493
+ generate_files(train_gen, train_paths)
494
+ generate_files(dev_gen, dev_paths)
495
+ if shuffle:
496
+ shuffle_dataset(train_paths + dev_paths)
497
+
498
+
499
+ def _shuffle_single(fname, extra_fn=None):
500
+ """Shuffle a single file of records.
501
+
502
+ Args:
503
+ fname: a string
504
+ extra_fn: an optional function from list of TFRecords to list of TFRecords
505
+ to be called after shuffling.
506
+ """
507
+ records = read_records(fname)
508
+ random.shuffle(records)
509
+ if extra_fn is not None:
510
+ records = extra_fn(records)
511
+ out_fname = fname.replace(UNSHUFFLED_SUFFIX, "")
512
+ write_records(records, out_fname)
513
+ tf.gfile.Remove(fname)
514
+
515
+
516
+ def shuffle_dataset(filenames, extra_fn=None):
517
+ """Shuffles the dataset.
518
+
519
+ Args:
520
+ filenames: a list of strings
521
+ extra_fn: an optional function from list of records to list of records
522
+ to be called after shuffling a file.
523
+ """
524
+ if outputs_exist(filenames):
525
+ tf.logging.info("Skipping shuffle because output files exist")
526
+ return
527
+ tf.logging.info("Shuffling data...")
528
+ for filename in filenames:
529
+ _shuffle_single(filename, extra_fn=extra_fn)
530
+ tf.logging.info("Data shuffled.")
531
+
532
+
533
+ class SequencePacker(object):
534
+ """Helper for constructing a packed example of sequence examples.
535
+
536
+ See comments to pack_examples()
537
+ """
538
+
539
+ def __init__(self, first_sequence, spacing=2):
540
+ self._spacing = spacing
541
+ self._ids = first_sequence[:]
542
+ self._segmentation = [1] * len(first_sequence)
543
+ self._position = list(range(len(first_sequence)))
544
+
545
+ def add(self, ids):
546
+ padding = [0] * self._spacing
547
+ self._ids.extend(padding + ids)
548
+ next_segment_num = self._segmentation[-1] + 1 if self._segmentation else 1
549
+ self._segmentation.extend(padding + [next_segment_num] * len(ids))
550
+ self._position.extend(padding + list(range(len(ids))))
551
+
552
+ def can_fit(self, ids, packed_length):
553
+ return len(self._ids) + self._spacing + len(ids) <= packed_length
554
+
555
+ def pad(self, packed_length):
556
+ padding = [0] * (packed_length - len(self._ids))
557
+ self._ids.extend(padding)
558
+ self._segmentation.extend(padding)
559
+ self._position.extend(padding)
560
+
561
+ def to_dict(self):
562
+ return {"inputs": [0],
563
+ "targets": self._ids,
564
+ "targets_segmentation": self._segmentation,
565
+ "targets_position": self._position}
566
+
567
+
568
+ class SequencePairPacker(object):
569
+ """Helper for packing sequence-to-sequence examples into bigger examples.
570
+
571
+ See comments to pack_examples()
572
+ """
573
+
574
+ def __init__(self, first_sequence_pair, spacing=2):
575
+ self._inputs = SequencePacker(first_sequence_pair[0], spacing)
576
+ self._targets = SequencePacker(first_sequence_pair[1], spacing)
577
+
578
+ def add(self, pair):
579
+ self._inputs.add(pair[0])
580
+ self._targets.add(pair[1])
581
+
582
+ def can_fit(self, pair, packed_length):
583
+ return (self._inputs.can_fit(pair[0], packed_length) and
584
+ self._targets.can_fit(pair[1], packed_length))
585
+
586
+ def pad(self, packed_length):
587
+ self._inputs.pad(packed_length)
588
+ self._targets.pad(packed_length)
589
+
590
+ def to_dict(self):
591
+ ret = self._targets.to_dict()
592
+ inputs_dict = self._inputs.to_dict()
593
+ ret["inputs"] = inputs_dict["targets"]
594
+ ret["inputs_segmentation"] = inputs_dict["targets_segmentation"]
595
+ ret["inputs_position"] = inputs_dict["targets_position"]
596
+ return ret
597
+
598
+
599
+ def pack_examples(examples,
600
+ has_inputs,
601
+ packed_length=256,
602
+ spacing=2,
603
+ queue_size=10,
604
+ chop_long_sequences=False):
605
+ """Pack examples into longer examples.
606
+
607
+ If has_inputs=False, we are packing single-sequence examples with
608
+ targets only and no inputs.
609
+
610
+ In this case, we concatenate the targets from several examples to form
611
+ each new example. We insert a number of zeros for spacing between the
612
+ original sequences. This is to help the sequences stay separate
613
+ under convolutions. If chop_long_sequences is set, then any input sequence
614
+ longer than packed_length gets chopped up into multiple examples. Otherwise,
615
+ long sequences are emitted as singletons.
616
+
617
+ If has_inputs=True, then we are packing sequence-to-sequence
618
+ examples. We combine several examples by concatenating the inputs
619
+ (as above) and concatenating the targets (as above). Chopping of
620
+ long sequences is not supported.
621
+
622
+ The packed examples are represented as dictionaries containing:
623
+ "inputs", "targets": the packed sequences described above
624
+ "inputs_segmentation", "targets_segmentation":
625
+ Sequences aligned with "inputs", "targets" specifying to which original
626
+ sequence each position belongs. Numbering starts from 1, and 0 is used
627
+ for spacing. This information is useful for preventing attention across
628
+ segments.
629
+ e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4]
630
+ "inputs_position", "targets_position":
631
+ Sequences aligned with "inputs", "targets" specifying position within
632
+ the original sequence. This is useful for positional encodings.
633
+ e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2]
634
+
635
+ Args:
636
+ examples: a generator returning feature dictionaries.
637
+ has_inputs: a boolean
638
+ packed_length: an integer
639
+ spacing: an integer
640
+ queue_size: an integer
641
+ chop_long_sequences: a boolean
642
+
643
+ Yields:
644
+ feature dictionaries.
645
+ """
646
+ packer = SequencePairPacker if has_inputs else SequencePacker
647
+ combined = []
648
+ for example in examples:
649
+ x = ((example["inputs"], example["targets"])
650
+ if has_inputs else example["targets"])
651
+ if chop_long_sequences and len(x) > packed_length:
652
+ assert not has_inputs
653
+ num_fragments = len(x) // packed_length
654
+ for i in range(num_fragments):
655
+ yield packer(
656
+ x[packed_length * i:packed_length * (i + 1)], spacing).to_dict()
657
+ x = x[packed_length * num_fragments:]
658
+ added = False
659
+ for c in combined:
660
+ if c.can_fit(x, packed_length):
661
+ c.add(x)
662
+ added = True
663
+ break
664
+ if not added:
665
+ if len(combined) == queue_size:
666
+ if FLAGS.with_padding:
667
+ combined[0].pad(packed_length)
668
+ yield combined[0].to_dict()
669
+ combined = combined[1:]
670
+ combined.append(packer(x, spacing))
671
+ for c in combined:
672
+ if FLAGS.with_padding:
673
+ c.pad(packed_length)
674
+ yield c.to_dict()
675
+
676
+
677
+ def pack_dataset(dataset, length, keys=None, use_custom_ops=False):
678
+ """Creates a 'packed' version of a dataset on-the-fly.
679
+
680
+ This is meant to replace the irritation of having to create a separate
681
+ "packed" version of a dataset to train efficiently on TPU.
682
+
683
+ Each example in the output dataset represents several examples in the
684
+ input dataset.
685
+
686
+ For each key in the input dataset, two additional keys are created:
687
+ <key>_segmentation: an int32 tensor identifying the parts
688
+ representing the original example.
689
+ <key>_position: an int32 tensor identifying the position within the original
690
+ example.
691
+
692
+ Example:
693
+ Two input examples get combined to form an output example.
694
+ The input examples are:
695
+ {"inputs": [8, 7, 1, 0], "targets":[4, 1, 0]}
696
+ {"inputs": [2, 3, 4, 1], "targets":[5, 6, 1]}
697
+ The output example is:
698
+ {
699
+ "inputs": [8, 7, 1, 2, 3, 4, 1, 0, 0, 0]
700
+ "inputs_segmentation": [1, 1, 1, 2, 2, 2, 2, 0, 0, 0]
701
+ "inputs_position": [0, 1, 2, 0, 1, 2, 3, 0, 0, 0]
702
+ "targets": [4, 1, 5, 6, 1, 0, 0, 0, 0, 0]
703
+ "targets_segmentation": [1, 1, 2, 2, 2, 0, 0, 0, 0, 0]
704
+ "targets_position": [0, 1, 0, 1, 2, 0, 0, 0, 0, 0]
705
+ }
706
+
707
+ 0 represents padding in both the inputs and the outputs.
708
+
709
+ Sequences in the incoming examples are truncated to length "length", and the
710
+ sequences in the output examples all have fixed (padded) length "length".
711
+
712
+ Args:
713
+ dataset: a tf.data.Dataset
714
+ length: an integer
715
+ keys: a list of strings (e.g. ["inputs", "targets"])
716
+ use_custom_ops: use a custom c++ op not included in standard tf (faster)
717
+
718
+ Returns:
719
+ a tf.data.Dataset
720
+ """
721
+ shapes = dataset.output_shapes
722
+ if keys is None:
723
+ keys = shapes.keys()
724
+
725
+ for k in keys:
726
+ if k not in shapes:
727
+ raise ValueError("Key %s not found in dataset. Available keys are %s"
728
+ % (k, shapes.keys()))
729
+ if not shapes[k].is_compatible_with(tf.TensorShape([None])):
730
+ raise ValueError("Tensors to be packed must be one-dimensional.")
731
+
732
+ if use_custom_ops:
733
+ return _pack_with_custom_ops(dataset, keys, length)
734
+ else:
735
+ packer = SequenceDatasetPacker(length, spacing=0, queue_size=10)
736
+ return packer(dataset, cycle_length=10, keys=keys)
737
+
738
+
739
+ def _pack_with_custom_ops(dataset, keys, length):
740
+ """Helper-function for packing a dataset which has already been batched.
741
+
742
+ See pack_dataset()
743
+
744
+ Relies on custom ops which require a custom compiled binary.
745
+ Faster than _pack_with_tf_ops(), and denser packing.
746
+
747
+ Args:
748
+ dataset: a dataset containing padded batches of examples.
749
+ keys: a list of strings (must have length 2)
750
+ length: an integer
751
+
752
+ Returns:
753
+ a dataset.
754
+ """
755
+ from TensorFlow.nlp.transformer.data_generators.ops import pack_sequences_ops # pylint: disable=g-import-not-at-top
756
+
757
+ # trim to length
758
+ dataset = dataset.map(lambda x: {k: x[k][:length] for k in keys})
759
+ # Setting batch_size=length ensures that the concatenated sequences (if they
760
+ # have length >=1) are sufficient to fill at least one packed example.
761
+ batch_size = length
762
+ dataset = dataset.padded_batch(
763
+ batch_size, padded_shapes={k: [-1] for k in keys})
764
+
765
+ # better packing (may be faster) but requires custom-built binary.
766
+ k1, k2 = keys
767
+ def map_fn_custom(x):
768
+ """Map-function."""
769
+ (k1_packed, k1_segmengation, k1_position,
770
+ k2_packed, k2_segmentation, k2_position) = (
771
+ pack_sequences_ops.pack_sequences2(x[k1], x[k2], length, length))
772
+ packed = {
773
+ k1: k1_packed,
774
+ k1 + "_segmentation": k1_segmengation,
775
+ k1 + "_position": k1_position,
776
+ k2: k2_packed,
777
+ k2 + "_segmentation": k2_segmentation,
778
+ k2 + "_position": k2_position,
779
+ }
780
+ return tf.data.Dataset.from_tensor_slices(packed)
781
+ dataset = dataset.flat_map(map_fn_custom)
782
+ return dataset
783
+
784
+
785
+ INDEX_DTYPE = tf.int32
786
+
787
+
788
+ class SequenceDatasetPacker(object):
789
+ """Helper class for packing a dataset of sequences in an online fashon.
790
+
791
+ The input sequence is expected to be a tuple of 1D Tensors which will be
792
+ converted to a dataset which produces a dict of packed examples, example
793
+ positions, and segment ids.
794
+
795
+ If `window_size` or `cycle_length` is specified multiple packing operations
796
+ will be performed in parallel to increase throughput. A value of None will
797
+ select default parallelism parameters. If this dataset will be run on a TPU,
798
+ specifying a cycle_length > 10 is recommended.
799
+ """
800
+
801
+ def __init__(self, packed_length=256, spacing=0, queue_size=10,
802
+ chop_long_sequences=False):
803
+ self._packed_length = packed_length
804
+ self._spacing = spacing
805
+ self._queue_size = queue_size
806
+ self._chop_long_sequences = chop_long_sequences
807
+ self._num_sequences = None
808
+ self._token_dtype = None
809
+
810
+ def __call__(self, dataset, **kwargs):
811
+ if {"window_size", "cycle_length"}.intersection(kwargs):
812
+ return self._concurrent_pack(dataset, **kwargs)
813
+ return self._pack(dataset, **kwargs)
814
+
815
+ def _concurrent_pack(self, dataset, window_size=None, cycle_length=None,
816
+ keys=None):
817
+ """Selects sensible default parallelism parameters based for a task."""
818
+
819
+ if window_size is None:
820
+ # This is a heuristic to fill all of the queues 10 times, and should do a
821
+ # reasonable job balancing parallelism (which benefits from lower window
822
+ # size) with packing efficiency (which suffers from edge effects when the
823
+ # window size is too low.)
824
+ window_size = int(self._packed_length / 8 * self._queue_size * 10)
825
+
826
+ if cycle_length is None:
827
+ # Typically binning one stream will saturate about 3 cores.
828
+
829
+ # Note on TPUs:
830
+ # cycle_length should still be explicitly set when training on TPUs,
831
+ # since the cpu count will be the local CPU count (which could be quite
832
+ # small), wereas the transforms will actually run on the TPU host
833
+ # controller which has a very robust CPU.
834
+ cycle_length = max([int(multiprocessing.cpu_count() / 3), 1])
835
+ return self._pack(dataset, window_size=window_size,
836
+ cycle_length=cycle_length, keys=keys)
837
+
838
+ def _pack(self, dataset, window_size=None, cycle_length=None,
839
+ deterministic=False, keys=None):
840
+ """Main method for chaining together packing transformation steps."""
841
+ (dataset, self._num_sequences, self._token_dtype, keys
842
+ ) = self._standardize(dataset, keys)
843
+ if window_size is None:
844
+ dataset = self._scanning_pack(dataset)
845
+ else:
846
+ # Dataset.window splits nested Tensors.
847
+ re_zip = lambda *x: tf.data.Dataset.zip(x)
848
+ dataset = dataset.window(window_size).map(re_zip).interleave(
849
+ self._scanning_pack, cycle_length=cycle_length,
850
+ block_length=window_size,
851
+ num_parallel_calls=tf.data.experimental.AUTOTUNE)
852
+
853
+ if not deterministic:
854
+ # Sloppy interleave offers a marginal performance improvement.
855
+ options = tf.data.Options()
856
+ options.experimental_deterministic = False
857
+ dataset = dataset.with_options(options)
858
+
859
+ dataset = dataset.map(
860
+ self._finalize, num_parallel_calls=tf.data.experimental.AUTOTUNE)
861
+ self._num_sequences, self._token_dtype = None, None
862
+
863
+ if keys:
864
+ def dict_pack(example):
865
+ output = {}
866
+ for i, key in enumerate(keys):
867
+ output[key] = example["contents"][:, i]
868
+ output[key + "_segmentation"] = example["segment"][:, i]
869
+ output[key + "_position"] = example["position"][:, i]
870
+ return output
871
+ dataset = dataset.map(dict_pack)
872
+ return dataset
873
+
874
+ def _standardize(self, dataset, keys):
875
+ """Force dataset structure into a tuple of Tensors."""
876
+ shapes = tf.data.get_output_shapes(dataset)
877
+
878
+ if isinstance(shapes, dict):
879
+ keys = keys or tuple(shapes.keys())
880
+ dataset = dataset.map(lambda x: tuple(x[k] for k in keys))
881
+ shapes = tf.data.get_output_shapes(dataset)
882
+
883
+ if not all(isinstance(i, tf.TensorShape) for i in shapes):
884
+ # Internally this class expects tuples of Tensors, even for the degenerate
885
+ # case of a single sequence.
886
+ dataset = dataset.map(lambda x: (x,))
887
+ shapes = tf.data.get_output_shapes(dataset)
888
+
889
+ for s in shapes:
890
+ if not s.is_compatible_with(tf.TensorShape([None])):
891
+ raise ValueError("Tensors to be packed must be one-dimensional.")
892
+
893
+ if not shapes:
894
+ raise ValueError("Expected sequence dataset.")
895
+
896
+ if self._chop_long_sequences and len(shapes) != 1:
897
+ raise ValueError("chop_long_sequences expects a single sequence dataset.")
898
+
899
+ token_types = tf.data.get_output_types(dataset)
900
+ if len(set(token_types)) > 1:
901
+ raise ValueError("Inconsistent dtypes: {}".format(token_types))
902
+
903
+ return dataset, len(shapes), token_types[0], keys
904
+
905
+ def _eviction_fn(self, _):
906
+ return tuple(-tf.ones((self._packed_length,), dtype=self._token_dtype)
907
+ for _ in range(self._num_sequences))
908
+
909
+ def _scan_initial_state(self):
910
+ """Create TensorArrays and indices to track bin assignment.
911
+
912
+ availability: TensorArray[queue_size, num_sequences]
913
+ This represents the number of tokens available in the ith bin.
914
+ See implementation note below.
915
+
916
+ contents: TensorArray[queue_size, num_sequences * 2]
917
+ This holds the actual contents of the packed strings as well as a bit
918
+ mask indicating where sequences begin. It is stored in a flat vector and
919
+ is accessed in offsets of packed_length.
920
+
921
+ top_index: scalar [0, queue_size)
922
+ Integer tensor indicating which index is the "top" bin. See implementation
923
+ note below.
924
+
925
+ IMPLEMENTATION_NOTE:
926
+ The FFD algorithm periodically pops the topmost queue and pushes a new
927
+ one to replace it. In order to replicate those semantics with a fixed size
928
+ TensorArray, indexing operations are shifted by top_index. For example,
929
+ instead of:
930
+ `queue_available.read(i)`
931
+
932
+ a read is instead performed as:
933
+ `queue_available.read((i - top_index) % queue_size)`
934
+
935
+ to account for the fact that the "ith" logical FFD queue is stored at
936
+ position j. This means that the pop / push update can be performed by
937
+ simply incrementing top_index. (And zeroing the old top_index position.)
938
+
939
+ Returns:
940
+ The state for the binning scan.
941
+ """
942
+
943
+ all_available = tf.ones((self._queue_size, self._num_sequences),
944
+ dtype=INDEX_DTYPE) * self._packed_length
945
+ total_size = self._packed_length * self._queue_size
946
+ total_size_range = tf.range(total_size, dtype=INDEX_DTYPE)
947
+ empty = tf.zeros((total_size, self._num_sequences * 2),
948
+ dtype=self._token_dtype)
949
+
950
+ availability = tf.TensorArray(
951
+ dtype=INDEX_DTYPE, size=self._queue_size, dynamic_size=False,
952
+ clear_after_read=False, element_shape=(self._num_sequences,)
953
+ ).scatter(tf.range(self._queue_size, dtype=INDEX_DTYPE), all_available)
954
+
955
+ contents = tf.TensorArray(
956
+ dtype=self._token_dtype, size=total_size, dynamic_size=False,
957
+ clear_after_read=False, element_shape=(self._num_sequences * 2,)
958
+ ).scatter(total_size_range, empty)
959
+
960
+ # Which index should be considered the "top" bucket for the purpose of
961
+ # the first-fit descending algorithm.
962
+ top_index = tf.zeros((), dtype=INDEX_DTYPE)
963
+
964
+ return availability, contents, top_index
965
+
966
+ def _scanning_pack(self, dataset):
967
+ """Apply scan based pack to a dataset."""
968
+ if self._chop_long_sequences:
969
+ dataset = dataset.map(lambda x: (x[:self._packed_length],))
970
+ else:
971
+ dataset = dataset.filter(lambda *x: tf.reduce_max( # pylint: disable=g-long-lambda
972
+ tf.stack([tf.shape(i)[0] for i in x]), axis=0) <= self._packed_length)
973
+
974
+ # In order to retrieve the sequences which are still in the queue when the
975
+ # dataset is exhausted, we feed dummy sequences which are guaranteed to
976
+ # displace the remaining elements.
977
+ dataset = dataset.concatenate(
978
+ tf.data.Dataset.range(self._queue_size).map(self._eviction_fn))
979
+
980
+ initial_state = self._scan_initial_state()
981
+ step_fn = functools.partial(
982
+ tf.autograph.to_graph(_scan_step_fn), packed_length=self._packed_length,
983
+ queue_size=self._queue_size, spacing=self._spacing,
984
+ num_sequences=self._num_sequences, token_dtype=self._token_dtype)
985
+
986
+ dataset = dataset.apply(tf.data.experimental.scan(initial_state, step_fn))
987
+
988
+ is_real_sample = lambda valid_sample, _: valid_sample
989
+ return dataset.filter(is_real_sample)
990
+
991
+ def _compute_auxiliary_structure(self, contents_and_mask):
992
+ """Compute segment and position metadata."""
993
+ contents = contents_and_mask[:, :self._num_sequences]
994
+ start_mask = tf.cast(contents_and_mask[:, self._num_sequences:],
995
+ dtype=INDEX_DTYPE)
996
+
997
+ segment = tf.cumsum(start_mask, axis=0)
998
+ uniform_count = tf.ones_like(segment[:, 0])
999
+ position = []
1000
+ for i in range(self._num_sequences):
1001
+ segment_slice = segment[:, i]
1002
+ counts = tf.math.segment_sum(uniform_count, segment[:, i])
1003
+ position.append(tf.range(self._packed_length) - tf.cumsum(
1004
+ tf.gather(counts, segment_slice - 1) * start_mask[:, i]))
1005
+ position = tf.concat([i[:, tf.newaxis] for i in position], axis=1)
1006
+
1007
+ # Correct for padding tokens.
1008
+ pad_mask = tf.cast(tf.not_equal(contents, 0), dtype=INDEX_DTYPE)
1009
+ segment *= pad_mask
1010
+ position *= pad_mask
1011
+
1012
+ return segment, position
1013
+
1014
+ def _finalize(self, _, contents):
1015
+ """Structure output and compute segment and position metadata."""
1016
+
1017
+ # The output shape information is lost during the filter; however we can
1018
+ # guarantee the shape. (That's the point of this exercise, after all!)
1019
+ contents.set_shape((self._packed_length, self._num_sequences * 2))
1020
+
1021
+ # Both the dummy branch of the scan step function and the eviction dataset
1022
+ # use vectors of minus one. The cost of this check is negligible and the
1023
+ # leakage of such dummy sequences would be difficult to debug downstream.
1024
+ check_leaks = tf.assert_none_equal(contents, -tf.ones_like(contents))
1025
+ with tf.control_dependencies([check_leaks]):
1026
+ contents = tf.identity(contents)
1027
+
1028
+ segment, position = self._compute_auxiliary_structure(contents)
1029
+ return {"contents": contents[:, :self._num_sequences],
1030
+ "segment": segment, "position": position}
1031
+
1032
+
1033
+ def _scan_step_fn(state, example, packed_length, queue_size, spacing,
1034
+ num_sequences, token_dtype): # pylint: disable=g-doc-args
1035
+ """Transform function used by tf.data.experimental.scan to process an example.
1036
+
1037
+ This is written as a stateless function rather than a class method because we
1038
+ trace it with AutoGraph (in order to simplify the conditional), and this way
1039
+ we don't have to worry about handling re-tracing semantics.
1040
+
1041
+ Args:
1042
+ See the SequenceDatasetPacker class.
1043
+
1044
+ Returns:
1045
+ The updated queue state, and either a packed example or a dummy sequence
1046
+ which will be filtered out downstream.
1047
+ """
1048
+
1049
+ # Convert TensorArray tuples to lists since we'll need to replace them.
1050
+ availability, contents, top_index = state
1051
+
1052
+ lengths = tf.concat([tf.shape(i) for i in example], axis=0)
1053
+ start_availability = availability.stack()
1054
+ can_fit = tf.reduce_all(tf.greater_equal(start_availability, lengths), axis=1)
1055
+ any_can_fit = tf.reduce_any(can_fit, axis=0)
1056
+
1057
+ # AutoGraph will convert this block to a tf.cond
1058
+ if any_can_fit:
1059
+ # This indicates where in the FFD queue rotation a given index sits
1060
+ shifted_range = (
1061
+ tf.range(queue_size, dtype=INDEX_DTYPE) - top_index) % queue_size
1062
+
1063
+ # Mark any indices which cannot accommodate the current example.
1064
+ exclusion_mask = tf.cast(tf.logical_not(can_fit), INDEX_DTYPE) * queue_size
1065
+
1066
+ # Index in [0, queue_size) in which to place the sample. Note, this index
1067
+ # is the position in the actual TensorArray, not the index of the FFD queue.
1068
+ queue_index = (tf.reduce_min(shifted_range + exclusion_mask) +
1069
+ top_index) % queue_size
1070
+
1071
+ # NOTE(taylorrobie): We emit a non-empty Tensor for downstream checks.
1072
+ output_contents = -tf.ones((1, num_sequences), dtype=token_dtype)
1073
+
1074
+ else:
1075
+ index_range = top_index * packed_length + tf.range(packed_length)
1076
+ output_contents = contents.gather(index_range)
1077
+
1078
+ # Reset the queue state.
1079
+ availability = availability.write(
1080
+ top_index, packed_length * tf.ones((num_sequences,), dtype=INDEX_DTYPE))
1081
+ empty_contents = tf.zeros((packed_length, num_sequences * 2),
1082
+ dtype=token_dtype)
1083
+ contents = contents.scatter(index_range, empty_contents)
1084
+
1085
+ queue_index = top_index
1086
+ top_index = (top_index + 1) % queue_size
1087
+
1088
+ pre_assign_availability = availability.read(queue_index)
1089
+ space_left = pre_assign_availability - lengths - spacing
1090
+ availability = availability.write(queue_index, space_left)
1091
+
1092
+ # ============================================================================
1093
+ # == Update contents =========================================================
1094
+ # ============================================================================
1095
+ # Consider the following case for a seq-to-seq packing:
1096
+ # (padding is represented as underscores)
1097
+ #
1098
+ # Queue starting state:
1099
+ # [1, 3, 2, 4, 6, 1, _, _, _, _, _, ...]
1100
+ # [5, 9, _, _, _, _, _, _, _, _, _, ...]
1101
+ #
1102
+ # Examples:
1103
+ # [4, 2, 4], [3]
1104
+ #
1105
+ # Desired new queue state:
1106
+ # [1, 3, 2, 4, 6, 1, _, _, 4, 2, 4, _, _, ...]
1107
+ # [5, 9, _, _, 3, _, _, _, _, _, _, _, _, ...]
1108
+ #
1109
+ # This could be acomplished by creating a TensorArray for each of the two
1110
+ # sequences, and scattering into the respective arrays. However TensorArray
1111
+ # writes are extremely expensive relative to other operations. So instead we
1112
+ # store the contents in a single TensorArray of shape (packed_length, 2), and
1113
+ # we pad and concatenate the examples such that they can be added in a single
1114
+ # assign:
1115
+ #
1116
+ # [_, _, _, _, 4, 2, 4]
1117
+ # [3, _, _, _, _, _, _]
1118
+ # +
1119
+ # [1, 3, 2, 4, 6, 1, _, _, _, _, _, ...]
1120
+ # [5, 9, _, _, _, _, _, _, _, _, _, ...]
1121
+ #
1122
+ # And in practice, the extra work of padding is neglidgable compared to
1123
+ # the gain from vectorizing the TensorArray assign. We also store a bit mask
1124
+ # denoting where sequences start which is used to compute segment and
1125
+ # position metadata:
1126
+ #
1127
+ # [_, _, _, _, 1, _, _]
1128
+ # [1, _, _, _, _, _, _]
1129
+ # +
1130
+ # [1, _, _, _, _, _, _, _, _, _, _, ...]
1131
+ # [1, _, _, _, _, _, _, _, _, _, _, ...]
1132
+ #
1133
+ # Both the contents and the mask are concatenated in the same TensorArray
1134
+ # for performance.
1135
+
1136
+ start_index = packed_length - pre_assign_availability
1137
+ end_index = start_index + lengths
1138
+ leftmost = tf.reduce_min(start_index, axis=0)
1139
+ rightmost = tf.reduce_max(end_index, axis=0)
1140
+ delta = rightmost - leftmost
1141
+ pad_indices = [tf.stack((start_index[i] - leftmost, rightmost - end_index[i]))
1142
+ for i in range(num_sequences)]
1143
+
1144
+ padded_examples = [tf.pad(ex, padding[tf.newaxis, :])
1145
+ for ex, padding in zip(example, pad_indices)]
1146
+ padded_examples = tf.transpose(tf.stack(padded_examples))
1147
+ mask_update = tf.one_hot(start_index - leftmost, delta,
1148
+ dtype=contents.dtype, axis=0)
1149
+
1150
+ content_update = tf.concat([padded_examples, mask_update], axis=1)
1151
+
1152
+ index_range = (queue_index * packed_length + # Offset into the right section.
1153
+ tf.range(delta, dtype=INDEX_DTYPE) + leftmost)
1154
+ contents = contents.scatter(index_range, contents.gather(index_range) +
1155
+ content_update)
1156
+
1157
+ state = (availability, contents, top_index)
1158
+ return state, (tf.logical_not(any_can_fit), output_contents)
1159
+
1160
+
1161
+ def make_tmp_dir(suffix="", prefix="tmp", dir=None): # pylint: disable=redefined-builtin
1162
+ """Make a temporary directory."""
1163
+ if dir is None:
1164
+ return tempfile.mkdtemp(suffix, prefix, dir)
1165
+ else:
1166
+ while True:
1167
+ rand_term = random.randint(1, 9999)
1168
+ tmp_dir = os.path.join(dir, "%s%d%s" % (prefix, rand_term, suffix))
1169
+ if tf.gfile.Exists(tmp_dir):
1170
+ continue
1171
+ tf.gfile.MakeDirs(tmp_dir)
1172
+ break
1173
+ return tmp_dir
1174
+
1175
+
1176
+ def tfrecord_iterator_for_problem(problem, data_dir,
1177
+ dataset_split=tf.estimator.ModeKeys.TRAIN):
1178
+ """Iterate over the records on disk for the Problem."""
1179
+ filenames = tf.gfile.Glob(problem.filepattern(data_dir, mode=dataset_split))
1180
+ example_spec = problem.example_reading_spec()[0]
1181
+ return tfrecord_iterator(filenames, example_spec=example_spec)
1182
+
1183
+
1184
+ def tfrecord_iterator(filenames, gzipped=False, example_spec=None):
1185
+ """Yields records from TFRecord files.
1186
+
1187
+ Args:
1188
+ filenames: list<str>, list of TFRecord filenames to read from.
1189
+ gzipped: bool, whether the TFRecord files are gzip-encoded.
1190
+ example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>,
1191
+ if provided, will parse each record as a tensorflow.Example proto.
1192
+
1193
+ Yields:
1194
+ Records (or parsed Examples, if example_spec is provided) from files.
1195
+ """
1196
+ with tf.Graph().as_default():
1197
+ dataset = tf.data.Dataset.from_tensor_slices(filenames)
1198
+
1199
+ def _load_records(filename):
1200
+ return tf.data.TFRecordDataset(
1201
+ filename,
1202
+ compression_type=tf.constant("GZIP") if gzipped else None,
1203
+ buffer_size=16 * 1000 * 1000)
1204
+
1205
+ dataset = dataset.flat_map(_load_records)
1206
+
1207
+ def _parse_example(ex_ser):
1208
+ return tf.parse_single_example(ex_ser, example_spec)
1209
+
1210
+ if example_spec:
1211
+ dataset = dataset.map(_parse_example, num_parallel_calls=32)
1212
+ dataset = dataset.prefetch(100)
1213
+ record_it = dataset.make_one_shot_iterator().get_next()
1214
+
1215
+ with tf.Session() as sess:
1216
+ while True:
1217
+ try:
1218
+ ex = sess.run(record_it)
1219
+ yield ex
1220
+ except tf.errors.OutOfRangeError:
1221
+ break
1222
+
1223
+
1224
+ def random_deinterleave(text, separator_symbol="X"):
1225
+ """Create a fill-in-the-blanks training example from text.
1226
+
1227
+ Split on spaces, then cut into segments at random points. Alternate segments
1228
+ are assigned to the two output strings. separator_symbol separates segments
1229
+ within each of the outputs.
1230
+
1231
+ example:
1232
+ text="The quick brown fox jumps over the lazy dog."
1233
+ returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.")
1234
+
1235
+ The two outputs can also be reversed to yield an instance of the same problem.
1236
+
1237
+ Args:
1238
+ text: a string
1239
+ separator_symbol: a string
1240
+ Returns:
1241
+ a pair of strings
1242
+ """
1243
+ words = text.strip().split(" ")
1244
+ n = len(words)
1245
+ if n <= 1:
1246
+ return text, ""
1247
+ cut = [False] * n
1248
+ cut[0] = True
1249
+ num_cuts = int(math.exp(random.uniform(0, math.log(n))))
1250
+ for _ in range(num_cuts):
1251
+ cut[random.randint(1, n -1)] = True
1252
+ out = [[], []]
1253
+ part = random.randint(0, 1)
1254
+ for i in range(n):
1255
+ if cut[i]:
1256
+ out[part].append(separator_symbol)
1257
+ part = 1 - part
1258
+ out[part].append(words[i])
1259
+ return " ".join(out[0]), " ".join(out[1])
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/text_encoder.py ADDED
@@ -0,0 +1,1064 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Encoders for text data.
17
+
18
+ * TextEncoder: base class
19
+ * ByteTextEncoder: for ascii text
20
+ * TokenTextEncoder: with user-supplied vocabulary file
21
+ * SubwordTextEncoder: invertible
22
+ """
23
+ from __future__ import absolute_import
24
+ from __future__ import division
25
+ from __future__ import print_function
26
+
27
+ import collections
28
+ from itertools import chain
29
+ import math
30
+ import re
31
+ import tempfile
32
+ import time
33
+ import numpy as np
34
+ import six
35
+ from six.moves import range # pylint: disable=redefined-builtin
36
+ from TensorFlow.nlp.transformer.data_generators import tokenizer
37
+
38
+ import tensorflow.compat.v1 as tf
39
+
40
+ # Reserved tokens for things like padding and EOS symbols.
41
+ PAD = "<pad>"
42
+ EOS = "<EOS>"
43
+ RESERVED_TOKENS = [PAD, EOS]
44
+ NUM_RESERVED_TOKENS = len(RESERVED_TOKENS)
45
+ PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0
46
+ EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1
47
+
48
+ if six.PY2:
49
+ RESERVED_TOKENS_BYTES = RESERVED_TOKENS
50
+ else:
51
+ RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
52
+
53
+ # Regular expression for unescaping token strings.
54
+ # '\u' is converted to '_'
55
+ # '\\' is converted to '\'
56
+ # '\213;' is converted to unichr(213)
57
+ _UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
58
+ _ESCAPE_CHARS = set(u"\\_u;0123456789")
59
+
60
+
61
+ # Unicode utility functions that work with Python 2 and 3
62
+ def native_to_unicode(s):
63
+ if is_unicode(s):
64
+ return s
65
+ try:
66
+ return to_unicode(s)
67
+ except UnicodeDecodeError:
68
+ res = to_unicode(s, ignore_errors=True)
69
+ tf.logging.info("Ignoring Unicode error, outputting: %s" % res)
70
+ return res
71
+
72
+
73
+ def unicode_to_native(s):
74
+ if six.PY2:
75
+ return s.encode("utf-8") if is_unicode(s) else s
76
+ else:
77
+ return s
78
+
79
+
80
+ def is_unicode(s):
81
+ return isinstance(s, six.text_type)
82
+
83
+
84
+ def to_unicode(s, ignore_errors=False):
85
+ if is_unicode(s):
86
+ return s
87
+ error_mode = "ignore" if ignore_errors else "strict"
88
+ return s.decode("utf-8", errors=error_mode)
89
+
90
+
91
+ def to_unicode_ignore_errors(s):
92
+ return to_unicode(s, ignore_errors=True)
93
+
94
+
95
+ def to_unicode_utf8(s):
96
+ return unicode(s, "utf-8") if six.PY2 else s.decode("utf-8")
97
+
98
+
99
+ def strip_ids(ids, ids_to_strip):
100
+ """Strip ids_to_strip from the end ids."""
101
+ ids = list(ids)
102
+ while ids and ids[-1] in ids_to_strip:
103
+ ids.pop()
104
+ return ids
105
+
106
+
107
+ class TextEncoder(object):
108
+ """Base class for converting from ints to/from human readable strings."""
109
+
110
+ def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS):
111
+ self._num_reserved_ids = num_reserved_ids
112
+
113
+ @property
114
+ def num_reserved_ids(self):
115
+ return self._num_reserved_ids
116
+
117
+ def encode(self, s):
118
+ """Transform a human-readable string into a sequence of int ids.
119
+
120
+ The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
121
+ num_reserved_ids) are reserved.
122
+
123
+ EOS is not appended.
124
+
125
+ Args:
126
+ s: human-readable string to be converted.
127
+
128
+ Returns:
129
+ ids: list of integers
130
+ """
131
+ return [int(w) + self._num_reserved_ids for w in s.split()]
132
+
133
+ def decode(self, ids, strip_extraneous=False):
134
+ """Transform a sequence of int ids into a human-readable string.
135
+
136
+ EOS is not expected in ids.
137
+
138
+ Args:
139
+ ids: list of integers to be converted.
140
+ strip_extraneous: bool, whether to strip off extraneous tokens
141
+ (EOS and PAD).
142
+
143
+ Returns:
144
+ s: human-readable string.
145
+ """
146
+ if strip_extraneous:
147
+ ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
148
+ return " ".join(self.decode_list(ids))
149
+
150
+ def decode_list(self, ids):
151
+ """Transform a sequence of int ids into a their string versions.
152
+
153
+ This method supports transforming individual input/output ids to their
154
+ string versions so that sequence to/from text conversions can be visualized
155
+ in a human readable format.
156
+
157
+ Args:
158
+ ids: list of integers to be converted.
159
+
160
+ Returns:
161
+ strs: list of human-readable string.
162
+ """
163
+ decoded_ids = []
164
+ for id_ in ids:
165
+ if 0 <= id_ < self._num_reserved_ids:
166
+ decoded_ids.append(RESERVED_TOKENS[int(id_)])
167
+ else:
168
+ decoded_ids.append(id_ - self._num_reserved_ids)
169
+ return [str(d) for d in decoded_ids]
170
+
171
+ @property
172
+ def vocab_size(self):
173
+ raise NotImplementedError()
174
+
175
+
176
+ class ByteTextEncoder(TextEncoder):
177
+ """Encodes each byte to an id. For 8-bit strings only."""
178
+
179
+ def encode(self, s):
180
+ numres = self._num_reserved_ids
181
+ if six.PY2:
182
+ if isinstance(s, unicode):
183
+ s = s.encode("utf-8")
184
+ return [ord(c) + numres for c in s]
185
+ # Python3: explicitly convert to UTF-8
186
+ return [c + numres for c in s.encode("utf-8")]
187
+
188
+ def decode(self, ids, strip_extraneous=False):
189
+ if strip_extraneous:
190
+ ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
191
+ numres = self._num_reserved_ids
192
+ decoded_ids = []
193
+ int2byte = six.int2byte
194
+ for id_ in ids:
195
+ if 0 <= id_ < numres:
196
+ decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)])
197
+ else:
198
+ decoded_ids.append(int2byte(id_ - numres))
199
+ if six.PY2:
200
+ return "".join(decoded_ids)
201
+ # Python3: join byte arrays and then decode string
202
+ return b"".join(decoded_ids).decode("utf-8", "replace")
203
+
204
+ def decode_list(self, ids):
205
+ numres = self._num_reserved_ids
206
+ decoded_ids = []
207
+ int2byte = six.int2byte
208
+ for id_ in ids:
209
+ if 0 <= id_ < numres:
210
+ decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)])
211
+ else:
212
+ decoded_ids.append(int2byte(id_ - numres))
213
+ # Python3: join byte arrays and then decode string
214
+ return decoded_ids
215
+
216
+ @property
217
+ def vocab_size(self):
218
+ return 2**8 + self._num_reserved_ids
219
+
220
+
221
+ class ClassLabelEncoder(TextEncoder):
222
+ """Encoder for class labels."""
223
+
224
+ def __init__(self, class_labels=None, class_labels_fname=None):
225
+ super(ClassLabelEncoder, self).__init__(num_reserved_ids=0)
226
+
227
+ if class_labels_fname:
228
+ with tf.gfile.Open(class_labels_fname) as f:
229
+ class_labels = [label.strip() for label in f.readlines()]
230
+
231
+ assert class_labels
232
+ self._class_labels = class_labels
233
+
234
+ def encode(self, s):
235
+ label_str = s
236
+ return self._class_labels.index(label_str)
237
+
238
+ def decode(self, ids, strip_extraneous=False):
239
+ del strip_extraneous
240
+ label_id = ids
241
+ if isinstance(label_id, list):
242
+ assert len(label_id) == 1
243
+ label_id, = label_id
244
+ if isinstance(label_id, np.ndarray):
245
+ label_id = np.squeeze(label_id)
246
+ return self._class_labels[label_id]
247
+
248
+ def decode_list(self, ids):
249
+ return [self._class_labels[i] for i in ids]
250
+
251
+ @property
252
+ def vocab_size(self):
253
+ return len(self._class_labels)
254
+
255
+
256
+ class OneHotClassLabelEncoder(ClassLabelEncoder):
257
+ """One-hot encoder for class labels."""
258
+
259
+ def encode(self, label_str, on_value=1, off_value=0): # pylint: disable=arguments-differ
260
+ e = np.full(self.vocab_size, off_value, dtype=np.int32)
261
+ e[self._class_labels.index(label_str)] = on_value
262
+ return e.tolist()
263
+
264
+ def decode(self, ids, strip_extraneous=False):
265
+ del strip_extraneous
266
+ label_id = ids
267
+ if isinstance(label_id, np.ndarray):
268
+ label_id = np.squeeze(label_id).astype(np.int8).tolist()
269
+ assert isinstance(label_id, list)
270
+ assert len(label_id) == self.vocab_size
271
+ return self._class_labels[label_id.index(1)]
272
+
273
+ @property
274
+ def vocab_size(self):
275
+ return len(self._class_labels)
276
+
277
+
278
+ class TokenTextEncoder(TextEncoder):
279
+ """Encoder based on a user-supplied vocabulary (file or list)."""
280
+
281
+ def __init__(self,
282
+ vocab_filename,
283
+ reverse=False,
284
+ vocab_list=None,
285
+ replace_oov=None,
286
+ num_reserved_ids=NUM_RESERVED_TOKENS):
287
+ """Initialize from a file or list, one token per line.
288
+
289
+ Handling of reserved tokens works as follows:
290
+ - When initializing from a list, we add reserved tokens to the vocab.
291
+ - When initializing from a file, we do not add reserved tokens to the vocab.
292
+ - When saving vocab files, we save reserved tokens to the file.
293
+
294
+ Args:
295
+ vocab_filename: If not None, the full filename to read vocab from. If this
296
+ is not None, then vocab_list should be None.
297
+ reverse: Boolean indicating if tokens should be reversed during encoding
298
+ and decoding.
299
+ vocab_list: If not None, a list of elements of the vocabulary. If this is
300
+ not None, then vocab_filename should be None.
301
+ replace_oov: If not None, every out-of-vocabulary token seen when
302
+ encoding will be replaced by this string (which must be in vocab).
303
+ num_reserved_ids: Number of IDs to save for reserved tokens like <EOS>.
304
+ """
305
+ super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids)
306
+ self._reverse = reverse
307
+ self._replace_oov = replace_oov
308
+ if vocab_filename:
309
+ self._init_vocab_from_file(vocab_filename)
310
+ else:
311
+ assert vocab_list is not None
312
+ self._init_vocab_from_list(vocab_list)
313
+
314
+ def encode(self, s):
315
+ """Converts a space-separated string of tokens to a list of ids."""
316
+ sentence = s
317
+ tokens = sentence.strip().split()
318
+ if self._replace_oov is not None:
319
+ tokens = [t if t in self._token_to_id else self._replace_oov
320
+ for t in tokens]
321
+ ret = [self._token_to_id[tok] for tok in tokens]
322
+ return ret[::-1] if self._reverse else ret
323
+
324
+ def decode(self, ids, strip_extraneous=False):
325
+ return " ".join(self.decode_list(ids))
326
+
327
+ def decode_list(self, ids):
328
+ seq = reversed(ids) if self._reverse else ids
329
+ return [self._safe_id_to_token(i) for i in seq]
330
+
331
+ @property
332
+ def vocab_size(self):
333
+ return len(self._id_to_token)
334
+
335
+ def _safe_id_to_token(self, idx):
336
+ return self._id_to_token.get(idx, "ID_%d" % idx)
337
+
338
+ def _init_vocab_from_file(self, filename):
339
+ """Load vocab from a file.
340
+
341
+ Args:
342
+ filename: The file to load vocabulary from.
343
+ """
344
+ with tf.gfile.Open(filename) as f:
345
+ tokens = [token.strip() for token in f.readlines()]
346
+
347
+ def token_gen():
348
+ for token in tokens:
349
+ yield token
350
+
351
+ self._init_vocab(token_gen(), add_reserved_tokens=False)
352
+
353
+ def _init_vocab_from_list(self, vocab_list):
354
+ """Initialize tokens from a list of tokens.
355
+
356
+ It is ok if reserved tokens appear in the vocab list. They will be
357
+ removed. The set of tokens in vocab_list should be unique.
358
+
359
+ Args:
360
+ vocab_list: A list of tokens.
361
+ """
362
+ def token_gen():
363
+ for token in vocab_list:
364
+ if token not in RESERVED_TOKENS:
365
+ yield token
366
+
367
+ self._init_vocab(token_gen())
368
+
369
+ def _init_vocab(self, token_generator, add_reserved_tokens=True):
370
+ """Initialize vocabulary with tokens from token_generator."""
371
+
372
+ self._id_to_token = {}
373
+ non_reserved_start_index = 0
374
+
375
+ if add_reserved_tokens:
376
+ self._id_to_token.update(enumerate(RESERVED_TOKENS))
377
+ non_reserved_start_index = len(RESERVED_TOKENS)
378
+
379
+ self._id_to_token.update(
380
+ enumerate(token_generator, start=non_reserved_start_index))
381
+
382
+ # _token_to_id is the reverse of _id_to_token
383
+ self._token_to_id = dict((v, k)
384
+ for k, v in six.iteritems(self._id_to_token))
385
+
386
+ def store_to_file(self, filename):
387
+ """Write vocab file to disk.
388
+
389
+ Vocab files have one token per line. The file ends in a newline. Reserved
390
+ tokens are written to the vocab file as well.
391
+
392
+ Args:
393
+ filename: Full path of the file to store the vocab to.
394
+ """
395
+ with tf.gfile.Open(filename, "w") as f:
396
+ for i in range(len(self._id_to_token)):
397
+ f.write(self._id_to_token[i] + "\n")
398
+
399
+
400
+ def _escape_token(token, alphabet):
401
+ """Escape away underscores and OOV characters and append '_'.
402
+
403
+ This allows the token to be expressed as the concatenation of a list
404
+ of subtokens from the vocabulary. The underscore acts as a sentinel
405
+ which allows us to invertibly concatenate multiple such lists.
406
+
407
+ Args:
408
+ token: A unicode string to be escaped.
409
+ alphabet: A set of all characters in the vocabulary's alphabet.
410
+
411
+ Returns:
412
+ escaped_token: An escaped unicode string.
413
+
414
+ Raises:
415
+ ValueError: If the provided token is not unicode.
416
+ """
417
+ if not isinstance(token, six.text_type):
418
+ raise ValueError("Expected string type for token, got %s" % type(token))
419
+
420
+ token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
421
+ ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
422
+ return u"".join(ret) + "_"
423
+
424
+
425
+ def _unescape_token(escaped_token):
426
+ """Inverse of _escape_token().
427
+
428
+ Args:
429
+ escaped_token: a unicode string
430
+
431
+ Returns:
432
+ token: a unicode string
433
+ """
434
+
435
+ def match(m):
436
+ if m.group(1) is None:
437
+ return u"_" if m.group(0) == u"\\u" else u"\\"
438
+
439
+ try:
440
+ return six.unichr(int(m.group(1)))
441
+ except (ValueError, OverflowError) as _:
442
+ return u"\u3013" # Unicode for undefined character.
443
+
444
+ trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token
445
+ return _UNESCAPE_REGEX.sub(match, trimmed)
446
+
447
+
448
+ class SubwordTextEncoder(TextEncoder):
449
+ """Class for invertibly encoding text using a limited vocabulary.
450
+
451
+ Invertibly encodes a native string as a sequence of subtokens from a limited
452
+ vocabulary.
453
+
454
+ A SubwordTextEncoder is built from a corpus (so it is tailored to the text in
455
+ the corpus), and stored to a file. See text_encoder_build_subword.py.
456
+
457
+ It can then be loaded and used to encode/decode any text.
458
+
459
+ Encoding has four phases:
460
+
461
+ 1. Tokenize into a list of tokens. Each token is a unicode string of either
462
+ all alphanumeric characters or all non-alphanumeric characters. We drop
463
+ tokens consisting of a single space that are between two alphanumeric
464
+ tokens.
465
+
466
+ 2. Escape each token. This escapes away special and out-of-vocabulary
467
+ characters, and makes sure that each token ends with an underscore, and
468
+ has no other underscores.
469
+
470
+ 3. Represent each escaped token as a the concatenation of a list of subtokens
471
+ from the limited vocabulary. Subtoken selection is done greedily from
472
+ beginning to end. That is, we construct the list in order, always picking
473
+ the longest subtoken in our vocabulary that matches a prefix of the
474
+ remaining portion of the encoded token.
475
+
476
+ 4. Concatenate these lists. This concatenation is invertible due to the
477
+ fact that the trailing underscores indicate when one list is finished.
478
+
479
+ """
480
+
481
+ def __init__(self, filename=None):
482
+ """Initialize and read from a file, if provided.
483
+
484
+ Args:
485
+ filename: filename from which to read vocab. If None, do not load a
486
+ vocab
487
+ """
488
+ self._alphabet = set()
489
+ self.filename = filename
490
+ if filename is not None:
491
+ self._load_from_file(filename)
492
+ super(SubwordTextEncoder, self).__init__()
493
+
494
+ def encode(self, s):
495
+ """Converts a native string to a list of subtoken ids.
496
+
497
+ Args:
498
+ s: a native string.
499
+ Returns:
500
+ a list of integers in the range [0, vocab_size)
501
+ """
502
+ return self._tokens_to_subtoken_ids(
503
+ tokenizer.encode(native_to_unicode(s)))
504
+
505
+ def encode_without_tokenizing(self, token_text):
506
+ """Converts string to list of subtoken ids without calling tokenizer.
507
+
508
+ This treats `token_text` as a single token and directly converts it
509
+ to subtoken ids. This may be useful when the default tokenizer doesn't
510
+ do what we want (e.g., when encoding text with tokens composed of lots of
511
+ nonalphanumeric characters). It is then up to the caller to make sure that
512
+ raw text is consistently converted into tokens. Only use this if you are
513
+ sure that `encode` doesn't suit your needs.
514
+
515
+ Args:
516
+ token_text: A native string representation of a single token.
517
+ Returns:
518
+ A list of subword token ids; i.e., integers in the range [0, vocab_size).
519
+ """
520
+ return self._tokens_to_subtoken_ids([native_to_unicode(token_text)])
521
+
522
+ def decode(self, ids, strip_extraneous=False):
523
+ """Converts a sequence of subtoken ids to a native string.
524
+
525
+ Args:
526
+ ids: a list of integers in the range [0, vocab_size)
527
+ strip_extraneous: bool, whether to strip off extraneous tokens
528
+ (EOS and PAD).
529
+
530
+ Returns:
531
+ a native string
532
+ """
533
+ if strip_extraneous:
534
+ ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
535
+ return unicode_to_native(
536
+ tokenizer.decode(self._subtoken_ids_to_tokens(ids)))
537
+
538
+ def decode_list(self, ids):
539
+ return [self._subtoken_id_to_subtoken_string(s) for s in ids]
540
+
541
+ @property
542
+ def vocab_size(self):
543
+ """The subtoken vocabulary size."""
544
+ return len(self._all_subtoken_strings)
545
+
546
+ def _tokens_to_subtoken_ids(self, tokens):
547
+ """Converts a list of tokens to a list of subtoken ids.
548
+
549
+ Args:
550
+ tokens: a list of strings.
551
+ Returns:
552
+ a list of integers in the range [0, vocab_size)
553
+ """
554
+ ret = []
555
+ for token in tokens:
556
+ ret.extend(self._token_to_subtoken_ids(token))
557
+ return ret
558
+
559
+ def _token_to_subtoken_ids(self, token):
560
+ """Converts token to a list of subtoken ids.
561
+
562
+ Args:
563
+ token: a string.
564
+ Returns:
565
+ a list of integers in the range [0, vocab_size)
566
+ """
567
+ cache_location = hash(token) % self._cache_size
568
+ cache_key, cache_value = self._cache[cache_location]
569
+ if cache_key == token:
570
+ return cache_value
571
+ ret = self._escaped_token_to_subtoken_ids(
572
+ _escape_token(token, self._alphabet))
573
+ self._cache[cache_location] = (token, ret)
574
+ return ret
575
+
576
+ def _subtoken_ids_to_tokens(self, subtokens):
577
+ """Converts a list of subtoken ids to a list of tokens.
578
+
579
+ Args:
580
+ subtokens: a list of integers in the range [0, vocab_size)
581
+ Returns:
582
+ a list of strings.
583
+ """
584
+ concatenated = "".join(
585
+ [self._subtoken_id_to_subtoken_string(s) for s in subtokens])
586
+ split = concatenated.split("_")
587
+ ret = []
588
+ for t in split:
589
+ if t:
590
+ unescaped = _unescape_token(t + "_")
591
+ if unescaped:
592
+ ret.append(unescaped)
593
+ return ret
594
+
595
+ def _subtoken_id_to_subtoken_string(self, subtoken):
596
+ """Converts a subtoken integer ID to a subtoken string."""
597
+ if 0 <= subtoken < self.vocab_size:
598
+ return self._all_subtoken_strings[subtoken]
599
+ return u""
600
+
601
+ def _escaped_token_to_subtoken_strings(self, escaped_token):
602
+ """Converts an escaped token string to a list of subtoken strings.
603
+
604
+ Args:
605
+ escaped_token: An escaped token as a unicode string.
606
+ Returns:
607
+ A list of subtokens as unicode strings.
608
+ """
609
+ # NOTE: This algorithm is greedy; it won't necessarily produce the "best"
610
+ # list of subtokens.
611
+ ret = []
612
+ start = 0
613
+ token_len = len(escaped_token)
614
+ while start < token_len:
615
+ for end in range(
616
+ min(token_len, start + self._max_subtoken_len), start, -1):
617
+ subtoken = escaped_token[start:end]
618
+ if subtoken in self._subtoken_string_to_id:
619
+ ret.append(subtoken)
620
+ start = end
621
+ break
622
+
623
+ else: # Did not break
624
+ # If there is no possible encoding of the escaped token then one of the
625
+ # characters in the token is not in the alphabet. This should be
626
+ # impossible and would be indicative of a bug.
627
+ assert False, "Token substring not found in subtoken vocabulary."
628
+
629
+ return ret
630
+
631
+ def _escaped_token_to_subtoken_ids(self, escaped_token):
632
+ """Converts an escaped token string to a list of subtoken IDs.
633
+
634
+ Args:
635
+ escaped_token: An escaped token as a unicode string.
636
+ Returns:
637
+ A list of subtoken IDs as integers.
638
+ """
639
+ return [
640
+ self._subtoken_string_to_id[subtoken]
641
+ for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)
642
+ ]
643
+
644
+ @classmethod
645
+ def build_from_generator(cls,
646
+ generator,
647
+ target_size,
648
+ max_subtoken_length=None,
649
+ reserved_tokens=None):
650
+ """Builds a SubwordTextEncoder from the generated text.
651
+
652
+ Args:
653
+ generator: yields text.
654
+ target_size: int, approximate vocabulary size to create.
655
+ max_subtoken_length: Maximum length of a subtoken. If this is not set,
656
+ then the runtime and memory use of creating the vocab is quadratic in
657
+ the length of the longest token. If this is set, then it is instead
658
+ O(max_subtoken_length * length of longest token).
659
+ reserved_tokens: List of reserved tokens. The global variable
660
+ `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
661
+ argument is `None`, it will use `RESERVED_TOKENS`.
662
+
663
+ Returns:
664
+ SubwordTextEncoder with `vocab_size` approximately `target_size`.
665
+ """
666
+ token_counts = collections.defaultdict(int)
667
+ for item in generator:
668
+ for tok in tokenizer.encode(native_to_unicode(item)):
669
+ token_counts[tok] += 1
670
+ encoder = cls.build_to_target_size(
671
+ target_size, token_counts, 1, 1e3,
672
+ max_subtoken_length=max_subtoken_length,
673
+ reserved_tokens=reserved_tokens)
674
+ return encoder
675
+
676
+ @classmethod
677
+ def build_to_target_size(cls,
678
+ target_size,
679
+ token_counts,
680
+ min_val,
681
+ max_val,
682
+ max_subtoken_length=None,
683
+ reserved_tokens=None,
684
+ num_iterations=4):
685
+ """Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
686
+
687
+ Uses simple recursive binary search to find a minimum token count that most
688
+ closely matches the `target_size`.
689
+
690
+ Args:
691
+ target_size: Desired vocab_size to approximate.
692
+ token_counts: A dictionary of token counts, mapping string to int.
693
+ min_val: An integer; lower bound for the minimum token count.
694
+ max_val: An integer; upper bound for the minimum token count.
695
+ max_subtoken_length: Maximum length of a subtoken. If this is not set,
696
+ then the runtime and memory use of creating the vocab is quadratic in
697
+ the length of the longest token. If this is set, then it is instead
698
+ O(max_subtoken_length * length of longest token).
699
+ reserved_tokens: List of reserved tokens. The global variable
700
+ `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
701
+ argument is `None`, it will use `RESERVED_TOKENS`.
702
+ num_iterations: An integer; how many iterations of refinement.
703
+
704
+ Returns:
705
+ A SubwordTextEncoder instance.
706
+
707
+ Raises:
708
+ ValueError: If `min_val` is greater than `max_val`.
709
+ """
710
+ if min_val > max_val:
711
+ raise ValueError("Lower bound for the minimum token count "
712
+ "is greater than the upper bound.")
713
+ if target_size < 1:
714
+ raise ValueError("Target size must be positive.")
715
+
716
+ if reserved_tokens is None:
717
+ reserved_tokens = RESERVED_TOKENS
718
+
719
+ def bisect(min_val, max_val):
720
+ """Bisection to find the right size."""
721
+ present_count = (max_val + min_val) // 2
722
+ tf.logging.info("Trying min_count %d" % present_count)
723
+ subtokenizer = cls()
724
+ subtokenizer.build_from_token_counts(
725
+ token_counts, present_count, num_iterations,
726
+ max_subtoken_length=max_subtoken_length,
727
+ reserved_tokens=reserved_tokens)
728
+
729
+ # Being within 1% of the target size is ok.
730
+ is_ok = abs(subtokenizer.vocab_size - target_size) * 100 < target_size
731
+ # If min_val == max_val, we can't do any better than this.
732
+ if is_ok or min_val >= max_val or present_count < 2:
733
+ return subtokenizer
734
+
735
+ if subtokenizer.vocab_size > target_size:
736
+ other_subtokenizer = bisect(present_count + 1, max_val)
737
+ else:
738
+ other_subtokenizer = bisect(min_val, present_count - 1)
739
+
740
+ if other_subtokenizer is None:
741
+ return subtokenizer
742
+
743
+ if (abs(other_subtokenizer.vocab_size - target_size) <
744
+ abs(subtokenizer.vocab_size - target_size)):
745
+ return other_subtokenizer
746
+ return subtokenizer
747
+
748
+ return bisect(min_val, max_val)
749
+
750
+ def build_from_token_counts(self,
751
+ token_counts,
752
+ min_count,
753
+ num_iterations=4,
754
+ reserved_tokens=None,
755
+ max_subtoken_length=None):
756
+ """Train a SubwordTextEncoder based on a dictionary of word counts.
757
+
758
+ Args:
759
+ token_counts: a dictionary of Unicode strings to int.
760
+ min_count: an integer - discard subtokens with lower counts.
761
+ num_iterations: an integer. how many iterations of refinement.
762
+ reserved_tokens: List of reserved tokens. The global variable
763
+ `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
764
+ argument is `None`, it will use `RESERVED_TOKENS`.
765
+ max_subtoken_length: Maximum length of a subtoken. If this is not set,
766
+ then the runtime and memory use of creating the vocab is quadratic in
767
+ the length of the longest token. If this is set, then it is instead
768
+ O(max_subtoken_length * length of longest token).
769
+
770
+ Raises:
771
+ ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
772
+ is not clear what the space is being reserved for, or when it will be
773
+ filled in.
774
+ """
775
+ if reserved_tokens is None:
776
+ reserved_tokens = RESERVED_TOKENS
777
+ else:
778
+ # There is not complete freedom in replacing RESERVED_TOKENS.
779
+ for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
780
+ if default != proposed:
781
+ raise ValueError("RESERVED_TOKENS must be a prefix of "
782
+ "reserved_tokens.")
783
+
784
+ # Initialize the alphabet. Note, this must include reserved tokens or it can
785
+ # result in encoding failures.
786
+ alphabet_tokens = chain(six.iterkeys(token_counts),
787
+ [native_to_unicode(t) for t in reserved_tokens])
788
+
789
+ self._init_alphabet_from_tokens(alphabet_tokens)
790
+
791
+ # Bootstrap the initial list of subtokens with the characters from the
792
+ # alphabet plus the escaping characters.
793
+ self._init_subtokens_from_list(list(self._alphabet),
794
+ reserved_tokens=reserved_tokens)
795
+
796
+ # We build iteratively. On each iteration, we segment all the words,
797
+ # then count the resulting potential subtokens, keeping the ones
798
+ # with high enough counts for our new vocabulary.
799
+ if min_count < 1:
800
+ min_count = 1
801
+ for i in range(num_iterations):
802
+ tf.logging.info("Iteration {0}".format(i))
803
+
804
+ # Collect all substrings of the encoded token that break along current
805
+ # subtoken boundaries.
806
+ subtoken_counts = collections.defaultdict(int)
807
+ for token, count in six.iteritems(token_counts):
808
+ iter_start_time = time.time()
809
+ escaped_token = _escape_token(token, self._alphabet)
810
+ subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
811
+ start = 0
812
+ for subtoken in subtokens:
813
+ last_position = len(escaped_token) + 1
814
+ if max_subtoken_length is not None:
815
+ last_position = min(last_position, start + max_subtoken_length)
816
+
817
+ for end in range(start + 1, last_position):
818
+ new_subtoken = escaped_token[start:end]
819
+ subtoken_counts[new_subtoken] += count
820
+ start += len(subtoken)
821
+ iter_time_secs = time.time() - iter_start_time
822
+ if iter_time_secs > 0.1:
823
+ tf.logging.info(u"Processing token [{0}] took {1} seconds, consider "
824
+ "setting Text2TextProblem.max_subtoken_length to a "
825
+ "smaller value.".format(token, iter_time_secs))
826
+
827
+ # Array of sets of candidate subtoken strings, by length.
828
+ len_to_subtoken_strings = []
829
+ for subtoken_string, count in six.iteritems(subtoken_counts):
830
+ lsub = len(subtoken_string)
831
+ if count >= min_count:
832
+ while len(len_to_subtoken_strings) <= lsub:
833
+ len_to_subtoken_strings.append(set())
834
+ len_to_subtoken_strings[lsub].add(subtoken_string)
835
+
836
+ # Consider the candidates longest to shortest, so that if we accept
837
+ # a longer subtoken string, we can decrement the counts of its prefixes.
838
+ new_subtoken_strings = []
839
+ for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
840
+ subtoken_strings = len_to_subtoken_strings[lsub]
841
+ for subtoken_string in subtoken_strings:
842
+ count = subtoken_counts[subtoken_string]
843
+ if count >= min_count:
844
+ # Exclude alphabet tokens here, as they must be included later,
845
+ # explicitly, regardless of count.
846
+ if subtoken_string not in self._alphabet:
847
+ new_subtoken_strings.append((count, subtoken_string))
848
+ for l in range(1, lsub):
849
+ subtoken_counts[subtoken_string[:l]] -= count
850
+
851
+ # Include the alphabet explicitly to guarantee all strings are encodable.
852
+ new_subtoken_strings.extend((subtoken_counts.get(a, 0), a)
853
+ for a in self._alphabet)
854
+ new_subtoken_strings.sort(reverse=True)
855
+
856
+ # Reinitialize to the candidate vocabulary.
857
+ new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings]
858
+ if reserved_tokens:
859
+ escaped_reserved_tokens = [
860
+ _escape_token(native_to_unicode(t), self._alphabet)
861
+ for t in reserved_tokens
862
+ ]
863
+ new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
864
+
865
+ self._init_subtokens_from_list(new_subtoken_strings)
866
+ tf.logging.info("vocab_size = %d" % self.vocab_size)
867
+
868
+ @property
869
+ def all_subtoken_strings(self):
870
+ return tuple(self._all_subtoken_strings)
871
+
872
+ def dump(self):
873
+ """Debugging dump of the current subtoken vocabulary."""
874
+ subtoken_strings = [(i, s)
875
+ for s, i in six.iteritems(self._subtoken_string_to_id)]
876
+ print(u", ".join(u"{0} : '{1}'".format(i, s)
877
+ for i, s in sorted(subtoken_strings)))
878
+
879
+ def _init_subtokens_from_list(self, subtoken_strings, reserved_tokens=None):
880
+ """Initialize token information from a list of subtoken strings.
881
+
882
+ Args:
883
+ subtoken_strings: a list of subtokens
884
+ reserved_tokens: List of reserved tokens. We must have `reserved_tokens`
885
+ as None or the empty list, or else the global variable `RESERVED_TOKENS`
886
+ must be a prefix of `reserved_tokens`.
887
+
888
+ Raises:
889
+ ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
890
+ is not clear what the space is being reserved for, or when it will be
891
+ filled in.
892
+ """
893
+ if reserved_tokens is None:
894
+ reserved_tokens = []
895
+
896
+ if reserved_tokens:
897
+ self._all_subtoken_strings = reserved_tokens + subtoken_strings
898
+ else:
899
+ self._all_subtoken_strings = subtoken_strings
900
+
901
+ # we remember the maximum length of any subtoken to avoid having to
902
+ # check arbitrarily long strings.
903
+ self._max_subtoken_len = max([len(s) for s in subtoken_strings])
904
+ self._subtoken_string_to_id = {
905
+ s: i + len(reserved_tokens)
906
+ for i, s in enumerate(subtoken_strings) if s
907
+ }
908
+ # Initialize the cache to empty.
909
+ self._cache_size = 2 ** 20
910
+ self._cache = [(None, None)] * self._cache_size
911
+
912
+ def _init_alphabet_from_tokens(self, tokens):
913
+ """Initialize alphabet from an iterable of token or subtoken strings."""
914
+ # Include all characters from all tokens in the alphabet to guarantee that
915
+ # any token can be encoded. Additionally, include all escaping characters.
916
+ self._alphabet = {c for token in tokens for c in token}
917
+ self._alphabet |= _ESCAPE_CHARS
918
+
919
+ def _load_from_file_object(self, f):
920
+ """Load from a file object.
921
+
922
+ Args:
923
+ f: File object to load vocabulary from
924
+ """
925
+ subtoken_strings = []
926
+ for line in f:
927
+ s = line.rstrip()
928
+ # Some vocab files wrap words in single quotes, but others don't
929
+ if ((s.startswith("'") and s.endswith("'")) or
930
+ (s.startswith("\"") and s.endswith("\""))):
931
+ s = s[1:-1]
932
+ subtoken_strings.append(native_to_unicode(s))
933
+ self._init_subtokens_from_list(subtoken_strings)
934
+ self._init_alphabet_from_tokens(subtoken_strings)
935
+
936
+ def _load_from_file(self, filename):
937
+ """Load from a vocab file."""
938
+ if not tf.gfile.Exists(filename):
939
+ raise ValueError("File %s not found" % filename)
940
+ with tf.gfile.Open(filename) as f:
941
+ self._load_from_file_object(f)
942
+
943
+ def store_to_file(self, filename, add_single_quotes=True):
944
+ with tf.gfile.Open(filename, "w") as f:
945
+ for subtoken_string in self._all_subtoken_strings:
946
+ if add_single_quotes:
947
+ f.write("'" + unicode_to_native(subtoken_string) + "'\n")
948
+ else:
949
+ f.write(unicode_to_native(subtoken_string) + "\n")
950
+
951
+
952
+ class ImageEncoder(object):
953
+ """Encoder class for saving and loading images."""
954
+
955
+ def __init__(self, num_reserved_ids=0, height=None, width=None, channels=3):
956
+ assert num_reserved_ids == 0
957
+ self._height = height
958
+ self._width = width
959
+ self._channels = channels
960
+
961
+ @property
962
+ def num_reserved_ids(self):
963
+ return 0
964
+
965
+ def encode(self, s):
966
+ """Transform a string with a filename into a list of RGB integers.
967
+
968
+ Args:
969
+ s: path to the file with an image.
970
+
971
+ Returns:
972
+ ids: list of integers
973
+ """
974
+ try:
975
+ import matplotlib.image as im # pylint: disable=g-import-not-at-top
976
+ except ImportError as e:
977
+ tf.logging.warning(
978
+ "Reading an image requires matplotlib to be installed: %s", e)
979
+ raise NotImplementedError("Image reading not implemented.")
980
+ return im.imread(s)
981
+
982
+ def decode(self, ids, strip_extraneous=False):
983
+ """Transform a sequence of int ids into an image file.
984
+
985
+ Args:
986
+ ids: list of integers to be converted.
987
+ strip_extraneous: unused
988
+
989
+ Returns:
990
+ Path to the temporary file where the image was saved.
991
+
992
+ Raises:
993
+ ValueError: if the ids are not of the appropriate size.
994
+ """
995
+ del strip_extraneous
996
+ _, tmp_file_path = tempfile.mkstemp("_decode.png")
997
+ if self._height is None or self._width is None:
998
+ size = int(math.sqrt(len(ids) / self._channels))
999
+ length = size * size * self._channels
1000
+ else:
1001
+ size = None
1002
+ length = self._height * self._width * self._channels
1003
+ if len(ids) != length:
1004
+ raise ValueError("Length of ids (%d) must be height (%d) x width (%d) x "
1005
+ "channels (%d); %d != %d.\n Ids: %s"
1006
+ % (len(ids), self._height, self._width, self._channels,
1007
+ len(ids), length, " ".join([str(i) for i in ids])))
1008
+ with tf.Graph().as_default():
1009
+ raw = tf.constant(ids, dtype=tf.uint8)
1010
+ if size is None:
1011
+ img = tf.reshape(raw, [self._height, self._width, self._channels])
1012
+ else:
1013
+ img = tf.reshape(raw, [size, size, self._channels])
1014
+ png = tf.image.encode_png(img)
1015
+ op = tf.write_file(tmp_file_path, png)
1016
+ with tf.Session() as sess:
1017
+ sess.run(op)
1018
+ return tmp_file_path
1019
+
1020
+ def decode_list(self, ids):
1021
+ """Transform a sequence of int ids into an image file.
1022
+
1023
+ Args:
1024
+ ids: list of integers to be converted.
1025
+
1026
+ Returns:
1027
+ Singleton list: path to the temporary file where the image was saved.
1028
+ """
1029
+ return [self.decode(ids)]
1030
+
1031
+ @property
1032
+ def vocab_size(self):
1033
+ return 256
1034
+
1035
+
1036
+ class RealEncoder(object):
1037
+ """Encoder class for saving and loading float values."""
1038
+
1039
+ def encode(self, s):
1040
+ """Transform a string (space separated float values) into a float array.
1041
+
1042
+ Args:
1043
+ s: space separated float values.
1044
+
1045
+ Returns:
1046
+ Array of float values.
1047
+ """
1048
+ return [float(w) for w in s.split()]
1049
+
1050
+ def decode(self, ids, strip_extraneous=False):
1051
+ """Transform sequence of float values into string (float values).
1052
+
1053
+ Args:
1054
+ ids: array of floats to be converted.
1055
+ strip_extraneous: unused
1056
+
1057
+ Returns:
1058
+ String having space separated float values.
1059
+
1060
+ Raises:
1061
+ ValueError: if the ids are not of the appropriate size.
1062
+ """
1063
+ del strip_extraneous
1064
+ return " ".join([str(i) for i in ids])
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/text_encoder_build_subword.py ADDED
@@ -0,0 +1,79 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ r"""Program to build a SubwordTextEncoder.
17
+
18
+ The flags --min_count and --corpus_max_lines will affect the size of the
19
+ vocabulary. Try changing these flags until you get a vocabulary
20
+ of the size you want.
21
+
22
+ Example usage:
23
+
24
+ python data_generators/text_encoder_build_subword.py \
25
+ --corpus_filepattern=$DATA_DIR/my_problem-train-* \
26
+ --corpus_max_lines=12345 \
27
+ --output_filename=$DATA_DIR/my_problem.subword_text_encoder \
28
+ --logtostderr
29
+
30
+ """
31
+ from __future__ import absolute_import
32
+ from __future__ import division
33
+ from __future__ import print_function
34
+ from TensorFlow.nlp.transformer.data_generators import text_encoder
35
+ from TensorFlow.nlp.transformer.data_generators import tokenizer
36
+
37
+ import tensorflow.compat.v1 as tf
38
+
39
+ tf.flags.DEFINE_string('output_filename', '/tmp/my.subword_text_encoder',
40
+ 'where to store the SubwordTextEncoder')
41
+ tf.flags.DEFINE_string('corpus_filepattern', '',
42
+ 'Corpus of one or more text files')
43
+ tf.flags.DEFINE_string('vocab_filepattern', '', 'One or more vocabulary files '
44
+ '(one word per line as "word,count")')
45
+ tf.flags.DEFINE_integer('min_count', 5, 'Minimum subtoken count in corpus')
46
+ tf.flags.DEFINE_integer('corpus_max_lines', 10000,
47
+ 'How many lines of corpus to read')
48
+ tf.flags.DEFINE_integer('num_iterations', 4, 'Number of iterations')
49
+ tf.flags.DEFINE_bool('split_on_newlines', True, 'Break corpus into lines.')
50
+ FLAGS = tf.flags.FLAGS
51
+
52
+
53
+ def main(unused_argv):
54
+ if FLAGS.corpus_filepattern and FLAGS.vocab_filepattern:
55
+ raise ValueError(
56
+ 'Must only provide one of --corpus_filepattern or --vocab_filepattern')
57
+
58
+ elif FLAGS.corpus_filepattern:
59
+ token_counts = tokenizer.corpus_token_counts(
60
+ FLAGS.corpus_filepattern,
61
+ FLAGS.corpus_max_lines,
62
+ split_on_newlines=FLAGS.split_on_newlines)
63
+
64
+ elif FLAGS.vocab_filepattern:
65
+ token_counts = tokenizer.vocab_token_counts(FLAGS.vocab_filepattern,
66
+ FLAGS.corpus_max_lines)
67
+
68
+ else:
69
+ raise ValueError(
70
+ 'Must provide one of --corpus_filepattern or --vocab_filepattern')
71
+
72
+ encoder = text_encoder.SubwordTextEncoder()
73
+ encoder.build_from_token_counts(token_counts, FLAGS.min_count,
74
+ FLAGS.num_iterations)
75
+ encoder.store_to_file(FLAGS.output_filename)
76
+
77
+
78
+ if __name__ == '__main__':
79
+ tf.app.run()
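For reference, the flag-driven main() above boils down to the three calls sketched here. The module paths match the imports used in this script but assume the repository root is on PYTHONPATH; the corpus file pattern and output path are placeholders.

```python
from TensorFlow.nlp.transformer.data_generators import text_encoder, tokenizer

# Count tokens in the corpus (pattern and line limit are placeholders).
token_counts = tokenizer.corpus_token_counts(
    "/path/to/my_problem-train-*",
    10000,                       # corpus_max_lines
    split_on_newlines=True)

# Build and store the subword vocabulary.
encoder = text_encoder.SubwordTextEncoder()
encoder.build_from_token_counts(token_counts, 5, 4)  # min_count=5, num_iterations=4
encoder.store_to_file("/tmp/my.subword_text_encoder")
```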
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/tokenizer.py ADDED
@@ -0,0 +1,194 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """A simple invertible tokenizer.
17
+
18
+ Converts from a unicode string to a list of tokens
19
+ (represented as Unicode strings).
20
+
21
+ This tokenizer has the following desirable properties:
22
+ - It is invertible.
23
+ - Alphanumeric characters are broken away from non-alphanumeric characters.
24
+ - A single space between words does not produce an extra token.
25
+ - The full Unicode punctuation and separator set is recognized.
26
+
27
+ The tokenization algorithm is as follows:
28
+
29
+ 1. Split the text into a list of tokens, splitting at every boundary of an
30
+ alphanumeric character and a non-alphanumeric character. This produces
31
+ a list which alternates between "alphanumeric tokens"
32
+ (strings of alphanumeric characters) and "non-alphanumeric tokens"
33
+ (strings of non-alphanumeric characters).
34
+
35
+ 2. Remove every token consisting of a single space, unless it is
36
+ the very first or very last token in the list. These tokens are now
37
+ implied by the fact that there are two adjacent alphanumeric tokens.
38
+
39
+ e.g. u"Dude - that's so cool."
40
+ -> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
41
+ """
42
+
43
+ from __future__ import absolute_import
44
+ from __future__ import division
45
+ from __future__ import print_function
46
+
47
+ import collections
48
+ import sys
49
+ import unicodedata
50
+ import six
51
+ from six.moves import range # pylint: disable=redefined-builtin
52
+ import tensorflow.compat.v1 as tf
53
+
54
+ # Conversion between Unicode and UTF-8, if required (on Python2)
55
+ _native_to_unicode = (lambda s: s.decode("utf-8")) if six.PY2 else (lambda s: s)
56
+
57
+
58
+ # This set contains all letter and number characters.
59
+ _ALPHANUMERIC_CHAR_SET = set(
60
+ six.unichr(i) for i in range(sys.maxunicode)
61
+ if (unicodedata.category(six.unichr(i)).startswith("L") or
62
+ unicodedata.category(six.unichr(i)).startswith("N")))
63
+
64
+
65
+ def encode(text):
66
+ """Encode a unicode string as a list of tokens.
67
+
68
+ Args:
69
+ text: a unicode string
70
+ Returns:
71
+ a list of tokens as Unicode strings
72
+ """
73
+ if not text:
74
+ return []
75
+ ret = []
76
+ token_start = 0
77
+ # Classify each character in the input string
78
+ is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
79
+ for pos in range(1, len(text)):
80
+ if is_alnum[pos] != is_alnum[pos - 1]:
81
+ token = text[token_start:pos]
82
+ if token != u" " or token_start == 0:
83
+ ret.append(token)
84
+ token_start = pos
85
+ final_token = text[token_start:]
86
+ ret.append(final_token)
87
+ return ret
88
+
89
+
90
+ def decode(tokens):
91
+ """Decode a list of tokens to a unicode string.
92
+
93
+ Args:
94
+ tokens: a list of Unicode strings
95
+ Returns:
96
+ a unicode string
97
+ """
98
+ token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
99
+ ret = []
100
+ for i, token in enumerate(tokens):
101
+ if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
102
+ ret.append(u" ")
103
+ ret.append(token)
104
+ return "".join(ret)
105
+
106
+
107
+ def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True):
108
+ """Reads files matching a wildcard pattern, yielding the contents.
109
+
110
+ Args:
111
+ filepattern: A wildcard pattern matching one or more files.
112
+ max_lines: If set, stop reading after reading this many lines.
113
+ split_on_newlines: A boolean. If true, then split files by lines and strip
114
+ leading and trailing whitespace from each line. Otherwise, treat each
115
+ file as a single string.
116
+
117
+ Yields:
118
+ The contents of the files as lines, if split_on_newlines is True, or
119
+ the entire contents of each file if False.
120
+ """
121
+ filenames = sorted(tf.gfile.Glob(filepattern))
122
+ lines_read = 0
123
+ for filename in filenames:
124
+ with tf.gfile.Open(filename) as f:
125
+ if split_on_newlines:
126
+ for line in f:
127
+ yield line.strip()
128
+ lines_read += 1
129
+ if max_lines and lines_read >= max_lines:
130
+ return
131
+
132
+ else:
133
+ if max_lines:
134
+ doc = []
135
+ for line in f:
136
+ doc.append(line)
137
+ lines_read += 1
138
+ if max_lines and lines_read >= max_lines:
139
+ yield "".join(doc)
140
+ return
141
+ yield "".join(doc)
142
+
143
+ else:
144
+ yield f.read()
145
+
146
+
147
+ def corpus_token_counts(
148
+ text_filepattern, corpus_max_lines, split_on_newlines=True):
149
+ """Read the corpus and compute a dictionary of token counts.
150
+
151
+ Args:
152
+ text_filepattern: A pattern matching one or more files.
153
+ corpus_max_lines: An integer; maximum total lines to read.
154
+ split_on_newlines: A boolean. If true, then split files by lines and strip
155
+ leading and trailing whitespace from each line. Otherwise, treat each
156
+ file as a single string.
157
+
158
+ Returns:
159
+ a dictionary mapping token to count.
160
+ """
161
+ counts = collections.Counter()
162
+ for doc in _read_filepattern(
163
+ text_filepattern,
164
+ max_lines=corpus_max_lines,
165
+ split_on_newlines=split_on_newlines):
166
+ counts.update(encode(_native_to_unicode(doc)))
167
+
168
+ return counts
169
+
170
+
171
+ def vocab_token_counts(text_filepattern, max_lines):
172
+ """Read a vocab file and return a dictionary of token counts.
173
+
174
+ Reads a two-column CSV file of tokens and their frequency in a dataset. The
175
+ tokens are presumed to be generated by encode() or the equivalent.
176
+
177
+ Args:
178
+ text_filepattern: A pattern matching one or more files.
179
+ max_lines: An integer; maximum total lines to read.
180
+
181
+ Returns:
182
+ a dictionary mapping token to count.
183
+ """
184
+ ret = {}
185
+ for i, line in enumerate(
186
+ _read_filepattern(text_filepattern, max_lines=max_lines)):
187
+ if "," not in line:
188
+ tf.logging.warning("Malformed vocab line #%d '%s'", i, line)
189
+ continue
190
+
191
+ token, count = line.rsplit(",", 1)
192
+ ret[_native_to_unicode(token)] = int(count)
193
+
194
+ return ret
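A quick round-trip check of the invertibility property described in the module docstring; the import assumes the repository root is on PYTHONPATH.

```python
from TensorFlow.nlp.transformer.data_generators import tokenizer

text = u"Dude - that's so cool."
tokens = tokenizer.encode(text)
# [u'Dude', u' - ', u'that', u"'", u's', u'so', u'cool', u'.']
assert tokenizer.decode(tokens) == text
```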
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_encs.py ADDED
@@ -0,0 +1,99 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Data generators for translation data-sets."""
17
+
18
+ from __future__ import absolute_import
19
+ from __future__ import division
20
+ from __future__ import print_function
21
+ from TensorFlow.nlp.transformer.data_generators import problem
22
+ from TensorFlow.nlp.transformer.data_generators import text_encoder
23
+ from TensorFlow.nlp.transformer.data_generators import text_problems
24
+ from TensorFlow.nlp.transformer.data_generators import translate
25
+ from TensorFlow.nlp.transformer.utils import registry
26
+
27
+
28
+ # End-of-sentence marker.
29
+ EOS = text_encoder.EOS_ID
30
+
31
+ _ENCS_TRAIN_DATASETS = [
32
+ [("https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/"
33
+ "11234/1-1458/data-plaintext-format.tar"),
34
+ ("tsv", 3, 2, "data.plaintext-format/*train.gz")],
35
+ [
36
+ "http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz", # pylint: disable=line-too-long
37
+ ("training-parallel-nc-v13/news-commentary-v13.cs-en.en",
38
+ "training-parallel-nc-v13/news-commentary-v13.cs-en.cs")
39
+ ],
40
+ [
41
+ "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
42
+ ("commoncrawl.cs-en.en", "commoncrawl.cs-en.cs")
43
+ ],
44
+ [
45
+ "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
46
+ ("training/europarl-v7.cs-en.en", "training/europarl-v7.cs-en.cs")
47
+ ],
48
+ ]
49
+ _ENCS_TEST_DATASETS = [
50
+ [
51
+ "http://data.statmt.org/wmt17/translation-task/dev.tgz",
52
+ ("dev/newstest2013.en", "dev/newstest2013.cs")
53
+ ],
54
+ ]
55
+
56
+
57
+ @registry.register_problem
58
+ class TranslateEncsWmt32k(translate.TranslateProblem):
59
+ """Problem spec for WMT English-Czech translation."""
60
+
61
+ @property
62
+ def approx_vocab_size(self):
63
+ return 2**15 # 32768
64
+
65
+ def source_data_files(self, dataset_split):
66
+ train = dataset_split == problem.DatasetSplit.TRAIN
67
+ return _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS
68
+
69
+ def vocab_data_files(self):
70
+ datasets = self.source_data_files(problem.DatasetSplit.TRAIN)
71
+ vocab_datasets = []
72
+ if datasets[0][0].endswith("data-plaintext-format.tar"):
73
+ vocab_datasets.append([
74
+ datasets[0][0], [
75
+ "%s-compiled-train.lang1" % self.name,
76
+ "%s-compiled-train.lang2" % self.name
77
+ ]
78
+ ])
79
+ datasets = datasets[1:]
80
+ vocab_datasets += [[item[0], [item[1][0], item[1][1]]] for item in datasets]
81
+ return vocab_datasets
82
+
83
+
84
+ @registry.register_problem
85
+ class TranslateEncsWmtCharacters(translate.TranslateProblem):
86
+ """Problem spec for WMT En-Cs character-based translation."""
87
+
88
+ @property
89
+ def vocab_type(self):
90
+ return text_problems.VocabType.CHARACTER
91
+
92
+ def generate_samples(self, data_dir, tmp_dir, dataset_split):
93
+ train = dataset_split == problem.DatasetSplit.TRAIN
94
+ datasets = _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS
95
+ tag = "train" if train else "dev"
96
+ data_path = translate.compile_data(tmp_dir, datasets,
97
+ "wmt_encs_chr_%s" % tag)
98
+ return text_problems.text2text_txt_iterator(data_path + ".lang1",
99
+ data_path + ".lang2")
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_ende.py ADDED
@@ -0,0 +1,218 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Data generators for translation data-sets."""
17
+
18
+ from __future__ import absolute_import
19
+ from __future__ import division
20
+ from __future__ import print_function
21
+
22
+ from TensorFlow.nlp.transformer.data_generators import problem
23
+ from TensorFlow.nlp.transformer.data_generators import text_problems
24
+ from TensorFlow.nlp.transformer.data_generators import translate
25
+ from TensorFlow.nlp.transformer.utils import registry
26
+
27
+
28
+ _ENDE_TRAIN_DATASETS = [
29
+ [
30
+ "http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz", # pylint: disable=line-too-long
31
+ ("training-parallel-nc-v13/news-commentary-v13.de-en.en",
32
+ "training-parallel-nc-v13/news-commentary-v13.de-en.de")
33
+ ],
34
+ [
35
+ "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
36
+ ("commoncrawl.de-en.en", "commoncrawl.de-en.de")
37
+ ],
38
+ [
39
+ "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
40
+ ("training/europarl-v7.de-en.en", "training/europarl-v7.de-en.de")
41
+ ],
42
+ ]
43
+
44
+ _ENDE_EVAL_DATASETS = [
45
+ [
46
+ "http://data.statmt.org/wmt17/translation-task/dev.tgz",
47
+ ("dev/newstest2013.en", "dev/newstest2013.de")
48
+ ],
49
+ ]
50
+
51
+ _ENDE_RAPID_TRAIN_DATASET = [
52
+ # additional training data available for the WMT 18 news task,
53
+ # as defined by http://www.statmt.org/wmt18/translation-task.html
54
+ [
55
+ "http://data.statmt.org/wmt18/translation-task/rapid2016.tgz",
56
+ ("rapid2016.de-en.en", "rapid2016.de-en.de"),
57
+ ],
58
+ ]
59
+
60
+ _ENDE_PARACRAWL_DATASETS = [
61
+ [
62
+ "https://s3.amazonaws.com/web-language-models/paracrawl/release4/en-de.bicleaner07.tmx.gz", # pylint: disable=line-too-long
63
+ ("tmx", "en-de.bicleaner07.tmx.gz")
64
+ ]
65
+ ]
66
+
67
+
68
+ @registry.register_problem
69
+ class TranslateEndeWmt32k(translate.TranslateProblem):
70
+ """En-de translation trained on WMT corpus."""
71
+
72
+ @property
73
+ def additional_training_datasets(self):
74
+ """Allow subclasses to add training datasets."""
75
+ return []
76
+
77
+ def source_data_files(self, dataset_split):
78
+ train = dataset_split == problem.DatasetSplit.TRAIN
79
+ train_datasets = _ENDE_TRAIN_DATASETS + self.additional_training_datasets
80
+ return train_datasets if train else _ENDE_EVAL_DATASETS
81
+
82
+
83
+ @registry.register_problem
84
+ class TranslateEnde2018Wmt32k(translate.TranslateProblem):
85
+ """En-de translation trained on WMT18 corpus."""
86
+
87
+ @property
88
+ def use_vocab_from_other_problem(self):
89
+ return TranslateEndeWmt32k()
90
+
91
+ @property
92
+ def additional_training_datasets(self):
93
+ """WMT18 adds rapid data."""
94
+ return _ENDE_RAPID_TRAIN_DATASET
95
+
96
+
97
+ @registry.register_problem
98
+ class TranslateEndeWmtClean32k(TranslateEndeWmt32k):
99
+ """En-de translation trained on WMT with further cleaning."""
100
+
101
+ @property
102
+ def use_vocab_from_other_problem(self):
103
+ return TranslateEndeWmt32k()
104
+
105
+ @property
106
+ def datatypes_to_clean(self):
107
+ return ["txt"]
108
+
109
+
110
+ @registry.register_problem
111
+ class TranslateEndePc32k(translate.TranslateProblem):
112
+ """En-de translation trained on Paracrawl (bicleaner corpus)."""
113
+
114
+ @property
115
+ def use_vocab_from_other_problem(self):
116
+ return TranslateEndeWmt32k()
117
+
118
+ @property
119
+ def additional_training_datasets(self):
120
+ """Allow subclasses to add training datasets."""
121
+ return []
122
+
123
+ def source_data_files(self, dataset_split):
124
+ train = dataset_split == problem.DatasetSplit.TRAIN
125
+ train_datasets = (
126
+ _ENDE_PARACRAWL_DATASETS + self.additional_training_datasets)
127
+ return train_datasets if train else _ENDE_EVAL_DATASETS
128
+
129
+
130
+ @registry.register_problem
131
+ class TranslateEndePcClean32k(TranslateEndePc32k):
132
+ """En-de translation trained on Paracrawl with further cleaning."""
133
+
134
+ @property
135
+ def datatypes_to_clean(self):
136
+ return ["tmx"]
137
+
138
+
139
+ @registry.register_problem
140
+ class TranslateEndeWmtPc32k(TranslateEndeWmt32k):
141
+ """En-de translation trained on WMT plus Paracrawl."""
142
+
143
+ @property
144
+ def use_vocab_from_other_problem(self):
145
+ return TranslateEndeWmt32k()
146
+
147
+ @property
148
+ def additional_training_datasets(self):
149
+ return _ENDE_PARACRAWL_DATASETS
150
+
151
+
152
+ @registry.register_problem
153
+ class TranslateEndeWmtCleanPc32k(TranslateEndeWmtPc32k):
154
+ """En-de translation trained on cleaned WMT plus Paracrawl."""
155
+
156
+ @property
157
+ def datatypes_to_clean(self):
158
+ return ["txt"]
159
+
160
+
161
+ @registry.register_problem
162
+ class TranslateEndeWmtPcClean32k(TranslateEndeWmtPc32k):
163
+ """En-de translation trained on WMT plus cleaned Paracrawl."""
164
+
165
+ @property
166
+ def datatypes_to_clean(self):
167
+ return ["tmx"]
168
+
169
+
170
+ @registry.register_problem
171
+ class TranslateEndeWmtCleanPcClean32k(TranslateEndeWmtPcClean32k):
172
+ """En-de translation trained on cleaned WMT plus cleaned Paracrawl."""
173
+
174
+ @property
175
+ def datatypes_to_clean(self):
176
+ return ["txt", "tmx"]
177
+
178
+
179
+ @registry.register_problem
180
+ class TranslateEndeWmt32kPacked(TranslateEndeWmt32k):
181
+
182
+ @property
183
+ def packed_length(self):
184
+ return 256
185
+
186
+ @property
187
+ def use_vocab_from_other_problem(self):
188
+ return TranslateEndeWmt32k()
189
+
190
+
191
+ @registry.register_problem
192
+ class TranslateEndeWmt8k(TranslateEndeWmt32k):
193
+ """Problem spec for WMT En-De translation."""
194
+
195
+ @property
196
+ def approx_vocab_size(self):
197
+ return 2**13 # 8192
198
+
199
+
200
+ @registry.register_problem
201
+ class TranslateEndeWmt8kPacked(TranslateEndeWmt8k):
202
+
203
+ @property
204
+ def packed_length(self):
205
+ return 256
206
+
207
+ @property
208
+ def use_vocab_from_other_problem(self):
209
+ return TranslateEndeWmt8k()
210
+
211
+
212
+ @registry.register_problem
213
+ class TranslateEndeWmtCharacters(TranslateEndeWmt8k):
214
+ """Problem spec for WMT En-De translation."""
215
+
216
+ @property
217
+ def vocab_type(self):
218
+ return text_problems.VocabType.CHARACTER
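The problem classes above all follow the same composition pattern: reuse a vocabulary via use_vocab_from_other_problem and extend the corpus list via additional_training_datasets. Below is a hedged sketch of a user-defined variant in that style; the extra corpus URL and file names are placeholders (not a real dataset), and the imports assume the repository root is on PYTHONPATH.

```python
from TensorFlow.nlp.transformer.data_generators import translate_ende
from TensorFlow.nlp.transformer.utils import registry

# Placeholder corpus entry, in the same [url, (en_file, de_file)] layout
# used by the dataset lists above.
_MY_EXTRA_DATASET = [
    ["http://example.com/my-extra-corpus.tgz",
     ("my-corpus.de-en.en", "my-corpus.de-en.de")],
]


@registry.register_problem
class TranslateEndeWmtPlusExtra32k(translate_ende.TranslateEndeWmt32k):
  """En-de translation trained on WMT plus a custom parallel corpus."""

  @property
  def use_vocab_from_other_problem(self):
    return translate_ende.TranslateEndeWmt32k()

  @property
  def additional_training_datasets(self):
    return _MY_EXTRA_DATASET
```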
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enfr.py ADDED
@@ -0,0 +1,235 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Data generators for translation data-sets."""
17
+
18
+ from __future__ import absolute_import
19
+ from __future__ import division
20
+ from __future__ import print_function
21
+
22
+ import os
23
+
24
+ from TensorFlow.nlp.transformer.data_generators import problem
25
+ from TensorFlow.nlp.transformer.data_generators import text_encoder
26
+ from TensorFlow.nlp.transformer.data_generators import text_problems
27
+ from TensorFlow.nlp.transformer.data_generators import translate
28
+ from TensorFlow.nlp.transformer.utils import registry
29
+
30
+
31
+ # End-of-sentence marker.
32
+ EOS = text_encoder.EOS_ID
33
+
34
+ _ENFR_TRAIN_SMALL_DATA = [
35
+ [
36
+ "https://s3.amazonaws.com/opennmt-trainingdata/baseline-1M-enfr.tgz",
37
+ ("baseline-1M-enfr/baseline-1M_train.en",
38
+ "baseline-1M-enfr/baseline-1M_train.fr")
39
+ ],
40
+ ]
41
+ _ENFR_TEST_SMALL_DATA = [
42
+ [
43
+ "https://s3.amazonaws.com/opennmt-trainingdata/baseline-1M-enfr.tgz",
44
+ ("baseline-1M-enfr/baseline-1M_valid.en",
45
+ "baseline-1M-enfr/baseline-1M_valid.fr")
46
+ ],
47
+ ]
48
+ _ENFR_TRAIN_LARGE_DATA = [
49
+ [
50
+ "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
51
+ ("commoncrawl.fr-en.en", "commoncrawl.fr-en.fr")
52
+ ],
53
+ [
54
+ "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
55
+ ("training/europarl-v7.fr-en.en", "training/europarl-v7.fr-en.fr")
56
+ ],
57
+ [
58
+ "http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz",
59
+ ("training/news-commentary-v9.fr-en.en",
60
+ "training/news-commentary-v9.fr-en.fr")
61
+ ],
62
+ [
63
+ "http://www.statmt.org/wmt10/training-giga-fren.tar",
64
+ ("giga-fren.release2.fixed.en.gz",
65
+ "giga-fren.release2.fixed.fr.gz")
66
+ ],
67
+ [
68
+ "http://www.statmt.org/wmt13/training-parallel-un.tgz",
69
+ ("un/undoc.2000.fr-en.en", "un/undoc.2000.fr-en.fr")
70
+ ],
71
+ ]
72
+ _ENFR_TEST_LARGE_DATA = [
73
+ [
74
+ "http://data.statmt.org/wmt17/translation-task/dev.tgz",
75
+ ("dev/newstest2013.en", "dev/newstest2013.fr")
76
+ ],
77
+ ]
78
+
79
+
80
+ @registry.register_problem
81
+ class TranslateEnfrWmtSmall8k(translate.TranslateProblem):
82
+ """Problem spec for WMT En-Fr translation."""
83
+
84
+ @property
85
+ def approx_vocab_size(self):
86
+ return 2**13 # 8192
87
+
88
+ @property
89
+ def use_small_dataset(self):
90
+ return True
91
+
92
+ def source_data_files(self, dataset_split):
93
+ train = dataset_split == problem.DatasetSplit.TRAIN
94
+ if self.use_small_dataset:
95
+ datasets = _ENFR_TRAIN_SMALL_DATA if train else _ENFR_TEST_SMALL_DATA
96
+ else:
97
+ datasets = _ENFR_TRAIN_LARGE_DATA if train else _ENFR_TEST_LARGE_DATA
98
+ return datasets
99
+
100
+ def vocab_data_files(self):
101
+ return (_ENFR_TRAIN_SMALL_DATA if self.use_small_dataset
102
+ else _ENFR_TRAIN_LARGE_DATA)
103
+
104
+
105
+ @registry.register_problem
106
+ class TranslateEnfrWmtSmall32k(TranslateEnfrWmtSmall8k):
107
+
108
+ @property
109
+ def approx_vocab_size(self):
110
+ return 2**15 # 32768
111
+
112
+
113
+ @registry.register_problem
114
+ class TranslateEnfrWmt8k(TranslateEnfrWmtSmall8k):
115
+
116
+ @property
117
+ def use_small_dataset(self):
118
+ return False
119
+
120
+
121
+ @registry.register_problem
122
+ class TranslateEnfrWmt32k(TranslateEnfrWmtSmall32k):
123
+
124
+ @property
125
+ def use_small_dataset(self):
126
+ return False
127
+
128
+
129
+ @registry.register_problem
130
+ class TranslateEnfrWmt32kPacked(TranslateEnfrWmt32k):
131
+
132
+ @property
133
+ def packed_length(self):
134
+ return 256
135
+
136
+ @property
137
+ def use_vocab_from_other_problem(self):
138
+ return TranslateEnfrWmt32k()
139
+
140
+
141
+ @registry.register_problem
142
+ class TranslateEnfrWmt32kWithBacktranslateFr(TranslateEnfrWmt32k):
143
+ """En-Fr translation with added French data, back-translated."""
144
+
145
+ @property
146
+ def use_vocab_from_other_problem(self):
147
+ return TranslateEnfrWmt32k()
148
+
149
+ @property
150
+ def already_shuffled(self):
151
+ return True
152
+
153
+ @property
154
+ def skip_random_fraction_when_training(self):
155
+ return False
156
+
157
+ @property
158
+ def backtranslate_data_filenames(self):
159
+ """List of pairs of files with matched back-translated data."""
160
+ # Files must be placed in tmp_dir, each similar size to authentic data.
161
+ return [("fr_mono_en.txt", "fr_mono_fr.txt")]
162
+
163
+ @property
164
+ def dataset_splits(self):
165
+ """Splits of data to produce and number of output shards for each."""
166
+ return [{
167
+ "split": problem.DatasetSplit.TRAIN,
168
+ "shards": 1, # Use just 1 shard so as to not mix data.
169
+ }, {
170
+ "split": problem.DatasetSplit.EVAL,
171
+ "shards": 1,
172
+ }]
173
+
174
+ def generate_samples(self, data_dir, tmp_dir, dataset_split):
175
+ datasets = self.source_data_files(dataset_split)
176
+ tag = "train" if dataset_split == problem.DatasetSplit.TRAIN else "dev"
177
+ data_path = translate.compile_data(
178
+ tmp_dir, datasets, "%s-compiled-%s" % (self.name, tag))
179
+ # For eval, use authentic data.
180
+ if dataset_split != problem.DatasetSplit.TRAIN:
181
+ for example in text_problems.text2text_txt_iterator(
182
+ data_path + ".lang1", data_path + ".lang2"):
183
+ yield example
184
+ else: # For training, mix synthetic and authentic data as follows.
185
+ for (file1, file2) in self.backtranslate_data_filenames:
186
+ path1 = os.path.join(tmp_dir, file1)
187
+ path2 = os.path.join(tmp_dir, file2)
188
+ # Synthetic data first.
189
+ for example in text_problems.text2text_txt_iterator(path1, path2):
190
+ yield example
191
+ # Now authentic data.
192
+ for example in text_problems.text2text_txt_iterator(
193
+ data_path + ".lang1", data_path + ".lang2"):
194
+ yield example
195
+
196
+
197
+ @registry.register_problem
198
+ class TranslateEnfrWmt32kWithBacktranslateEn(
199
+ TranslateEnfrWmt32kWithBacktranslateFr):
200
+ """En-Fr translation with added English data, back-translated."""
201
+
202
+ @property
203
+ def backtranslate_data_filenames(self):
204
+ """List of pairs of files with matched back-translated data."""
205
+ # Files must be placed in tmp_dir, each similar size to authentic data.
206
+ return [("en_mono_en.txt%d" % i, "en_mono_fr.txt%d" % i) for i in [0, 1, 2]]
207
+
208
+
209
+ @registry.register_problem
210
+ class TranslateEnfrWmtSmallCharacters(translate.TranslateProblem):
211
+ """Problem spec for WMT En-Fr translation."""
212
+
213
+ @property
214
+ def vocab_type(self):
215
+ return text_problems.VocabType.CHARACTER
216
+
217
+ @property
218
+ def use_small_dataset(self):
219
+ return True
220
+
221
+ def source_data_files(self, dataset_split):
222
+ train = dataset_split == problem.DatasetSplit.TRAIN
223
+ if self.use_small_dataset:
224
+ datasets = _ENFR_TRAIN_SMALL_DATA if train else _ENFR_TEST_SMALL_DATA
225
+ else:
226
+ datasets = _ENFR_TRAIN_LARGE_DATA if train else _ENFR_TEST_LARGE_DATA
227
+ return datasets
228
+
229
+
230
+ @registry.register_problem
231
+ class TranslateEnfrWmtCharacters(TranslateEnfrWmtSmallCharacters):
232
+
233
+ @property
234
+ def use_small_dataset(self):
235
+ return False
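The training branch of generate_samples in TranslateEnfrWmt32kWithBacktranslateFr above simply concatenates two streams: the back-translated (synthetic) file pairs first, then the authentic compiled data. A minimal, self-contained sketch of that mixing order is shown below; the compiled-data file names are placeholders.

```python
import itertools

def text2text_pairs(src_path, tgt_path):
    # Yield {"inputs", "targets"} dicts from a pair of parallel text files.
    with open(src_path) as src_f, open(tgt_path) as tgt_f:
        for src_line, tgt_line in zip(src_f, tgt_f):
            yield {"inputs": src_line.strip(), "targets": tgt_line.strip()}

def mixed_training_stream(synthetic_pair, authentic_pair):
    # Synthetic (back-translated) data first, authentic data second.
    return itertools.chain(text2text_pairs(*synthetic_pair),
                           text2text_pairs(*authentic_pair))

stream = mixed_training_stream(("fr_mono_en.txt", "fr_mono_fr.txt"),
                               ("compiled-train.lang1", "compiled-train.lang2"))
```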
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enro.py ADDED
@@ -0,0 +1,142 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Data generators for translation data-sets."""
17
+
18
+ from __future__ import absolute_import
19
+ from __future__ import division
20
+ from __future__ import print_function
21
+
22
+ import random
23
+
24
+ from TensorFlow.nlp.transformer.data_generators import problem
25
+ from TensorFlow.nlp.transformer.data_generators import text_problems
26
+ from TensorFlow.nlp.transformer.data_generators import translate
27
+ from TensorFlow.nlp.transformer.utils import registry
28
+
29
+
30
+ _ENRO_TRAIN_DATASETS = [
31
+ [
32
+ "http://www.statmt.org/europarl/v7/ro-en.tgz",
33
+ ("europarl-v7.ro-en.en", "europarl-v7.ro-en.ro")
34
+ ],
35
+ [
36
+ "http://opus.nlpl.eu/download.php?f=SETIMES/v2/moses/en-ro.txt.zip",
37
+ ("SETIMES.en-ro.en", "SETIMES.en-ro.ro")
38
+ ]
39
+ ]
40
+ _ENRO_TEST_DATASETS = [
41
+ [
42
+ ("http://data.statmt.org/wmt16/translation-task/"
43
+ "dev-romanian-updated.tgz"),
44
+ ("dev/newsdev2016-roen-ref.en.sgm", "dev/newsdev2016-roen-src.ro.sgm")
45
+ ],
46
+ ]
47
+
48
+
49
+ @registry.register_problem
50
+ class TranslateEnroWmt8k(translate.TranslateProblem):
51
+ """Problem spec for WMT En-Ro translation."""
52
+
53
+ @property
54
+ def approx_vocab_size(self):
55
+ return 2**13 # 8192
56
+
57
+ def source_data_files(self, dataset_split):
58
+ train = dataset_split == problem.DatasetSplit.TRAIN
59
+ return _ENRO_TRAIN_DATASETS if train else _ENRO_TEST_DATASETS
60
+
61
+
62
+ @registry.register_problem
63
+ class TranslateEnroWmt32k(TranslateEnroWmt8k):
64
+
65
+ @property
66
+ def approx_vocab_size(self):
67
+ return 2**15 # 32768
68
+
69
+
70
+ @registry.register_problem
71
+ class TranslateEnroWmtCharacters(TranslateEnroWmt8k):
72
+ """Problem spec for WMT En-Ro translation."""
73
+
74
+ @property
75
+ def vocab_type(self):
76
+ return text_problems.VocabType.CHARACTER
77
+
78
+
79
+ @registry.register_problem
80
+ class TranslateEnroWmtMulti64k(TranslateEnroWmt8k):
81
+ """Translation with muli-lingual vocabulary."""
82
+
83
+ @property
84
+ def use_vocab_from_other_problem(self):
85
+ return wiki_lm.LanguagemodelDeEnFrRoWiki64k()
86
+
87
+
88
+ @registry.register_problem
89
+ class TranslateEnroWmtMultiSmall64k(TranslateEnroWmt8k):
90
+ """Translation with muli-lingual vocabulary, small (6K) training data."""
91
+
92
+ @property
93
+ def dataset_splits(self):
94
+ """Splits of data to produce and number of output shards for each."""
95
+ return [{
96
+ "split": problem.DatasetSplit.TRAIN,
97
+ "shards": 16, # It's a small dataset, TPUs like at least a few shards.
98
+ }, {
99
+ "split": problem.DatasetSplit.EVAL,
100
+ "shards": 1,
101
+ }]
102
+
103
+ @property
104
+ def use_vocab_from_other_problem(self):
105
+ return wiki_lm.LanguagemodelDeEnFrRoWiki64k()
106
+
107
+ @property
108
+ def how_many_examples_to_sample(self):
109
+ return 6000
110
+
111
+ def generate_samples(self, data_dir, tmp_dir, dataset_split):
112
+ """Generate just the first 6k samples for training."""
113
+ # If not training, do the same as before.
114
+ if dataset_split != problem.DatasetSplit.TRAIN:
115
+ for x in super(TranslateEnroWmtMultiSmall64k, self).generate_samples(
116
+ data_dir, tmp_dir, dataset_split):
117
+ yield x
118
+ return
119
+ # Now we assume we're training.
120
+ counter = 0
121
+ # The size of this data-set in total is around 614K, we want to sample so
122
+ # that in expectation we take the requested number of samples in 1 go.
123
+ sample_prob = self.how_many_examples_to_sample / float(614000)
124
+ # Let's sample.
125
+ for x in super(TranslateEnroWmtMultiSmall64k, self).generate_samples(
126
+ data_dir, tmp_dir, dataset_split):
127
+ if random.random() > sample_prob:
128
+ continue
129
+ counter += 1
130
+ if counter > self.how_many_examples_to_sample:
131
+ return
132
+ yield x
133
+ # We do it again if we don't have enough samples.
134
+ if counter < self.how_many_examples_to_sample:
135
+ for x in super(TranslateEnroWmtMultiSmall64k, self).generate_samples(
136
+ data_dir, tmp_dir, dataset_split):
137
+ if random.random() > sample_prob:
138
+ continue
139
+ counter += 1
140
+ if counter > self.how_many_examples_to_sample:
141
+ return
142
+ yield x
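The sampling logic in TranslateEnroWmtMultiSmall64k keeps each training example with probability target/total, so one pass yields roughly the requested 6,000 examples in expectation. A self-contained sketch of that idea on a synthetic stream:

```python
import random

def subsample(stream, target, total):
    # Keep each item with probability target/total; stop once enough are kept.
    keep_prob = target / float(total)
    kept = 0
    for item in stream:
        if random.random() > keep_prob:
            continue
        kept += 1
        if kept > target:
            return  # ends the generator (the Python 3 safe way)
        yield item

samples = list(subsample(range(614000), target=6000, total=614000))
print(len(samples))  # approximately 6000
```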
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_envi.py ADDED
@@ -0,0 +1,58 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Data generators for En-Vi translation."""
17
+
18
+ from __future__ import absolute_import
19
+ from __future__ import division
20
+ from __future__ import print_function
21
+ from TensorFlow.nlp.transformer.data_generators import problem
22
+ from TensorFlow.nlp.transformer.data_generators import text_encoder
23
+ from TensorFlow.nlp.transformer.data_generators import translate
24
+ from TensorFlow.nlp.transformer.utils import registry
25
+
26
+ # End-of-sentence marker.
27
+ EOS = text_encoder.EOS_ID
28
+
29
+ # For English-Vietnamese the IWSLT'15 corpus
30
+ # from https://nlp.stanford.edu/projects/nmt/ is used.
31
+ # The original dataset has 133K parallel sentences.
32
+ _ENVI_TRAIN_DATASETS = [[
33
+ "https://github.com/stefan-it/nmt-en-vi/raw/master/data/train-en-vi.tgz", # pylint: disable=line-too-long
34
+ ("train.en", "train.vi")
35
+ ]]
36
+
37
+ # For development 1,553 parallel sentences are used.
38
+ _ENVI_TEST_DATASETS = [[
39
+ "https://github.com/stefan-it/nmt-en-vi/raw/master/data/dev-2012-en-vi.tgz", # pylint: disable=line-too-long
40
+ ("tst2012.en", "tst2012.vi")
41
+ ]]
42
+
43
+
44
+ # See this PR on github for some results with Transformer on this Problem.
45
+ # https://github.com/tensorflow/tensor2tensor/pull/611
46
+
47
+
48
+ @registry.register_problem
49
+ class TranslateEnviIwslt32k(translate.TranslateProblem):
50
+ """Problem spec for IWSLT'15 En-Vi translation."""
51
+
52
+ @property
53
+ def approx_vocab_size(self):
54
+ return 2**15 # 32768
55
+
56
+ def source_data_files(self, dataset_split):
57
+ train = dataset_split == problem.DatasetSplit.TRAIN
58
+ return _ENVI_TRAIN_DATASETS if train else _ENVI_TEST_DATASETS
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/translate_enzh.py ADDED
@@ -0,0 +1,280 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Data generators for translation data-sets."""
17
+
18
+ from __future__ import absolute_import
19
+ from __future__ import division
20
+ from __future__ import print_function
21
+
22
+ import os
23
+ from TensorFlow.nlp.transformer.data_generators import generator_utils
24
+ from TensorFlow.nlp.transformer.data_generators import problem
25
+ from TensorFlow.nlp.transformer.data_generators import text_encoder
26
+ from TensorFlow.nlp.transformer.data_generators import text_problems
27
+ from TensorFlow.nlp.transformer.data_generators import translate
28
+ from TensorFlow.nlp.transformer.utils import registry
29
+
30
+ import tensorflow.compat.v1 as tf
31
+
32
+
33
+ # End-of-sentence marker.
34
+ EOS = text_encoder.EOS_ID
35
+
36
+ # This is far from being the real WMT18 task - only a toy set is used here;
37
+ # you need to register to get the UN and CWMT data. Also, by convention,
38
+ # this is EN to ZH - use translate_enzh_wmt8k_rev for the ZH to EN task.
39
+ #
40
+ # News Commentary, around 252k lines.
41
+ # This dataset is only a small fraction of the full WMT18 task.
42
+ _STAT_MT_URL = "http://data.statmt.org/wmt18/translation-task/"
43
+ _NC_TRAIN_DATASETS = [[
44
+ _STAT_MT_URL + "training-parallel-nc-v13.tgz", [
45
+ "training-parallel-nc-v13/news-commentary-v13.zh-en.en",
46
+ "training-parallel-nc-v13/news-commentary-v13.zh-en.zh"
47
+ ]
48
+ ]]
49
+
50
+ # Test set from News Commentary. 2000 lines
51
+ _NC_TEST_DATASETS = [[
52
+ _STAT_MT_URL + "dev.tgz",
53
+ ("dev/newsdev2017-enzh-src.en.sgm", "dev/newsdev2017-enzh-ref.zh.sgm")
54
+ ]]
55
+
56
+ # UN parallel corpus. 15,886,041 lines
57
+ # Visit source website to download manually:
58
+ # https://conferences.unite.un.org/UNCorpus
59
+ #
60
+ # NOTE: You need to register to download dataset from official source
61
+ # place into tmp directory e.g. /tmp/t2t_datagen/dataset.tgz
62
+ _UN_TRAIN_DATASETS = [[
63
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/UNv1.0.en-zh.tar"
64
+ ".gz", ["en-zh/UNv1.0.en-zh.en", "en-zh/UNv1.0.en-zh.zh"]
65
+ ]]
66
+
67
+ # CWMT corpus
68
+ # Visit source website to download manually:
69
+ # http://nlp.nju.edu.cn/cwmt-wmt/
70
+ #
71
+ # casia2015: 1,050,000 lines
72
+ # casict2015: 2,036,833 lines
73
+ # datum2015: 1,000,003 lines
74
+ # datum2017: 1,999,968 lines
75
+ # NEU2017: 2,000,000 lines
76
+ #
77
+ # NOTE: You need to register to download dataset from official source
78
+ # place into tmp directory e.g. /tmp/t2t_datagen/dataset.tgz
79
+
80
+ _CWMT_TRAIN_DATASETS = [[
81
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
82
+ ["cwmt/casia2015/casia2015_en.txt", "cwmt/casia2015/casia2015_ch.txt"]
83
+ ], [
84
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
85
+ ["cwmt/casict2015/casict2015_en.txt", "cwmt/casict2015/casict2015_ch.txt"]
86
+ ], [
87
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
88
+ ["cwmt/neu2017/NEU_en.txt", "cwmt/neu2017/NEU_cn.txt"]
89
+ ], [
90
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
91
+ ["cwmt/datum2015/datum_en.txt", "cwmt/datum2015/datum_ch.txt"]
92
+ ], [
93
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
94
+ ["cwmt/datum2017/Book1_en.txt", "cwmt/datum2017/Book1_cn.txt"]
95
+ ], [
96
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
97
+ ["cwmt/datum2017/Book2_en.txt", "cwmt/datum2017/Book2_cn.txt"]
98
+ ], [
99
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
100
+ ["cwmt/datum2017/Book3_en.txt", "cwmt/datum2017/Book3_cn.txt"]
101
+ ], [
102
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
103
+ ["cwmt/datum2017/Book4_en.txt", "cwmt/datum2017/Book4_cn.txt"]
104
+ ], [
105
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
106
+ ["cwmt/datum2017/Book5_en.txt", "cwmt/datum2017/Book5_cn.txt"]
107
+ ], [
108
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
109
+ ["cwmt/datum2017/Book6_en.txt", "cwmt/datum2017/Book6_cn.txt"]
110
+ ], [
111
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
112
+ ["cwmt/datum2017/Book7_en.txt", "cwmt/datum2017/Book7_cn.txt"]
113
+ ], [
114
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
115
+ ["cwmt/datum2017/Book8_en.txt", "cwmt/datum2017/Book8_cn.txt"]
116
+ ], [
117
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
118
+ ["cwmt/datum2017/Book9_en.txt", "cwmt/datum2017/Book9_cn.txt"]
119
+ ], [
120
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
121
+ ["cwmt/datum2017/Book10_en.txt", "cwmt/datum2017/Book10_cn.txt"]
122
+ ], [
123
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
124
+ ["cwmt/datum2017/Book11_en.txt", "cwmt/datum2017/Book11_cn.txt"]
125
+ ], [
126
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
127
+ ["cwmt/datum2017/Book12_en.txt", "cwmt/datum2017/Book12_cn.txt"]
128
+ ], [
129
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
130
+ ["cwmt/datum2017/Book13_en.txt", "cwmt/datum2017/Book13_cn.txt"]
131
+ ], [
132
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
133
+ ["cwmt/datum2017/Book14_en.txt", "cwmt/datum2017/Book14_cn.txt"]
134
+ ], [
135
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
136
+ ["cwmt/datum2017/Book15_en.txt", "cwmt/datum2017/Book15_cn.txt"]
137
+ ], [
138
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
139
+ ["cwmt/datum2017/Book16_en.txt", "cwmt/datum2017/Book16_cn.txt"]
140
+ ], [
141
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
142
+ ["cwmt/datum2017/Book17_en.txt", "cwmt/datum2017/Book17_cn.txt"]
143
+ ], [
144
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
145
+ ["cwmt/datum2017/Book18_en.txt", "cwmt/datum2017/Book18_cn.txt"]
146
+ ], [
147
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
148
+ ["cwmt/datum2017/Book19_en.txt", "cwmt/datum2017/Book19_cn.txt"]
149
+ ], [
150
+ "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz",
151
+ ["cwmt/datum2017/Book20_en.txt", "cwmt/datum2017/Book20_cn.txt"]
152
+ ]]
153
+
154
+
155
+ def get_filename(dataset):
156
+ return dataset[0][0].split("/")[-1]
157
+
158
+
159
+ @registry.register_problem
160
+ class TranslateEnzhWmt32k(translate.TranslateProblem):
161
+ """Problem spec for WMT En-Zh translation.
162
+
163
+ Attempts to use the full training dataset, which requires website
164
+ registration and manual download from the official sources:
165
+
166
+ CWMT:
167
+ - http://nlp.nju.edu.cn/cwmt-wmt/
168
+ - Website contains instructions for FTP server access.
169
+ - You'll need to download CASIA, CASICT, DATUM2015, DATUM2017,
170
+ NEU datasets
171
+
172
+ UN Parallel Corpus:
173
+ - https://conferences.unite.un.org/UNCorpus
174
+ - You'll need to register to download the dataset.
175
+
176
+ NOTE: place into tmp directory e.g. /tmp/t2t_datagen/dataset.tgz
177
+ """
178
+
179
+ @property
180
+ def approx_vocab_size(self):
181
+ return 2**15 # 32k
182
+
183
+ @property
184
+ def source_vocab_name(self):
185
+ return "%s.en" % self.vocab_filename
186
+
187
+ @property
188
+ def target_vocab_name(self):
189
+ return "%s.zh" % self.vocab_filename
190
+
191
+ def get_training_dataset(self, tmp_dir):
192
+ """UN Parallel Corpus and CWMT Corpus need to be downloaded manually.
193
+
194
+ Datasets found in tmp_dir are appended to the training dataset.
195
+
196
+ Args:
197
+ tmp_dir: path to temporary dir with the data in it.
198
+
199
+ Returns:
200
+ A list of training dataset entries (download URL and file paths).
201
+ """
202
+ full_dataset = _NC_TRAIN_DATASETS
203
+ for dataset in [_CWMT_TRAIN_DATASETS, _UN_TRAIN_DATASETS]:
204
+ filename = get_filename(dataset)
205
+ tmp_filepath = os.path.join(tmp_dir, filename)
206
+ if tf.gfile.Exists(tmp_filepath):
207
+ full_dataset += dataset
208
+ else:
209
+ tf.logging.info("[TranslateEzhWmt] dataset incomplete, you need to "
210
+ "manually download %s" % filename)
211
+ return full_dataset
212
+
213
+ def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
214
+ train = dataset_split == problem.DatasetSplit.TRAIN
215
+ train_dataset = self.get_training_dataset(tmp_dir)
216
+ datasets = train_dataset if train else _NC_TEST_DATASETS
217
+ source_datasets = [[item[0], [item[1][0]]] for item in train_dataset]
218
+ target_datasets = [[item[0], [item[1][1]]] for item in train_dataset]
219
+ source_vocab = generator_utils.get_or_generate_vocab(
220
+ data_dir,
221
+ tmp_dir,
222
+ self.source_vocab_name,
223
+ self.approx_vocab_size,
224
+ source_datasets,
225
+ file_byte_budget=1e8,
226
+ max_subtoken_length=self.max_subtoken_length)
227
+ target_vocab = generator_utils.get_or_generate_vocab(
228
+ data_dir,
229
+ tmp_dir,
230
+ self.target_vocab_name,
231
+ self.approx_vocab_size,
232
+ target_datasets,
233
+ file_byte_budget=1e8,
234
+ max_subtoken_length=self.max_subtoken_length)
235
+ tag = "train" if train else "dev"
236
+ filename_base = "wmt_enzh_%sk_tok_%s" % (self.approx_vocab_size, tag)
237
+ data_path = translate.compile_data(tmp_dir, datasets, filename_base)
238
+ return text_problems.text2text_generate_encoded(
239
+ text_problems.text2text_txt_iterator(data_path + ".lang1",
240
+ data_path + ".lang2"),
241
+ source_vocab, target_vocab)
242
+
243
+ def feature_encoders(self, data_dir):
244
+ source_vocab_filename = os.path.join(data_dir, self.source_vocab_name)
245
+ target_vocab_filename = os.path.join(data_dir, self.target_vocab_name)
246
+ source_token = text_encoder.SubwordTextEncoder(source_vocab_filename)
247
+ target_token = text_encoder.SubwordTextEncoder(target_vocab_filename)
248
+ return {
249
+ "inputs": source_token,
250
+ "targets": target_token,
251
+ }
252
+
253
+
254
+ @registry.register_problem
255
+ class TranslateEnzhWmt8k(TranslateEnzhWmt32k):
256
+ """Problem spec for WMT En-Zh translation.
257
+
258
+ This is far from being the real WMT17 task - only a toy set is used here.
259
+ """
260
+
261
+ @property
262
+ def approx_vocab_size(self):
263
+ return 2**13 # 8192
264
+
265
+ @property
266
+ def dataset_splits(self):
267
+ return [
268
+ {
269
+ "split": problem.DatasetSplit.TRAIN,
270
+ "shards": 10, # this is a small dataset
271
+ },
272
+ {
273
+ "split": problem.DatasetSplit.EVAL,
274
+ "shards": 1,
275
+ }
276
+ ]
277
+
278
+ def get_training_dataset(self, tmp_dir):
279
+ """Uses only News Commentary Dataset for training."""
280
+ return _NC_TRAIN_DATASETS
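
A minimal usage sketch (not part of the committed diff): once the problem classes above are registered, data generation is normally driven through the registry, either via datagen.py below or directly as in this hypothetical snippet. The directory paths are placeholders, and "translate_enzh_wmt8k" assumes the registry's usual snake_case naming for TranslateEnzhWmt8k.

# Hypothetical driver; assumes the raw corpora were placed under tmp_dir as the
# class docstring describes (e.g. /tmp/t2t_datagen/<corpus>.tgz).
from TensorFlow.nlp.transformer.utils import registry

problem = registry.problem("translate_enzh_wmt8k")   # smaller, NC-only variant
problem.generate_data(data_dir="/tmp/t2t_data",      # TFRecords and vocab files go here
                      tmp_dir="/tmp/t2t_datagen",    # raw/downloaded corpora live here
                      task_id=-1)
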
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/datagen.py ADDED
@@ -0,0 +1,252 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # coding=utf-8
4
+ # Copyright 2021 The Tensor2Tensor Authors.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ ###############################################################################
18
+ # Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
19
+ ###############################################################################
20
+ # Changes:
21
+ # - added shebang
22
+ # - organized imports
23
+ # - removed unsupported problem generators
24
+ # - renamed from t2t_datagen.py to datagen.py
25
+
26
+ """Produces the training and dev data for --problem into --data_dir.
27
+
28
+ Produces sharded and shuffled TFRecord files of tensorflow.Example protocol
29
+ buffers for a variety of registered datasets.
30
+
31
+ All Problems are registered with @registry.register_problem or are in
32
+ _SUPPORTED_PROBLEM_GENERATORS in this file. Each entry maps a string name
33
+ (selectable on the command-line with --problem) to a function that takes 2
34
+ arguments - input_directory and mode (one of "train" or "dev") - and yields for
35
+ each training example a dictionary mapping string feature names to lists of
36
+ {string, int, float}. The generator will be run once for each mode.
37
+ """
38
+ from __future__ import absolute_import
39
+ from __future__ import division
40
+ from __future__ import print_function
41
+
42
+ import multiprocessing
43
+ import os
44
+ import random
45
+ import tempfile
46
+
47
+ import numpy as np
48
+
49
+ from TensorFlow.nlp.transformer.utils import problems as problems_lib # pylint: disable=unused-import
50
+ from TensorFlow.nlp.transformer.data_generators import generator_utils
51
+ from TensorFlow.nlp.transformer.utils import registry
52
+ from TensorFlow.nlp.transformer.utils import usr_dir
53
+
54
+ # Importing here to prevent pylint's ungrouped-imports warning.
55
+ import tensorflow.compat.v1 as tf # pylint: disable=g-import-not-at-top
56
+
57
+ flags = tf.flags
58
+ FLAGS = flags.FLAGS
59
+
60
+ flags.DEFINE_string("data_dir", "", "Data directory.")
61
+ flags.DEFINE_string("tmp_dir", "/tmp/t2t_datagen",
62
+ "Temporary storage directory.")
63
+ flags.DEFINE_string("problem", "",
64
+ "The name of the problem to generate data for.")
65
+ flags.DEFINE_string("exclude_problems", "",
66
+ "Comma-separates list of problems to exclude.")
67
+ flags.DEFINE_integer(
68
+ "num_shards", 0, "How many shards to use. Ignored for "
69
+ "registered Problems.")
70
+ flags.DEFINE_integer("max_cases", 0,
71
+ "Maximum number of cases to generate (unbounded if 0).")
72
+ flags.DEFINE_integer(
73
+ "env_problem_max_env_steps", 0,
74
+ "Maximum number of steps to take for environment-based problems. "
75
+ "Actions are chosen randomly")
76
+ flags.DEFINE_integer(
77
+ "env_problem_batch_size", 0,
78
+ "Number of environments to simulate for environment-based problems.")
79
+ flags.DEFINE_bool("only_list", False,
80
+ "If true, we only list the problems that will be generated.")
81
+ flags.DEFINE_integer("random_seed", 429459, "Random seed to use.")
82
+ flags.DEFINE_integer("task_id", -1, "For distributed data generation.")
83
+ flags.DEFINE_integer("task_id_start", -1, "For distributed data generation.")
84
+ flags.DEFINE_integer("task_id_end", -1, "For distributed data generation.")
85
+ flags.DEFINE_integer(
86
+ "num_concurrent_processes", None,
87
+ "Applies only to problems for which multiprocess_generate=True.")
88
+ flags.DEFINE_string(
89
+ "t2t_usr_dir", "", "Path to a Python module that will be imported. The "
90
+ "__init__.py file should include the necessary imports. "
91
+ "The imported files should contain registrations, "
92
+ "e.g. @registry.register_problem calls, that will then be "
93
+ "available to t2t-datagen.")
94
+ flags.DEFINE_bool("with_padding", False, "If true dataset features will be padded")
95
+
96
+ # Mapping from problems that we can generate data for to their generators.
97
+ _SUPPORTED_PROBLEM_GENERATORS = {}
98
+
99
+
100
+ def set_random_seed():
101
+ """Set the random seed from flag everywhere."""
102
+ tf.set_random_seed(FLAGS.random_seed)
103
+ random.seed(FLAGS.random_seed)
104
+ np.random.seed(FLAGS.random_seed)
105
+
106
+
107
+ def main(_):
108
+ usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
109
+
110
+ # Calculate the list of problems to generate.
111
+ problems = sorted(
112
+ list(_SUPPORTED_PROBLEM_GENERATORS) + registry.list_base_problems() +
113
+ registry.list_env_problems())
114
+ for exclude in FLAGS.exclude_problems.split(","):
115
+ if exclude:
116
+ problems = [p for p in problems if exclude not in p]
117
+ if FLAGS.problem and FLAGS.problem[-1] == "*":
118
+ problems = [p for p in problems if p.startswith(FLAGS.problem[:-1])]
119
+ elif FLAGS.problem and "," in FLAGS.problem:
120
+ problems = [p for p in problems if p in FLAGS.problem.split(",")]
121
+ elif FLAGS.problem:
122
+ problems = [p for p in problems if p == FLAGS.problem]
123
+ else:
124
+ problems = []
125
+
126
+ # Remove TIMIT if paths are not given.
127
+ if getattr(FLAGS, "timit_paths", None):
128
+ problems = [p for p in problems if "timit" not in p]
129
+ # Remove parsing if paths are not given.
130
+ if getattr(FLAGS, "parsing_path", None):
131
+ problems = [p for p in problems if "parsing_english_ptb" not in p]
132
+
133
+ if not problems:
134
+ problems_str = "\n * ".join(
135
+ sorted(
136
+ list(_SUPPORTED_PROBLEM_GENERATORS) +
137
+ registry.list_base_problems() + registry.list_env_problems()))
138
+ error_msg = ("You must specify one of the supported problems to "
139
+ "generate data for:\n * " + problems_str + "\n")
140
+ error_msg += ("TIMIT and parsing need data_sets specified with "
141
+ "--timit_paths and --parsing_path.")
142
+ raise ValueError(error_msg)
143
+
144
+ if not FLAGS.data_dir:
145
+ FLAGS.data_dir = tempfile.gettempdir()
146
+ tf.logging.warning(
147
+ "It is strongly recommended to specify --data_dir. "
148
+ "Data will be written to default data_dir=%s.", FLAGS.data_dir)
149
+ FLAGS.data_dir = os.path.expanduser(FLAGS.data_dir)
150
+ tf.gfile.MakeDirs(FLAGS.data_dir)
151
+
152
+ tf.logging.info("Generating problems:\n%s" %
153
+ registry.display_list_by_prefix(problems, starting_spaces=4))
154
+ if FLAGS.only_list:
155
+ return
156
+ for problem in problems:
157
+ set_random_seed()
158
+
159
+ if problem in _SUPPORTED_PROBLEM_GENERATORS:
160
+ generate_data_for_problem(problem)
161
+ elif problem in registry.list_base_problems():
162
+ generate_data_for_registered_problem(problem)
163
+ elif problem in registry.list_env_problems():
164
+ generate_data_for_env_problem(problem)
165
+ else:
166
+ tf.logging.error("Problem %s is not a supported problem for datagen.",
167
+ problem)
168
+
169
+
170
+ def generate_data_for_problem(problem):
171
+ """Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS."""
172
+ training_gen, dev_gen, test_gen = _SUPPORTED_PROBLEM_GENERATORS[problem]
173
+
174
+ num_train_shards = FLAGS.num_shards or 10
175
+ tf.logging.info("Generating training data for %s.", problem)
176
+ train_output_files = generator_utils.train_data_filenames(
177
+ problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
178
+ num_train_shards)
179
+ generator_utils.generate_files(training_gen(), train_output_files,
180
+ FLAGS.max_cases)
181
+ num_dev_shards = int(num_train_shards * 0.1)
182
+ tf.logging.info("Generating development data for %s.", problem)
183
+ dev_output_files = generator_utils.dev_data_filenames(
184
+ problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
185
+ num_dev_shards)
186
+ generator_utils.generate_files(dev_gen(), dev_output_files)
187
+ num_test_shards = int(num_train_shards * 0.1)
188
+ test_output_files = []
189
+ test_gen_data = test_gen()
190
+ if test_gen_data is not None:
191
+ tf.logging.info("Generating test data for %s.", problem)
192
+ test_output_files = generator_utils.test_data_filenames(
193
+ problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
194
+ num_test_shards)
195
+ generator_utils.generate_files(test_gen_data, test_output_files)
196
+ all_output_files = train_output_files + dev_output_files + test_output_files
197
+ generator_utils.shuffle_dataset(all_output_files)
198
+
199
+
200
+ def generate_data_in_process(arg):
201
+ problem_name, data_dir, tmp_dir, task_id = arg
202
+ problem = registry.problem(problem_name)
203
+ problem.generate_data(data_dir, tmp_dir, task_id)
204
+
205
+
206
+ def generate_data_for_env_problem(problem_name):
207
+ """Generate data for `EnvProblem`s."""
208
+ assert FLAGS.env_problem_max_env_steps > 0, ("--env_problem_max_env_steps "
209
+ "should be greater than zero")
210
+ assert FLAGS.env_problem_batch_size > 0, ("--env_problem_batch_size should be"
211
+ " greather than zero")
212
+ problem = registry.env_problem(problem_name)
213
+ task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
214
+ data_dir = os.path.expanduser(FLAGS.data_dir)
215
+ tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
216
+ # TODO(msaffar): Handle large values for env_problem_batch_size where we
217
+ # cannot create that many environments within the same process.
218
+ problem.initialize(batch_size=FLAGS.env_problem_batch_size)
219
+ env_problem_utils.play_env_problem_randomly(
220
+ problem, num_steps=FLAGS.env_problem_max_env_steps)
221
+ problem.generate_data(data_dir=data_dir, tmp_dir=tmp_dir, task_id=task_id)
222
+
223
+
224
+ def generate_data_for_registered_problem(problem_name):
225
+ """Generate data for a registered problem."""
226
+ tf.logging.info("Generating data for %s.", problem_name)
227
+ if FLAGS.num_shards:
228
+ raise ValueError("--num_shards should not be set for registered Problem.")
229
+ problem = registry.problem(problem_name)
230
+ task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
231
+ data_dir = os.path.expanduser(FLAGS.data_dir)
232
+ tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
233
+ if task_id is None and problem.multiprocess_generate:
234
+ if FLAGS.task_id_start != -1:
235
+ assert FLAGS.task_id_end != -1
236
+ task_id_start = FLAGS.task_id_start
237
+ task_id_end = FLAGS.task_id_end
238
+ else:
239
+ task_id_start = 0
240
+ task_id_end = problem.num_generate_tasks
241
+ pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes)
242
+ problem.prepare_to_generate(data_dir, tmp_dir)
243
+ args = [(problem_name, data_dir, tmp_dir, task_id)
244
+ for task_id in range(task_id_start, task_id_end)]
245
+ pool.map(generate_data_in_process, args)
246
+ else:
247
+ problem.generate_data(data_dir, tmp_dir, task_id)
248
+
249
+
250
+ if __name__ == "__main__":
251
+ tf.logging.set_verbosity(tf.logging.INFO)
252
+ tf.app.run()
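
For illustration only: `_SUPPORTED_PROBLEM_GENERATORS` is left empty in this port, and note that the module docstring describes two-argument generators while `generate_data_for_problem` actually unpacks a 3-tuple of zero-argument callables. A hypothetical entry following the code path would look like the sketch below (names and values are made up).

# Hypothetical entry matching how generate_data_for_problem() consumes the dict:
# (train_gen, dev_gen, test_gen), each a zero-argument callable yielding
# feature-name -> list dictionaries.
def _toy_train_gen():
    for i in range(1000):
        yield {"inputs": [i % 7 + 2], "targets": [(3 * i) % 7 + 2]}

def _toy_dev_gen():
    for i in range(100):
        yield {"inputs": [i % 7 + 2], "targets": [(3 * i) % 7 + 2]}

# _SUPPORTED_PROBLEM_GENERATORS["my_toy_copy_problem"] = (
#     _toy_train_gen, _toy_dev_gen, lambda: None)   # a test_gen returning None is skipped
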
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/decoder.py ADDED
@@ -0,0 +1,269 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # coding=utf-8
4
+ # Copyright 2021 The Tensor2Tensor Authors.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ ###############################################################################
18
+ # Copyright (C) 2021-2022 Habana Labs, Ltd. an Intel Company
19
+ ###############################################################################
20
+ # Changes:
21
+ # - renamed from t2t_decoder.py to decoder.py
22
+ # - added shebang
23
+ # - organized imports
24
+ # - added support for HPU
25
+ # - renamed t2t_trainer to trainer
26
+ # - added use_hpu hparam
27
+ # - added workarounds to run on HPU
28
+ # - added support for recipe cache
29
+ # - added support for fast inference
30
+ # - added support for horovod
31
+
32
+ r"""Decode from trained T2T models.
33
+
34
+ This binary performs inference using the Estimator API.
35
+
36
+ Example usage to decode from dataset:
37
+
38
+ ./decoder.py \
39
+ --data_dir ~/data \
40
+ --problem=algorithmic_identity_binary40 \
41
+ --model=transformer \
42
+ --hparams_set=transformer_base
43
+
44
+ Set FLAGS.decode_interactive or FLAGS.decode_from_file for alternative decode
45
+ sources.
46
+ """
47
+ from __future__ import absolute_import
48
+ from __future__ import division
49
+ from __future__ import print_function
50
+
51
+ import os
52
+ import shutil
53
+ from TensorFlow.nlp.transformer import trainer
54
+ from TensorFlow.nlp.transformer.data_generators import problem # pylint: disable=unused-import
55
+ from TensorFlow.nlp.transformer.data_generators import text_encoder
56
+ from TensorFlow.nlp.transformer.utils import decoding
57
+ from TensorFlow.nlp.transformer.utils import registry
58
+ from TensorFlow.nlp.transformer.utils import trainer_lib
59
+ from TensorFlow.nlp.transformer.utils import usr_dir
60
+
61
+ import tensorflow.compat.v1 as tf
62
+
63
+ flags = tf.flags
64
+ FLAGS = flags.FLAGS
65
+
66
+ # Additional flags in trainer.py and utils/flags.py
67
+ flags.DEFINE_string("checkpoint_path", None,
68
+ "Path to the model checkpoint. Overrides output_dir.")
69
+ flags.DEFINE_bool("keep_timestamp", False,
70
+ "Set the mtime of the decoded file to the "
71
+ "checkpoint_path+'.index' mtime.")
72
+ flags.DEFINE_bool("decode_interactive", False,
73
+ "Interactive local inference mode.")
74
+ flags.DEFINE_integer("decode_shards", 1, "Number of decoding replicas.")
75
+ flags.DEFINE_string("score_file", "", "File to score. Each line in the file "
76
+ "must be in the format input \t target.")
77
+ flags.DEFINE_bool("decode_in_memory", False, "Decode in memory.")
78
+ flags.DEFINE_bool("disable_grappler_optimizations", False,
79
+ "Disable Grappler if need be to avoid tensor format errors.")
80
+ flags.DEFINE_bool("use_fast_inference", True, "Use fast inference with static shapes")
81
+
82
+ def create_hparams():
83
+ hparams_path = None
84
+ if FLAGS.output_dir:
85
+ hparams_path = os.path.join(FLAGS.output_dir, "hparams.json")
86
+ return trainer_lib.create_hparams(
87
+ FLAGS.hparams_set,
88
+ FLAGS.hparams,
89
+ data_dir=os.path.expanduser(FLAGS.data_dir),
90
+ problem_name=FLAGS.problem,
91
+ hparams_path=hparams_path)
92
+
93
+
94
+ def create_decode_hparams():
95
+ decode_hp = decoding.decode_hparams(FLAGS.decode_hparams)
96
+ decode_hp.shards = FLAGS.decode_shards
97
+ decode_hp.shard_id = FLAGS.worker_id
98
+ decode_in_memory = FLAGS.decode_in_memory or decode_hp.decode_in_memory
99
+ decode_hp.decode_in_memory = decode_in_memory
100
+ decode_hp.decode_to_file = FLAGS.decode_to_file
101
+ decode_hp.decode_reference = FLAGS.decode_reference
102
+ return decode_hp
103
+
104
+
105
+ def decode(estimator, hparams, decode_hp):
106
+ """Decode from estimator. Interactive, from file, or from dataset."""
107
+ if FLAGS.decode_interactive:
108
+ if estimator.config.use_tpu:
109
+ raise ValueError("TPU can only decode from dataset.")
110
+ decoding.decode_interactively(estimator, hparams, decode_hp,
111
+ checkpoint_path=FLAGS.checkpoint_path)
112
+ elif FLAGS.decode_from_file:
113
+ decoding.decode_from_file(estimator, FLAGS.decode_from_file, hparams,
114
+ decode_hp, FLAGS.decode_to_file,
115
+ checkpoint_path=FLAGS.checkpoint_path)
116
+ if FLAGS.checkpoint_path and FLAGS.keep_timestamp:
117
+ ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + ".index")
118
+ os.utime(FLAGS.decode_to_file, (ckpt_time, ckpt_time))
119
+ else:
120
+ decoding.decode_from_dataset(
121
+ estimator,
122
+ FLAGS.problem,
123
+ hparams,
124
+ decode_hp,
125
+ decode_to_file=FLAGS.decode_to_file,
126
+ dataset_split="test" if FLAGS.eval_use_test_set else None,
127
+ checkpoint_path=FLAGS.checkpoint_path)
128
+
129
+
130
+ def score_file(filename):
131
+ """Score each line in a file and return the scores."""
132
+ # Prepare model.
133
+ hparams = create_hparams()
134
+ encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir)
135
+ has_inputs = "inputs" in encoders
136
+
137
+ # Prepare features for feeding into the model.
138
+ if has_inputs:
139
+ inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.
140
+ batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D.
141
+ targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.
142
+ batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D.
143
+ if has_inputs:
144
+ features = {"inputs": batch_inputs, "targets": batch_targets}
145
+ else:
146
+ features = {"targets": batch_targets}
147
+
148
+ # Prepare the model and the graph when model runs on features.
149
+ model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL)
150
+ _, losses = model(features)
151
+ saver = tf.train.Saver()
152
+
153
+ with tf.Session() as sess:
154
+ # Load weights from checkpoint.
155
+ if FLAGS.checkpoint_path is None:
156
+ ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir)
157
+ ckpt = ckpts.model_checkpoint_path
158
+ else:
159
+ ckpt = FLAGS.checkpoint_path
160
+ saver.restore(sess, ckpt)
161
+ # Run on each line.
162
+ with tf.gfile.Open(filename) as f:
163
+ lines = f.readlines()
164
+ results = []
165
+ for line in lines:
166
+ tab_split = line.split("\t")
167
+ if len(tab_split) > 2:
168
+ raise ValueError("Each line must have at most one tab separator.")
169
+ if len(tab_split) == 1:
170
+ targets = tab_split[0].strip()
171
+ else:
172
+ targets = tab_split[1].strip()
173
+ inputs = tab_split[0].strip()
174
+ # Run encoders and append EOS symbol.
175
+ targets_numpy = encoders["targets"].encode(
176
+ targets) + [text_encoder.EOS_ID]
177
+ if has_inputs:
178
+ inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID]
179
+ # Prepare the feed.
180
+ if has_inputs:
181
+ feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy}
182
+ else:
183
+ feed = {targets_ph: targets_numpy}
184
+ # Get the score.
185
+ np_loss = sess.run(losses["training"], feed)
186
+ results.append(np_loss)
187
+ return results
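
An illustrative note on `--score_file` (file names below are placeholders): each line is scored as an `input<TAB>target` pair, or a bare target when the problem has no inputs, and the resulting per-line losses are written out via `--decode_to_file` in main().

# Sketch of preparing an input file for score_file(); plain Python, independent
# of the model code.
with open("/tmp/to_score.txt", "w", encoding="utf-8") as f:   # placeholder path
    f.write("ein kleines Beispiel\ta small example\n")        # input <TAB> target
    f.write("noch ein Satz\tanother sentence\n")
# Hypothetical invocation (flag names as defined in this file / utils/flags.py):
#   ./decoder.py --score_file=/tmp/to_score.txt --decode_to_file=/tmp/scores.txt ...
# writes one "%.6f" loss per input line.
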
188
+
189
+ def get_workaround_flag(name):
190
+ return f'WA_{name}'
191
+
192
+ def is_workaround_enabled(name):
193
+ flag = get_workaround_flag(name)
194
+ is_enabled = os.environ.get(flag, 'true') == 'true'
195
+ if is_enabled:
196
+ print(f"Warning! Workaround {flag} is enabled. Run with {flag}=false to disable it.")
197
+ return is_enabled
198
+
199
+ def main(_):
200
+ tf.disable_v2_behavior()
201
+ tf.enable_resource_variables()
202
+ tf.logging.set_verbosity(tf.logging.INFO)
203
+ trainer_lib.set_random_seed(FLAGS.random_seed)
204
+ usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
205
+
206
+ if FLAGS.use_hpu:
207
+ from habana_frameworks.tensorflow import load_habana_module
208
+ load_habana_module()
209
+
210
+ hvd = trainer.init_multinode()
211
+
212
+ if FLAGS.use_hpu:
213
+ if FLAGS.recipe_cache:
214
+ trainer.prepare_recipe_cache()
215
+ if FLAGS.use_bf16:
216
+ if not is_workaround_enabled('FORCE_FP32'):
217
+ os.environ['TF_BF16_CONVERSION'] = FLAGS.bf16_config_path
218
+ else:
219
+ print("Warning! BF16 precision is not supported in inference mode. Switching back to fp32...")
220
+ if is_workaround_enabled('DISABLE_DYNAMIC_SHAPES'):
221
+ os.environ['TF_ENABLE_DYNAMIC_SHAPES'] = 'false'
222
+
223
+ if FLAGS.score_file:
224
+ filename = os.path.expanduser(FLAGS.score_file)
225
+ if not tf.gfile.Exists(filename):
226
+ raise ValueError("The file to score doesn't exist: %s" % filename)
227
+ results = score_file(filename)
228
+ if not FLAGS.decode_to_file:
229
+ raise ValueError("To score a file, specify --decode_to_file for results.")
230
+ write_file = tf.gfile.Open(os.path.expanduser(FLAGS.decode_to_file), "w")
231
+ for score in results:
232
+ write_file.write("%.6f\n" % score)
233
+ write_file.close()
234
+ return
235
+
236
+ hp = create_hparams()
237
+ hp.add_hparam("use_hpu", FLAGS.use_hpu)
238
+ hp.add_hparam("use_horovod", FLAGS.use_horovod)
239
+ decode_hp = create_decode_hparams()
240
+ decode_hp.add_hparam("use_horovod", hp.use_horovod)
241
+
242
+ if FLAGS.use_horovod:
243
+ hp.add_hparam("hvd_worker_id", hvd.rank())
244
+ hp.add_hparam("hvd_size", hvd.size())
245
+ decode_hp.add_hparam("hvd_worker_id", hp.hvd_worker_id)
246
+ decode_hp.add_hparam("hvd_size", hp.hvd_size)
247
+ run_config = trainer.create_run_config(hp)
248
+ if FLAGS.disable_grappler_optimizations:
249
+ run_config.session_config.graph_options.rewrite_options.disable_meta_optimizer = True
250
+
251
+ assert FLAGS.use_fast_inference or not FLAGS.use_horovod, "Multinode inference is only supported with use_fast_inference=True"
252
+
253
+ # summary-hook in tf.estimator.EstimatorSpec requires
254
+ # hparams.model_dir to be set.
255
+ hp.add_hparam("model_dir", run_config.model_dir)
256
+
257
+ estimator = trainer_lib.create_estimator(
258
+ FLAGS.model,
259
+ hp,
260
+ run_config,
261
+ decode_hparams=decode_hp,
262
+ use_tpu=FLAGS.use_tpu)
263
+
264
+ decode(estimator, hp, decode_hp)
265
+
266
+
267
+ if __name__ == "__main__":
268
+ tf.logging.set_verbosity(tf.logging.INFO)
269
+ tf.app.run()
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/area_attention.py ADDED
@@ -0,0 +1,433 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Utilities for area attention."""
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import numpy as np
22
+ from six.moves import range # pylint: disable=redefined-builtin
23
+ from TensorFlow.nlp.transformer.layers import common_layers
24
+ import tensorflow.compat.v1 as tf
25
+
26
+
27
+ def lengths_to_area_mask(feature_length, length, max_area_size):
28
+ """Generates a non-padding mask for areas based on lengths.
29
+
30
+ Args:
31
+ feature_length: a tensor of [batch_size]
32
+ length: the length of the batch
33
+ max_area_size: the maximum area size considered
34
+ Returns:
35
+ mask: a tensor in shape of [batch_size, num_areas]
36
+ """
37
+
38
+ paddings = tf.cast(tf.expand_dims(
39
+ tf.logical_not(
40
+ tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32)
41
+ _, _, area_sum, _, _ = compute_area_features(paddings,
42
+ max_area_width=max_area_size)
43
+ mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2])
44
+ return mask
45
+
46
+
47
+ def _pool_one_shape(features_2d, area_width, area_height, batch_size,
48
+ width, height, depth, fn=tf.reduce_max, name=None):
49
+ """Pools for an area in features_2d.
50
+
51
+ Args:
52
+ features_2d: a Tensor in a shape of [batch_size, height, width, depth].
53
+ area_width: the max width allowed for an area.
54
+ area_height: the max height allowed for an area.
55
+ batch_size: the batch size.
56
+ width: the width of the memory.
57
+ height: the height of the memory.
58
+ depth: the depth of the features.
59
+ fn: the TF function for the pooling.
60
+ name: the op name.
61
+ Returns:
62
+ pool_tensor: A Tensor of shape [batch_size, num_areas, depth]
63
+ """
64
+ with tf.name_scope(name, default_name="pool_one_shape"):
65
+ images = []
66
+ for y_shift in range(area_height):
67
+ image_height = tf.maximum(height - area_height + 1 + y_shift, 0)
68
+ for x_shift in range(area_width):
69
+ image_width = tf.maximum(width - area_width + 1 + x_shift, 0)
70
+ area = features_2d[:, y_shift:image_height, x_shift:image_width, :]
71
+ flatten_area = tf.reshape(area, [batch_size, -1, depth, 1])
72
+ images.append(flatten_area)
73
+ image_tensor = tf.concat(images, axis=3)
74
+ max_tensor = fn(image_tensor, axis=3)
75
+ return max_tensor
76
+
77
+
78
+ def basic_pool(features, max_area_width, max_area_height=1, height=1,
79
+ fn=tf.reduce_max, name=None):
80
+ """Pools for each area based on a given pooling function (fn).
81
+
82
+ Args:
83
+ features: a Tensor in a shape of [batch_size, height * width, depth].
84
+ max_area_width: the max width allowed for an area.
85
+ max_area_height: the max height allowed for an area.
86
+ height: the height of the image.
87
+ fn: the TF function for the pooling.
88
+ name: the namescope.
89
+ Returns:
90
+ pool_results: A Tensor of shape [batch_size, num_areas, depth]
91
+ area_heights: A Tensor of shape [batch_size, num_areas, 1]
92
+ area_widths: A Tensor of shape [batch_size, num_areas, 1]
93
+ """
94
+ with tf.name_scope(name, default_name="basic_pool"):
95
+ feature_shape = common_layers.shape_list(features)
96
+ batch_size = feature_shape[0]
97
+ length = feature_shape[-2]
98
+ depth = feature_shape[-1]
99
+ width = length // height
100
+ features_2d = tf.reshape(features, [batch_size, height, width, depth])
101
+ height_list = []
102
+ width_list = []
103
+ pool_list = []
104
+ size_tensor = tf.ones_like(features_2d[:, :, :, 0], dtype=tf.int32)
105
+ for area_height in range(max_area_height):
106
+ for area_width in range(max_area_width):
107
+ pool_tensor = _pool_one_shape(features_2d,
108
+ area_width=area_width + 1,
109
+ area_height=area_height + 1,
110
+ batch_size=batch_size,
111
+ width=width,
112
+ height=height,
113
+ depth=depth,
114
+ fn=fn)
115
+ pool_list.append(
116
+ tf.reshape(pool_tensor, [batch_size, -1, depth]))
117
+ height_list.append(
118
+ tf.reshape(
119
+ size_tensor[:, area_height:, area_width:] *\
120
+ (area_height + 1), [batch_size, -1]))
121
+ width_list.append(
122
+ tf.reshape(
123
+ size_tensor[:, area_height:, area_width:] *\
124
+ (area_width + 1), [batch_size, -1]))
125
+ pool_results = tf.concat(pool_list, axis=1)
126
+ area_heights = tf.expand_dims(tf.concat(height_list, axis=1), 2)
127
+ area_widths = tf.expand_dims(tf.concat(width_list, axis=1), 2)
128
+ return pool_results, area_heights, area_widths
129
+
130
+
131
+ def _compute_sum_image(features, max_area_width, max_area_height=1, height=1,
132
+ name=None):
133
+ """Computes area sums for features.
134
+
135
+ Args:
136
+ features: a Tensor in a shape of [batch_size, height * width, depth].
137
+ max_area_width: the max width allowed for an area.
138
+ max_area_height: the max height allowed for an area.
139
+ height: the height of the image.
140
+ name: the namescope.
141
+ Returns:
142
+ sum_image: A Tensor of shape [batch_size, num_areas, depth]
143
+ area_heights: A Tensor of shape [batch_size, num_areas, 1]
144
+ area_widths: A Tensor of shape [batch_size, num_areas, 1]
145
+ """
146
+ with tf.name_scope(name, default_name="compute_sum_image"):
147
+ feature_shape = common_layers.shape_list(features)
148
+ batch_size = feature_shape[0]
149
+ length = feature_shape[-2]
150
+ depth = feature_shape[-1]
151
+ width = length // height
152
+ features_2d = tf.reshape(features, [batch_size, height, width, depth])
153
+ width_cum = tf.cumsum(features_2d, axis=-2, name="compute_integral_h")
154
+ integral_image = tf.cumsum(width_cum, axis=-3, name="compute_integral_v")
155
+ padded_image = tf.pad(
156
+ integral_image, [[0, 0], [1, 0], [1, 0], [0, 0]], constant_values=0)
157
+ height_list = []
158
+ width_list = []
159
+ dst_images = []
160
+ src_images_diag = []
161
+ src_images_h = []
162
+ src_images_v = []
163
+ size_tensor = tf.ones_like(padded_image[:, :, :, 0],
164
+ dtype=tf.int32)
165
+ for area_height in range(max_area_height):
166
+ for area_width in range(max_area_width):
167
+ dst_images.append(
168
+ tf.reshape(
169
+ padded_image[:, area_height + 1:, area_width + 1:, :],
170
+ [batch_size, -1, depth]))
171
+ src_images_diag.append(
172
+ tf.reshape(
173
+ padded_image[:, :-area_height - 1, :-area_width - 1, :],
174
+ [batch_size, -1, depth]))
175
+ src_images_h.append(
176
+ tf.reshape(
177
+ padded_image[:, area_height + 1:, :-area_width - 1, :],
178
+ [batch_size, -1, depth]))
179
+ src_images_v.append(
180
+ tf.reshape(
181
+ padded_image[:, :-area_height - 1, area_width + 1:, :],
182
+ [batch_size, -1, depth]))
183
+ height_list.append(
184
+ tf.reshape(
185
+ size_tensor[:, area_height + 1:, area_width + 1:] *\
186
+ (area_height + 1), [batch_size, -1]))
187
+ width_list.append(
188
+ tf.reshape(
189
+ size_tensor[:, area_height + 1:, area_width + 1:] *\
190
+ (area_width + 1), [batch_size, -1]))
191
+ sum_image = tf.subtract(
192
+ tf.concat(dst_images, axis=1) + tf.concat(src_images_diag, axis=1),
193
+ tf.concat(src_images_v, axis=1) + tf.concat(src_images_h, axis=1))
194
+ area_heights = tf.expand_dims(tf.concat(height_list, axis=1), 2)
195
+ area_widths = tf.expand_dims(tf.concat(width_list, axis=1), 2)
196
+ return sum_image, area_heights, area_widths
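
A quick NumPy check (illustrative only, not used by the layer) of the integral-image identity that `_compute_sum_image` relies on: after padding the cumulative sums with a zero row and column, any rectangular area sum is recovered from four corner lookups.

import numpy as np

x = np.arange(12, dtype=np.float32).reshape(3, 4)            # toy 3x4 "image"
integral = np.pad(x, ((1, 0), (1, 0))).cumsum(0).cumsum(1)   # zero-padded integral image
# Sum over rows 1..2 and all 4 columns: dst + diagonal - vertical - horizontal corner.
area_sum = integral[3, 4] + integral[1, 0] - integral[1, 4] - integral[3, 0]
assert area_sum == x[1:3, :].sum()                           # 60.0 == 60.0
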
197
+
198
+
199
+ def compute_area_features(features, max_area_width, max_area_height=1, height=1,
200
+ epsilon=1e-6):
201
+ """Computes features for each area.
202
+
203
+ Args:
204
+ features: a Tensor in a shape of [batch_size, height * width, depth].
205
+ max_area_width: the max width allowed for an area.
206
+ max_area_height: the max height allowed for an area.
207
+ height: the height of the image.
208
+ epsilon: the epsilon added to the variance for computing standard deviation.
209
+ Returns:
210
+ area_mean: A Tensor of shape [batch_size, num_areas, depth]
211
+ area_std: A Tensor of shape [batch_size, num_areas, depth]
212
+ area_sum: A Tensor of shape [batch_size, num_areas, depth]
213
+ area_heights: A Tensor of shape [batch_size, num_areas, 1]
214
+ area_widths: A Tensor of shape [batch_size, num_areas, 1]
215
+ """
216
+ with tf.name_scope("compute_area_features"):
217
+ tf.logging.info("area_attention compute_area_features: %d x %d",
218
+ max_area_height, max_area_width)
219
+ area_sum, area_heights, area_widths = _compute_sum_image(
220
+ features, max_area_width=max_area_width,
221
+ max_area_height=max_area_height, height=height)
222
+ area_squared_sum, _, _ = _compute_sum_image(
223
+ tf.pow(features, 2), max_area_width=max_area_width,
224
+ max_area_height=max_area_height, height=height)
225
+ sizes = tf.multiply(area_heights, area_widths)
226
+ float_area_sizes = tf.cast(sizes, tf.float32)
227
+ area_mean = tf.div(area_sum, float_area_sizes)
228
+ s2_n = tf.div(area_squared_sum, float_area_sizes)
229
+ area_variance = tf.subtract(s2_n, tf.pow(area_mean, 2))
230
+ area_std = tf.sqrt(tf.abs(area_variance) + epsilon)
231
+ return area_mean, area_std, area_sum, area_heights, area_widths
232
+
233
+
234
+ def compute_area_key(features, max_area_width, max_area_height=1, height=1,
235
+ mode="mean", training=True, name=None):
236
+ """Computes the key for each area.
237
+
238
+ Args:
239
+ features: a Tensor in a shape of [batch_size, height * width, depth].
240
+ max_area_width: the max width allowed for an area.
241
+ max_area_height: the max height allowed for an area.
242
+ height: the height of the image.
243
+ mode: whether to combine different area features or only use
244
+ the vector mean of each area. Supported modes are "mean", "max", "sample",
245
+ "sample_concat", and "sample_sum".
246
+ training: indicating if it is in the training mode.
247
+ name: the name for setting the variable scope.
248
+ Returns:
249
+ area_key: a Tensor in the shape of [batch_size, num_areas, depth]
250
+ """
251
+
252
+ tf.logging.info("area_attention mode=%s", mode)
253
+ area_mean, area_std, _, area_heights, area_widths =\
254
+ compute_area_features(features, max_area_width=max_area_width,
255
+ max_area_height=max_area_height, height=height)
256
+ if mode == "mean":
257
+ return area_mean
258
+ elif mode == "max":
259
+ area_max, _, _ = basic_pool(features, max_area_width=max_area_width,
260
+ max_area_height=max_area_height, height=height)
261
+ return area_max
262
+ elif mode == "sample":
263
+ if training:
264
+ area_mean += (area_std * tf.random_normal(tf.shape(area_std)))
265
+ return area_mean
266
+ with tf.variable_scope(
267
+ name, default_name="combine_area_features",
268
+ values=[area_mean, area_std, area_heights, area_widths]):
269
+ depth = common_layers.shape_list(area_mean)[-1]
270
+ height_embed = tf.nn.embedding_lookup(
271
+ params=tf.get_variable("area_height_emb",
272
+ [max_area_height, depth // 2]),
273
+ ids=area_heights[:, :, 0] - 1)
274
+ width_embed = tf.nn.embedding_lookup(
275
+ params=tf.get_variable("area_width_emb",
276
+ [max_area_width, depth // 2]),
277
+ ids=area_widths[:, :, 0] - 1)
278
+ size_embed = tf.concat([height_embed, width_embed], -1)
279
+ if mode == "concat":
280
+ feature_concat = tf.concat([area_mean, area_std, size_embed], -1)
281
+ elif mode == "max_concat":
282
+ area_max, _, _ = basic_pool(features, max_area_width=max_area_width,
283
+ max_area_height=max_area_height,
284
+ height=height)
285
+ feature_concat = tf.concat([area_max, size_embed], -1)
286
+ elif mode == "sum":
287
+ feature_concat = size_embed + area_mean + area_std
288
+ elif mode == "sample_concat":
289
+ if training:
290
+ area_mean += (area_std * tf.random_normal(tf.shape(area_std)))
291
+ feature_concat = tf.concat([area_mean, size_embed], -1)
292
+ elif mode == "sample_sum":
293
+ if training:
294
+ area_mean += (area_std * tf.random_normal(tf.shape(area_std)))
295
+ feature_concat = area_mean + size_embed
296
+ else:
297
+ raise ValueError("Unsupported area key mode=%s" % mode)
298
+ feature_hidden = tf.layers.dense(inputs=feature_concat,
299
+ units=depth,
300
+ activation=tf.nn.relu)
301
+ area_key = tf.layers.dense(feature_hidden, units=depth)
302
+ return area_key
303
+
304
+
305
+ def dot_product_area_attention(q,
306
+ k,
307
+ v,
308
+ bias,
309
+ dropout_rate=0.0,
310
+ image_shapes=None,
311
+ name=None,
312
+ attention_image_summary=None,
313
+ save_weights_to=None,
314
+ dropout_broadcast_dims=None,
315
+ max_area_width=1,
316
+ max_area_height=1,
317
+ memory_height=1,
318
+ area_key_mode="mean",
319
+ area_value_mode="sum",
320
+ top_k_areas=0,
321
+ area_temperature=1.0,
322
+ training=True):
323
+ """Dot-product area attention.
324
+
325
+ Args:
326
+ q: Tensor with shape [..., length_q, depth_k].
327
+ k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
328
+ match with q.
329
+ v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
330
+ match with q.
331
+ bias: bias Tensor (see attention_bias())
332
+ dropout_rate: a float.
333
+ image_shapes: optional tuple of integer scalars.
334
+ see comments for attention_image_summary()
335
+ name: an optional string
336
+ attention_image_summary: the callback for making image summary of attention.
337
+ save_weights_to: an optional dictionary to capture attention weights
338
+ for visualization; the weights tensor will be appended there under
339
+ a string key created from the variable scope (including name).
340
+ dropout_broadcast_dims: an optional list of integers less than rank of q.
341
+ Specifies in which dimensions to broadcast the dropout decisions.
342
+ max_area_width: the max width allowed for an area.
343
+ max_area_height: the max height allowed for an area.
344
+ memory_height: the height of the memory.
345
+ area_key_mode: the mode for computing area keys, which can be "mean",
346
+ "concat", "sum", "sample_concat", and "sample_sum".
347
+ area_value_mode: the mode for computing area values, which can be either
348
+ "mean", or "sum".
349
+ top_k_areas: Use the top key areas for attention.
350
+ area_temperature: the temperature for attention softmax.
351
+ training: indicating if it is in the training mode.
352
+ Returns:
353
+ Tensor with shape [..., length_q, depth_v].
354
+ """
355
+
356
+ tf.logging.info("dot_product_area_attention: "
357
+ "area_h=%d, area_w=%d, mem_h=%d, "
358
+ "area_key_mode=%s, area_value_mode=%s, "
359
+ "area_temperature=%f",
360
+ max_area_height, max_area_width, memory_height,
361
+ area_key_mode, area_value_mode,
362
+ area_temperature)
363
+ with tf.variable_scope(
364
+ name, default_name="dot_product_area_attention",
365
+ values=[q, k, v]) as scope:
366
+ mem_shape = common_layers.shape_list(k)
367
+ batch_size = mem_shape[0]
368
+ head_size = mem_shape[1]
369
+ length = mem_shape[2]
370
+ depth = mem_shape[3]
371
+ k_area = compute_area_key(
372
+ tf.reshape(k, [-1, length, depth]),
373
+ max_area_width=max_area_width,
374
+ max_area_height=max_area_height,
375
+ height=memory_height,
376
+ mode=area_key_mode,
377
+ training=training)
378
+ if area_value_mode == "mean":
379
+ v_area, _, _, _, _ = compute_area_features(
380
+ tf.reshape(v, [-1, length, depth]), max_area_width=max_area_width,
381
+ max_area_height=max_area_height, height=memory_height)
382
+ elif area_value_mode == "max":
383
+ v_area, _, _ = basic_pool(tf.reshape(v, [-1, length, depth]),
384
+ max_area_width=max_area_width,
385
+ max_area_height=max_area_height,
386
+ height=memory_height,
387
+ fn=tf.reduce_max)
388
+ elif area_value_mode == "sum":
389
+ _, _, v_area, _, _ = compute_area_features(
390
+ tf.reshape(v, [-1, length, depth]), max_area_width=max_area_width,
391
+ max_area_height=max_area_height, height=memory_height)
392
+ else:
393
+ raise ValueError("Unsupported area value mode=%s" % area_value_mode)
394
+ k = tf.reshape(k_area, [batch_size, head_size, -1, depth])
395
+ v = tf.reshape(v_area, [batch_size, head_size, -1, depth])
396
+ logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv]
397
+ if bias is not None:
398
+ bias = common_layers.cast_like(bias, logits)
399
+ with tf.name_scope("compute_area_att_bias", values=[bias]):
400
+ bias_shape = common_layers.shape_list(bias)
401
+ mem_length = bias_shape[-1]
402
+ bias_values = tf.reshape(
403
+ tf.cast(tf.less(bias, -1), tf.float32), [-1, mem_length, 1])
404
+ _, _, padding_sum, _, _ = compute_area_features(
405
+ bias_values, max_area_width=max_area_width,
406
+ max_area_height=max_area_height, height=memory_height)
407
+ bias = tf.where(
408
+ tf.cast(tf.to_int32(padding_sum), tf.bool),
409
+ tf.fill(tf.shape(padding_sum), -np.inf),
410
+ tf.zeros_like(padding_sum, dtype=tf.float32))
411
+ bias = tf.reshape(bias,
412
+ [bias_shape[0], bias_shape[1],
413
+ bias_shape[2], -1])
414
+ logits += bias
415
+ logits = logits / area_temperature
416
+ weights = tf.nn.softmax(logits, name="attention_weights")
417
+ if top_k_areas > 0:
418
+ tf.logging.info("area_attention top_k_areas=%d", top_k_areas)
419
+ top_k = tf.minimum(common_layers.shape_list(weights)[-1], top_k_areas)
420
+ top_weights, _ = tf.nn.top_k(weights, k=top_k)
421
+ min_values = tf.reduce_min(top_weights, -1, keepdims=True)
422
+ weights = tf.where(tf.greater_equal(weights, min_values),
423
+ weights, tf.zeros_like(weights))
424
+ weights = tf.div(weights, tf.reduce_sum(weights, -1, keepdims=True))
425
+ if save_weights_to is not None:
426
+ save_weights_to[scope.name] = weights
427
+ save_weights_to[scope.name + "/logits"] = logits
428
+ # Drop out attention links for each head.
429
+ weights = common_layers.dropout_with_broadcast_dims(
430
+ weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
431
+ if common_layers.should_generate_summaries() and attention_image_summary:
432
+ attention_image_summary(weights, image_shapes)
433
+ return tf.matmul(weights, v)
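
A small NumPy analogue (illustrative only; k and the weights are made up) of the `top_k_areas` step above: keep the k largest attention weights per query and renormalize the survivors so they again sum to one.

import numpy as np

weights = np.array([[0.05, 0.40, 0.25, 0.30]])                # one query over four areas
k = 2
kth_largest = np.sort(weights, axis=-1)[..., -k:][..., :1]    # per-row threshold
kept = np.where(weights >= kth_largest, weights, 0.0)
kept /= kept.sum(axis=-1, keepdims=True)                      # re-normalize survivors
# kept is now approximately [[0., 0.571, 0., 0.429]]
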
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/common_attention.py ADDED
The diff for this file is too large to render. See raw diff
 
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/common_layers.py ADDED
The diff for this file is too large to render. See raw diff
 
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/modalities.py ADDED
@@ -0,0 +1,302 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Modalities, which specify a feature's domain.
17
+
18
+ T2TModel applies a default transformation to each feature according to its
19
+ modality. Override them by specifying a model's
20
+ hparams.{bottom,loss,top,weights_fn}.
21
+ """
22
+ from __future__ import absolute_import
23
+ from __future__ import division
24
+ from __future__ import print_function
25
+
26
+ from six.moves import range # pylint: disable=redefined-builtin
27
+
28
+ from TensorFlow.nlp.transformer.layers import common_attention
29
+ from TensorFlow.nlp.transformer.layers import common_layers
30
+
31
+ import tensorflow.compat.v1 as tf
32
+ import tensorflow_probability as tfp
33
+
34
+
35
+ class ModalityType(object):
36
+ """Types of modalities."""
37
+
38
+ IDENTITY = "identity" # identity top and bottom
39
+ IDENTITY_SYMBOL = "identity_symbol" # symbol with identity top and bottom
40
+ SYMBOL = "symbol"
41
+
42
+
43
+ @staticmethod
44
+ def get_choices():
45
+ return [
46
+ ModalityType.IDENTITY,
47
+ ModalityType.IDENTITY_SYMBOL,
48
+ ModalityType.SYMBOL,
49
+ ]
50
+
51
+
52
+ def class_label_targets_bottom(x, model_hparams, vocab_size):
53
+ with tf.variable_scope("class_label_modality_%d_%d" % (
54
+ vocab_size, model_hparams.hidden_size)):
55
+ return tf.zeros([common_layers.shape_list(x)[0],
56
+ 1,
57
+ 1,
58
+ model_hparams.hidden_size])
59
+
60
+
61
+ def identity_bottom(x, model_hparams, vocab_size):
62
+ del model_hparams, vocab_size # unused arg
63
+ return tf.cast(x, tf.float32)
64
+
65
+
66
+ def make_targets_bottom(bottom):
67
+ def targets_bottom(x, model_hparams, vocab_size):
68
+ with tf.variable_scope("targets_bottom"):
69
+ return bottom(x, model_hparams, vocab_size)
70
+ return targets_bottom
71
+
72
+
73
+ def real_bottom(x, model_hparams, vocab_size):
74
+ del vocab_size # unused arg
75
+ with tf.variable_scope("real"):
76
+ return tf.layers.dense(
77
+ tf.cast(x, tf.float32), model_hparams.hidden_size, name="bottom")
78
+
79
+
80
+ def get_weights(model_hparams, vocab_size, hidden_dim=None):
81
+ """Create or get concatenated embedding or softmax variable.
82
+
83
+ Args:
84
+ model_hparams: HParams, model hyperparameters.
85
+ vocab_size: int, vocabulary size.
86
+ hidden_dim: dim of the variable. Defaults to model_hparams.hidden_size.
87
+
88
+ Returns:
89
+ a single Tensor holding the (possibly sharded) embedding or softmax weights.
90
+ """
91
+ if hidden_dim is None:
92
+ hidden_dim = model_hparams.hidden_size
93
+ num_shards = model_hparams.symbol_modality_num_shards
94
+ shards = []
95
+ for i in range(num_shards):
96
+ shard_size = (vocab_size // num_shards) + (
97
+ 1 if i < vocab_size % num_shards else 0)
98
+ var_name = "weights_%d" % i
99
+ shards.append(
100
+ tf.get_variable(
101
+ var_name, [shard_size, hidden_dim],
102
+ initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))
103
+ if num_shards == 1:
104
+ ret = shards[0]
105
+ else:
106
+ ret = tf.concat(shards, 0)
107
+ # Convert ret to tensor.
108
+ if not tf.executing_eagerly():
109
+ ret = common_layers.convert_gradient_to_tensor(ret)
110
+ return ret
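
Illustrative arithmetic only (vocab_size and num_shards are arbitrary values): the per-shard sizes computed inside `get_weights` distribute any remainder over the first shards.

vocab_size, num_shards = 10, 3
shard_sizes = [(vocab_size // num_shards) + (1 if i < vocab_size % num_shards else 0)
               for i in range(num_shards)]
assert shard_sizes == [4, 3, 3] and sum(shard_sizes) == vocab_size
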
111
+
112
+
113
+ def _symbol_bottom_simple(x, model_hparams, vocab_size, name, reuse):
114
+ """Bottom transformation for symbols."""
115
+ with tf.variable_scope(name, reuse=reuse):
116
+ # Ensure the inputs are 3-D
117
+ if len(x.get_shape()) == 4:
118
+ x = tf.squeeze(x, axis=3)
119
+ while len(x.get_shape()) < 3:
120
+ x = tf.expand_dims(x, axis=-1)
121
+
122
+ var = get_weights(model_hparams, vocab_size)
123
+ x = common_layers.dropout_no_scaling(
124
+ x, 1.0 - model_hparams.symbol_dropout)
125
+ ret = common_layers.gather(var, x)
126
+ if model_hparams.multiply_embedding_mode == "sqrt_depth":
127
+ ret *= model_hparams.hidden_size**0.5
128
+ ret *= tf.expand_dims(
129
+ common_layers.cast_like(tf.not_equal(x, 0), ret), -1)
130
+ return ret
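
A toy NumPy rendering (not part of the model; the table and ids are made up) of the two tail steps in `_symbol_bottom_simple`: the optional sqrt-depth scaling and the zeroing of embeddings for padding id 0.

import numpy as np

hidden_size = 4
table = np.random.randn(10, hidden_size).astype(np.float32)   # stand-in embedding table
ids = np.array([3, 0, 7])                                      # id 0 is padding
out = table[ids] * hidden_size ** 0.5                          # "sqrt_depth" scaling
out *= (ids != 0)[:, None]                                     # zero out padding rows
assert np.all(out[1] == 0.0)
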
131
+
132
+
133
+ def symbol_bottom(x, model_hparams, vocab_size):
134
+ if (model_hparams.shared_embedding_and_softmax_weights or
135
+ model_hparams.get("shared_embedding")):
136
+ return _symbol_bottom_simple(
137
+ x, model_hparams, vocab_size, "shared", reuse=None)
138
+ return _symbol_bottom_simple(
139
+ x, model_hparams, vocab_size, "input_emb", reuse=None)
140
+
141
+
142
+ def symbol_targets_bottom(x, model_hparams, vocab_size):
143
+ if (model_hparams.shared_embedding_and_softmax_weights or
144
+ model_hparams.get("shared_embedding")):
145
+ try:
146
+ return _symbol_bottom_simple(
147
+ x, model_hparams, vocab_size, "shared", reuse=True)
148
+ except ValueError:
149
+ # perhaps there were no inputs, and this is a new variable.
150
+ return _symbol_bottom_simple(
151
+ x, model_hparams, vocab_size, "shared", reuse=None)
152
+ else:
153
+ return _symbol_bottom_simple(
154
+ x, model_hparams, vocab_size, "target_emb", reuse=None)
155
+
156
+
157
+ # Loss transformations, applied to target features
158
+
159
+
160
+ def generic_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
161
+ """Compute loss numerator and denominator for one shard of output."""
162
+ del vocab_size # unused arg
163
+ logits = top_out
164
+ logits = common_attention.maybe_upcast(logits, hparams=model_hparams)
165
+ cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.0)
166
+
167
+ return common_layers.padded_cross_entropy(
168
+ logits,
169
+ targets,
170
+ model_hparams.label_smoothing,
171
+ cutoff=cutoff,
172
+ weights_fn=weights_fn)
173
+
174
+
175
+ # Top transformations, applied to target features
176
+
177
+
178
+ def is_pointwise(func):
179
+ """Decorator for whether the function is pointwise.
180
+
181
+ An example of a pointwise function is a linear layer followed by
182
+ a softmax. Given a tensor [batch, length, height, depth] it operates
183
+ only on the last axis, on every point in [batch, length, height] fully
184
+ independently. In contrast, a classifier that first averages over length
185
+ and height is not pointwise, as it depends on the whole field. It is useful
186
+ to know if top functions are pointwise to speed up decoding in certain models.
187
+
188
+ Args:
189
+ func: Function to decorate.
190
+
191
+ Returns:
192
+ Original function with an attribute pointwise set to True.
193
+ """
194
+ func.pointwise = True
195
+ return func
196
+
197
+
198
+ def identity_top(body_output, targets, model_hparams, vocab_size):
199
+ del targets, model_hparams, vocab_size # unused arg
200
+ return body_output
201
+
202
+
203
+ @is_pointwise
204
+ def symbol_top(body_output, targets, model_hparams, vocab_size):
205
+ """Generate logits.
206
+
207
+ Args:
208
+ body_output: A Tensor with shape
209
+ [batch, p0, p1, model_hparams.hidden_size].
210
+ targets: Unused.
211
+ model_hparams: HParams, model hyperparameters.
212
+ vocab_size: int, vocabulary size.
213
+
214
+ Returns:
215
+ logits: A Tensor with shape [batch, p0, p1, ?, vocab_size].
216
+ """
217
+ del targets # unused arg
218
+ if model_hparams.shared_embedding_and_softmax_weights:
219
+ scope_name = "shared"
220
+ reuse = tf.AUTO_REUSE
221
+ else:
222
+ scope_name = "softmax"
223
+ reuse = False
224
+ with tf.variable_scope(scope_name, reuse=reuse):
225
+ body_output_shape = common_layers.shape_list(body_output)
226
+ var = get_weights(model_hparams, vocab_size, body_output_shape[-1])
227
+ if (model_hparams.factored_logits and
228
+ model_hparams.mode == tf.estimator.ModeKeys.TRAIN):
229
+ # insert channels dimension
230
+ body_output = tf.expand_dims(body_output, 3)
231
+ return common_layers.FactoredTensor(body_output, var)
232
+ else:
233
+ body_output = tf.reshape(body_output, [-1, body_output_shape[-1]])
234
+ logits = tf.matmul(body_output, var, transpose_b=True)
235
+ return tf.reshape(logits,
236
+ body_output_shape[:-1] + [1, vocab_size])
237
+
238
+
239
+ # Utility functions similar to tf.keras for default transformations
240
+
241
+
242
+ def get_bottom(modality_type, value=None):
243
+ """Gets default bottom transformation; if none available, return value."""
244
+ if modality_type == ModalityType.SYMBOL:
245
+ return symbol_bottom
246
+ elif modality_type in (ModalityType.IDENTITY,
247
+ ModalityType.IDENTITY_SYMBOL):
248
+ return identity_bottom
249
+ return value
250
+
251
+
252
+ def get_loss(modality_type, value=None):
253
+ """Gets default loss transformation; if none available, return value."""
254
+ if modality_type in (ModalityType.IDENTITY,
255
+ ModalityType.IDENTITY_SYMBOL,
256
+ ModalityType.SYMBOL):
257
+ return generic_loss
258
+ return value
259
+
260
+
261
+ def get_name(modality_type, value=None):
262
+ """Gets default name for transformations; if none available, return value."""
263
+ # For legacy reasons, modalities vary in their naming scheme. Future plans are
264
+ # to remove any need for get_name. We do not recommend using it.
265
+ if modality_type == ModalityType.IDENTITY:
266
+ return lambda model_hparams, vocab_size: "identity_modality"
267
+ elif modality_type == ModalityType.SYMBOL:
268
+ def name(model_hparams, vocab_size):
269
+ return "symbol_modality_%d_%d" % (vocab_size, model_hparams.hidden_size)
270
+ return name
271
+ return value
272
+
273
+
274
+ def get_targets_bottom(modality_type, value=None):
275
+ """Gets default bottom transformation for targets; if none, return value."""
276
+ if modality_type == ModalityType.SYMBOL:
277
+ return symbol_targets_bottom
278
+ elif modality_type == ModalityType.IDENTITY_SYMBOL:
279
+ return identity_bottom
280
+ elif modality_type == ModalityType.IDENTITY:
281
+ return make_targets_bottom(identity_bottom)
282
+ return value
283
+
284
+
285
+ def get_top(modality_type, value=None):
286
+ """Gets default top transformation; if none available, return value."""
287
+ if modality_type in (ModalityType.IDENTITY,
288
+ ModalityType.IDENTITY_SYMBOL):
289
+ return identity_top
290
+ elif modality_type == ModalityType.SYMBOL:
291
+ return symbol_top
292
+ return value
293
+
294
+
295
+ def get_weights_fn(modality_type, value=None):
296
+ """Gets default weights function; if none available, return value."""
297
+ if modality_type in (ModalityType.IDENTITY_SYMBOL,
298
+ ModalityType.SYMBOL):
299
+ return common_layers.weights_nonzero
300
+ elif modality_type in ModalityType.get_choices():
301
+ return common_layers.weights_all
302
+ return value
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/transformer_layers.py ADDED
@@ -0,0 +1,366 @@
+ # coding=utf-8
+ # Copyright 2021 The Tensor2Tensor Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Commonly re-used transformer layers."""
+
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+
+ from TensorFlow.nlp.transformer.layers import common_attention
+ from TensorFlow.nlp.transformer.layers import common_layers
+ from TensorFlow.nlp.transformer.utils import expert_utils
+
+ import tensorflow.compat.v1 as tf
+
+
+ # TODO(lukaszkaiser): remove this function when not needed any more.
+ def layers():
+   return common_layers.layers()
+
+
+ def transformer_prepare_encoder(inputs, target_space, hparams, features=None,
+                                 type_ids=None, num_types=None,
+                                 reuse_target_embedding=tf.AUTO_REUSE):
+   """Prepare one shard of the model for the encoder.
+
+   Args:
+     inputs: a Tensor.
+     target_space: a Tensor.
+     hparams: run hyperparameters
+     features: optionally pass the entire features dictionary as well.
+       This is needed now for "packed" datasets.
+     type_ids: optional, an int64 Tensor of shape [batch, length] that allows
+       for adding type embeddings, similar to positional embeddings.
+     num_types: optional, an int that decides the number of types in type_ids.
+     reuse_target_embedding: option to reuse variable name in the case that
+       symbol modalities are reused between inputs/targets.
+
+   Returns:
+     encoder_input: a Tensor, bottom of encoder stack
+     encoder_self_attention_bias: a bias tensor for use in encoder self-attention
+     encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
+       attention
+   """
+   ishape_static = inputs.shape.as_list()
+   encoder_input = inputs
+   if features and "inputs_segmentation" in features:
+     # Packed dataset. Keep the examples from seeing each other.
+     inputs_segmentation = features["inputs_segmentation"]
+     inputs_position = features["inputs_position"]
+     targets_segmentation = features["targets_segmentation"]
+     if (hasattr(hparams, "unidirectional_encoder") and
+         hparams.unidirectional_encoder):
+       tf.logging.info("Using unidirectional encoder")
+       encoder_self_attention_bias = (
+           common_attention.attention_bias_lower_triangle(
+               common_layers.shape_list(inputs)[1]))
+     else:
+       encoder_self_attention_bias = (
+           common_attention.attention_bias_same_segment(
+               inputs_segmentation, inputs_segmentation))
+     encoder_decoder_attention_bias = (
+         common_attention.attention_bias_same_segment(targets_segmentation,
+                                                      inputs_segmentation))
+   else:
+     encoder_padding = common_attention.embedding_to_padding(encoder_input)
+     ignore_padding = common_attention.attention_bias_ignore_padding(
+         encoder_padding)
+     if (hasattr(hparams, "unidirectional_encoder") and
+         hparams.unidirectional_encoder):
+       tf.logging.info("Using unidirectional encoder")
+       encoder_self_attention_bias = (
+           common_attention.attention_bias_lower_triangle(
+               common_layers.shape_list(inputs)[1]))
+     else:
+       # Usual case - not a packed dataset.
+       encoder_self_attention_bias = ignore_padding
+     encoder_decoder_attention_bias = ignore_padding
+     inputs_position = None
+   if hparams.proximity_bias:
+     encoder_self_attention_bias += common_attention.attention_bias_proximal(
+         common_layers.shape_list(inputs)[1])
+   if target_space is not None and hparams.get("use_target_space_embedding",
+                                               True):
+     # Append target_space_id embedding to inputs.
+     emb_target_space = common_layers.embedding(
+         target_space,
+         32,
+         ishape_static[-1],
+         name="target_space_embedding",
+         dtype=hparams.get("activation_dtype", "float32"),
+         reuse=reuse_target_embedding)
+     emb_target_space = tf.reshape(emb_target_space, [1, 1, -1])
+     encoder_input += emb_target_space
+   if hparams.pos == "timing":
+     if inputs_position is not None:
+       encoder_input = common_attention.add_timing_signal_1d_given_position(
+           encoder_input, inputs_position)
+     else:
+       encoder_input = common_attention.add_timing_signal_1d(encoder_input)
+   elif hparams.pos == "timing_from_features":
+     encoder_input = common_attention.add_timing_signals_from_features(
+         encoder_input, features, hparams.position_features)
+   elif hparams.pos == "emb":
+     encoder_input = common_attention.add_positional_embedding(
+         encoder_input, hparams.max_length, "inputs_positional_embedding",
+         inputs_position)
+
+   # Add type embeddings
+   if type_ids is not None:
+     if not num_types:
+       raise ValueError("Need to set num_types as well.")
+     encoder_input = common_attention.add_positional_embedding(
+         encoder_input, num_types, "inputs_type_embedding", type_ids)
+
+   encoder_self_attention_bias = common_layers.cast_like(
+       encoder_self_attention_bias, encoder_input)
+   encoder_decoder_attention_bias = common_layers.cast_like(
+       encoder_decoder_attention_bias, encoder_input)
+   return (encoder_input, encoder_self_attention_bias,
+           encoder_decoder_attention_bias)
+
+
+ def transformer_encoder(encoder_input,
+                         encoder_self_attention_bias,
+                         hparams,
+                         name="encoder",
+                         nonpadding=None,
+                         save_weights_to=None,
+                         make_image_summary=False,
+                         losses=None,
+                         attn_bias_for_padding=None):
+   """A stack of transformer layers.
+
+   Args:
+     encoder_input: a Tensor
+     encoder_self_attention_bias: bias Tensor for self-attention
+       (see common_attention.attention_bias())
+     hparams: hyperparameters for model
+     name: a string
+     nonpadding: optional Tensor with shape [batch_size, encoder_length]
+       indicating what positions are not padding. This must either be
+       passed in, which we do for "packed" datasets, or inferred from
+       encoder_self_attention_bias. The knowledge about padding is used
+       for pad_remover (efficiency) and to mask out padding in convolutional
+       layers.
+     save_weights_to: an optional dictionary to capture attention weights
+       for visualization; the weights tensor will be appended there under
+       a string key created from the variable scope (including name).
+     make_image_summary: Whether to make an attention image summary.
+     losses: optional list onto which to append extra training losses
+     attn_bias_for_padding: Padded attention bias in case a unidirectional
+       encoder is being used where future attention is masked.
+
+   Returns:
+     y: a Tensor
+   """
+   x = encoder_input
+   attention_dropout_broadcast_dims = (
+       common_layers.comma_separated_string_to_integer_list(
+           getattr(hparams, "attention_dropout_broadcast_dims", "")))
+
+   with tf.variable_scope(name):
+     if nonpadding is not None:
+       padding = 1.0 - nonpadding
+     else:
+       attention_bias = encoder_self_attention_bias
+       if attn_bias_for_padding is not None:
+         attention_bias = attn_bias_for_padding
+       padding = common_attention.attention_bias_to_padding(attention_bias)
+       nonpadding = 1.0 - padding
+     pad_remover = None
+     if hparams.use_pad_remover and not common_layers.is_xla_compiled():
+       pad_remover = expert_utils.PadRemover(padding)
+     for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
+       with tf.variable_scope("layer_%d" % layer):
+         with tf.variable_scope("self_attention"):
+           if layer < hparams.get("num_area_layers", 0):
+             max_area_width = hparams.get("max_area_width", 1)
+             max_area_height = hparams.get("max_area_height", 1)
+             memory_height = hparams.get("memory_height", 1)
+           else:
+             max_area_width = 1
+             max_area_height = 1
+             memory_height = 1
+           y = common_attention.multihead_attention(
+               common_layers.layer_preprocess(x, hparams),
+               None,
+               encoder_self_attention_bias,
+               hparams.attention_key_channels or hparams.hidden_size,
+               hparams.attention_value_channels or hparams.hidden_size,
+               hparams.hidden_size,
+               hparams.num_heads,
+               hparams.attention_dropout,
+               attention_type=hparams.self_attention_type,
+               max_relative_position=hparams.max_relative_position,
+               heads_share_relative_embedding=(
+                   hparams.heads_share_relative_embedding),
+               add_relative_to_values=hparams.add_relative_to_values,
+               save_weights_to=save_weights_to,
+               make_image_summary=make_image_summary,
+               dropout_broadcast_dims=attention_dropout_broadcast_dims,
+               max_length=hparams.get("max_length"),
+               vars_3d=hparams.get("attention_variables_3d"),
+               activation_dtype=hparams.get("activation_dtype", "float32"),
+               weight_dtype=hparams.get("weight_dtype", "float32"),
+               hard_attention_k=hparams.get("hard_attention_k", 0),
+               gumbel_noise_weight=hparams.get("gumbel_noise_weight", 0.0),
+               max_area_width=max_area_width,
+               max_area_height=max_area_height,
+               memory_height=memory_height,
+               area_key_mode=hparams.get("area_key_mode", "none"),
+               area_value_mode=hparams.get("area_value_mode", "none"),
+               training=(hparams.get("mode", tf.estimator.ModeKeys.TRAIN)
+                         == tf.estimator.ModeKeys.TRAIN))
+           x = common_layers.layer_postprocess(x, y, hparams)
+         with tf.variable_scope("ffn"):
+           y = transformer_ffn_layer(
+               common_layers.layer_preprocess(x, hparams),
+               hparams,
+               pad_remover,
+               conv_padding="SAME",
+               nonpadding_mask=nonpadding,
+               losses=losses)
+           x = common_layers.layer_postprocess(x, y, hparams)
+     # if normalization is done in layer_preprocess, then it should also be done
+     # on the output, since the output can grow very large, being the sum of
+     # a whole stack of unnormalized layer outputs.
+     return common_layers.layer_preprocess(x, hparams)
+
+
+ def transformer_ffn_layer(x,
+                           hparams,
+                           pad_remover=None,
+                           conv_padding="LEFT",
+                           nonpadding_mask=None,
+                           losses=None,
+                           cache=None,
+                           decode_loop_step=None,
+                           readout_filter_size=0,
+                           layer_collection=None):
+   """Feed-forward layer in the transformer.
+
+   Args:
+     x: a Tensor of shape [batch_size, length, hparams.hidden_size]
+     hparams: hyperparameters for model
+     pad_remover: an expert_utils.PadRemover object tracking the padding
+       positions. If provided, when using convolutional settings, the padding
+       is removed before applying the convolution, and restored afterward. This
+       can give a significant speedup.
+     conv_padding: a string - either "LEFT" or "SAME".
+     nonpadding_mask: an optional Tensor with shape [batch_size, length],
+       needed for convolutional layers with "SAME" padding.
+       Contains 1.0 in positions corresponding to nonpadding.
+     losses: optional list onto which to append extra training losses
+     cache: dict, containing tensors which are the results of previous
+       attentions, used for fast decoding.
+     decode_loop_step: An integer, step number of the decoding loop.
+       Only used for inference on TPU.
+     readout_filter_size: if it's greater than 0, then it will be used instead of
+       filter_size
+     layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
+       KFAC optimizer. Default is None.
+
+
+   Returns:
+     a Tensor of shape [batch_size, length, hparams.hidden_size]
+
+   Raises:
+     ValueError: If losses arg is None, but layer generates extra losses.
+   """
+   ffn_layer = hparams.ffn_layer
+   relu_dropout_broadcast_dims = (
+       common_layers.comma_separated_string_to_integer_list(
+           getattr(hparams, "relu_dropout_broadcast_dims", "")))
+   if ffn_layer == "conv_hidden_relu":
+     # Backwards compatibility
+     ffn_layer = "dense_relu_dense"
+   if ffn_layer == "dense_relu_dense":
+     if pad_remover:
+       original_shape = common_layers.shape_list(x)
+       # Collapse `x` across examples, and remove padding positions.
+       x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0))
+       x = tf.expand_dims(pad_remover.remove(x), axis=0)
+     conv_output = common_layers.dense_relu_dense(
+         x,
+         hparams.filter_size,
+         hparams.hidden_size,
+         dropout=hparams.relu_dropout,
+         dropout_broadcast_dims=relu_dropout_broadcast_dims,
+         layer_collection=layer_collection)
+     if pad_remover:
+       # Restore `conv_output` to the original shape of `x`, including padding.
+       conv_output = tf.reshape(
+           pad_remover.restore(tf.squeeze(conv_output, axis=0)), original_shape)
+     return conv_output
+   elif ffn_layer == "conv_relu_conv":
+     return common_layers.conv_relu_conv(
+         x,
+         readout_filter_size or hparams.filter_size,
+         hparams.hidden_size,
+         first_kernel_size=hparams.conv_first_kernel,
+         second_kernel_size=1,
+         padding=conv_padding,
+         nonpadding_mask=nonpadding_mask,
+         dropout=hparams.relu_dropout,
+         cache=cache,
+         decode_loop_step=decode_loop_step)
+   elif ffn_layer == "parameter_attention":
+     return common_attention.parameter_attention(
+         x, hparams.parameter_attention_key_channels or hparams.hidden_size,
+         hparams.parameter_attention_value_channels or hparams.hidden_size,
+         hparams.hidden_size, readout_filter_size or hparams.filter_size,
+         hparams.num_heads,
+         hparams.attention_dropout)
+   elif ffn_layer == "conv_hidden_relu_with_sepconv":
+     return common_layers.conv_hidden_relu(
+         x,
+         readout_filter_size or hparams.filter_size,
+         hparams.hidden_size,
+         kernel_size=(3, 1),
+         second_kernel_size=(31, 1),
+         padding="LEFT",
+         dropout=hparams.relu_dropout)
+   elif ffn_layer == "sru":
+     return common_layers.sru(x)
+   elif ffn_layer == "local_moe_tpu":
+     overhead = hparams.moe_overhead_eval
+     if hparams.mode == tf.estimator.ModeKeys.TRAIN:
+       overhead = hparams.moe_overhead_train
+     ret, loss = expert_utils.local_moe_tpu(
+         x,
+         hparams.filter_size // 2,
+         hparams.hidden_size,
+         hparams.moe_num_experts,
+         overhead=overhead,
+         loss_coef=hparams.moe_loss_coef)
+   elif ffn_layer == "local_moe":
+     overhead = hparams.moe_overhead_eval
+     if hparams.mode == tf.estimator.ModeKeys.TRAIN:
+       overhead = hparams.moe_overhead_train
+     ret, loss = expert_utils.local_moe(
+         x,
+         True,
+         expert_utils.ffn_expert_fn(hparams.hidden_size, [hparams.filter_size],
+                                    hparams.hidden_size),
+         hparams.moe_num_experts,
+         k=hparams.moe_k,
+         hparams=hparams)
+     losses.append(loss)
+     return ret
+   else:
+     assert ffn_layer == "none"
+     return x
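
transformer_prepare_encoder and transformer_encoder are meant to be used together: the former turns embedded inputs into an encoder input plus self-attention and encoder-decoder attention biases, and the latter runs the layer stack that consumes them. A minimal wiring sketch (not part of the committed file), assuming a transformer_base-style hparams object that carries the fields read above (hidden_size, num_heads, pos, ffn_layer, layer_prepostprocess_dropout, ...) and inputs that are already embedded to [batch, length, hidden_size]; the input-dropout step mirrors what the parent transformer model typically applies before the stack:

import tensorflow.compat.v1 as tf
from TensorFlow.nlp.transformer.layers import transformer_layers

def encode(embedded_inputs, target_space_id, hparams, features=None):
  # Build the encoder input and the two attention bias tensors.
  (encoder_input,
   self_attention_bias,
   encoder_decoder_attention_bias) = transformer_layers.transformer_prepare_encoder(
       embedded_inputs, target_space_id, hparams, features=features)
  # Input dropout before the stack (assumed hparams field, as in transformer_base).
  encoder_input = tf.nn.dropout(
      encoder_input, rate=hparams.layer_prepostprocess_dropout)
  # Run the stack of self-attention + feed-forward layers defined above.
  encoder_output = transformer_layers.transformer_encoder(
      encoder_input, self_attention_bias, hparams, nonpadding=None, losses=[])
  return encoder_output, encoder_decoder_attention_bias

The ffn_layer hparam selects the feed-forward variant inside transformer_ffn_layer; "dense_relu_dense" is the usual default, and the MoE variants additionally append an auxiliary loss to the losses list.
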
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/layers/transformer_memory.py ADDED
@@ -0,0 +1,393 @@
+ # coding=utf-8
+ # Copyright 2021 The Tensor2Tensor Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """The memory unit for Transformer."""
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+
+ from TensorFlow.nlp.transformer.layers import common_layers
+ import tensorflow.compat.v1 as tf
+
+
+ class RecurrentMemory(object):
+   """Base class for recurrent memory.
+
+   This class defines the memory interface, but behaves like a no-op.
+   """
+
+   def pre_attention(self, segment, query_antecedent, memory_antecedent, bias):
+     """Called prior to self-attention, to incorporate memory items.
+
+     Args:
+       segment: an integer Tensor with shape [batch]
+       query_antecedent: a Tensor with shape [batch, length_q, channels]
+       memory_antecedent: must be None. Attention normally allows this to be a
+         Tensor with shape [batch, length_m, channels], but we currently only
+         support memory for decoder-side self-attention.
+       bias: bias Tensor (see attention_bias())
+     Returns:
+       (data, new_query_antecedent, new_memory_antecedent, new_bias)
+     """
+     del segment
+     return None, query_antecedent, memory_antecedent, bias
+
+   def post_attention(self, token, x):
+     """Called after self-attention. The memory can be updated here.
+
+     Args:
+       token: Data returned by pre_attention, which can be used to carry over
+         state related to the current memory operation.
+       x: a Tensor of data after self-attention and feed-forward
+     Returns:
+       a (possibly modified) version of the input x
+     """
+     assert token is None
+     return x
+
+
+ class RecentTokensMemory(RecurrentMemory):
+   """A memory module that caches features for recent tokens.
+
+   When the number of tokens cached is equal to the chunk size, this is
+   equivalent to the memory used by Transformer-XL
+   (https://arxiv.org/abs/1901.02860)
+   """
+
+   def __init__(self, name, hparams):
+     hidden_size = hparams.hidden_size
+     self.chunk_length = hparams.split_targets_chunk_length
+     assert self.chunk_length > 0, "Chunking is required to use recurrent memory"
+
+     if hasattr(hparams, "num_memory_items") and hparams.num_memory_items > 0:
+       self.tokens_to_cache = hparams.num_memory_items
+     else:
+       self.tokens_to_cache = self.chunk_length
+
+     # TODO(kitaev): The implementation of the chunking code makes it somewhat
+     # convoluted to figure out how many actual sequences we can have per batch.
+     # The data pipeline should be revisited at some point.
+     if (hasattr(hparams, "recurrent_memory_batch_size")
+         and hparams.recurrent_memory_batch_size > 0):
+       batch_size_in_sequences = hparams.recurrent_memory_batch_size
+     else:
+       batch_size_in_sequences = hparams.batch_size / hparams.max_length
+
+     memory_shape = [batch_size_in_sequences, self.tokens_to_cache, hidden_size]
+     bias_shape = [batch_size_in_sequences, 1, 1, self.tokens_to_cache]
+
+     with tf.variable_scope(name):
+       self.previous_segment = tf.get_variable(
+           "memsegment", (batch_size_in_sequences,),
+           dtype=tf.int32, trainable=False,
+           collections=[tf.GraphKeys.LOCAL_VARIABLES],
+           initializer=tf.constant_initializer(0))
+
+       self.previous_vals = tf.get_variable(
+           "memvals", memory_shape,
+           dtype=tf.float32, trainable=False,
+           collections=[tf.GraphKeys.LOCAL_VARIABLES],
+           initializer=tf.constant_initializer(.0))
+
+       self.previous_bias = tf.get_variable(
+           "membias", bias_shape,
+           dtype=tf.float32, trainable=False,
+           collections=[tf.GraphKeys.LOCAL_VARIABLES],
+           initializer=tf.constant_initializer(-1e9))
+
+   def pre_attention(self, segment, query_antecedent, memory_antecedent, bias):
+     """Called prior to self-attention, to incorporate memory items.
+
+     Args:
+       segment: an integer Tensor with shape [batch]
+       query_antecedent: a Tensor with shape [batch, length_q, channels]
+       memory_antecedent: must be None. Attention normally allows this to be a
+         Tensor with shape [batch, length_m, channels], but we currently only
+         support memory for decoder-side self-attention.
+       bias: bias Tensor (see attention_bias())
+     Returns:
+       (data, new_query_antecedent, new_memory_antecedent, new_bias)
+     """
+     assert memory_antecedent is None, "We only support language modeling"
+
+     # In eval mode, batch size may be variable
+     memory_batch_size = tf.shape(self.previous_vals)[0]
+     current_batch_size = tf.shape(query_antecedent)[0]
+     amount_to_pad = memory_batch_size - current_batch_size
+
+     # If segment id is zero, don't attend back to the memory
+     previous_bias = self.previous_bias[:current_batch_size, :, :, :] + tf.cast(
+         tf.equal(segment[:, None, None, None], 0), tf.float32) * -1e9
+
+     sliced_previous_vals = self.previous_vals[:current_batch_size, :, :]
+
+     new_memory_antecedent = tf.concat(
+         [tf.stop_gradient(sliced_previous_vals), query_antecedent], 1)
+     new_bias = tf.concat([
+         tf.tile(tf.stop_gradient(previous_bias), [1, 1, self.chunk_length, 1]),
+         tf.tile(bias, [current_batch_size, 1, 1, 1]),
+     ], -1)
+
+     remember_segment = tf.pad(segment, [[0, amount_to_pad]])
+     # TODO(kitaev): The code assumes that we always either increment the chunk
+     # number or reset it to zero. This assumption will not hold if we re-run the
+     # model for each token, e.g. for autoregressive greedy/beam/sampling decode.
+     remember_vals = tf.pad(query_antecedent,
+                            [[0, amount_to_pad], [0, 0], [0, 0]])
+     # Query position is on axis -2 for bias: as long as a token can be attended
+     # to from at least one query position (i.e. it's not padding), memorize it.
+     remember_bias = tf.tile(
+         tf.reduce_max(bias, -2, keepdims=True), [memory_batch_size, 1, 1, 1])
+     # Assume that query_antecedent is always a full chunk (i.e. not truncated)
+     if self.chunk_length < self.tokens_to_cache:
+       remember_vals = tf.concat([self.previous_vals, remember_vals], 1)
+       remember_bias = tf.concat([
+           self.previous_bias - 1e9 * tf.cast(
+               tf.equal(
+                   tf.pad(segment, [[0, amount_to_pad]])[:, None, None, None],
+                   0), tf.float32),
+           remember_bias
+       ], -1)
+     if self.chunk_length != self.tokens_to_cache:
+       remember_vals = remember_vals[:, -self.tokens_to_cache:, :]
+       remember_bias = remember_bias[:, :, :, -self.tokens_to_cache:]
+     token = (remember_segment, remember_vals, remember_bias)
+
+     return token, query_antecedent, new_memory_antecedent, new_bias
+
+   def post_attention(self, token, x):
+     """Called after self-attention. The memory can be updated here.
+
+     Args:
+       token: Data returned by pre_attention, which can be used to carry over
+         state related to the current memory operation.
+       x: a Tensor of data after self-attention and feed-forward
+     Returns:
+       a (possibly modified) version of the input x
+     """
+     with tf.control_dependencies([
+         self.previous_segment.assign(token[0]),
+         self.previous_vals.assign(token[1]),
+         self.previous_bias.assign(token[2]),
+     ]):
+       return tf.identity(x)
+
+
+ class TransformerMemory(object):
+   """Implements the Memory module.
+
+   Based on Neural Turing Machines: arXiv:1410.5401 [cs.NE]
+   """
+
+   def __init__(self, batch_size, key_depth, val_depth, memory_size,
+                sharpen_factor=1., name="neural_memory"):
+     """Initialize the memory object.
+
+     Args:
+       batch_size: the batch size.
+       key_depth: the depth of the memory keys.
+       val_depth: the depth of the memory values.
+       memory_size: the number of items in the memory.
+       sharpen_factor: the sharpen_factor for addressing the memory.
+       name: the optional variable scope.
+     """
+     self.name = name
+     self.batch_size = batch_size
+     self.key_depth = key_depth
+     self.val_depth = val_depth
+     self.memory_size = memory_size
+     self.sharpen_factor = sharpen_factor
+     with tf.variable_scope(name):
+       self.segment_number = tf.get_variable(
+           "segment_number", [self.batch_size],
+           dtype=tf.int32, trainable=False,
+           initializer=tf.constant_initializer(100000))
+       self.mem_vals = tf.get_variable(
+           "memvals", [self.batch_size, self.memory_size, self.val_depth],
+           dtype=tf.float32, trainable=False,
+           initializer=tf.constant_initializer(.0))
+       self.mean_logits = tf.get_variable(
+           "meanlogits", [self.batch_size, self.memory_size],
+           dtype=tf.float32, trainable=False,
+           initializer=tf.constant_initializer(.0))
+
+   def _norm(self, x):
+     """Compute the safe norm."""
+     return tf.sqrt(tf.reduce_sum(tf.square(x), keepdims=True, axis=-1) + 1e-7)
+
+   def _address_content(self, x):
+     """Address the memory based on content similarity.
+
+     Args:
+       x: a tensor in the shape of [batch_size, length, depth].
+     Returns:
+       the logits for each memory entry [batch_size, length, memory_size].
+     """
+     mem_keys = tf.layers.dense(self.mem_vals, self.key_depth,
+                                bias_initializer=tf.constant_initializer(1.0),
+                                name="mem_key")
+     mem_query = tf.layers.dense(x, self.key_depth,
+                                 bias_initializer=tf.constant_initializer(1.0),
+                                 name="mem_query")
+     norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys),
+                      transpose_b=True)
+     dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True)
+     cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist")
+     access_logits = self.sharpen_factor * cos_dist
+     return access_logits
+
+   def read(self, x):
+     """Read from the memory.
+
+     An external component can use the results via a simple MLP,
+     e.g., fn(x W_x + retrieved_mem W_m).
+
+     Args:
+       x: a tensor in the shape of [batch_size, length, depth].
+     Returns:
+       access_logits: the logits for accessing the memory in shape of
+         [batch_size, length, memory_size].
+       retrieved_mem: the retrieved results in the shape of
+         [batch_size, length, val_depth].
+     """
+     access_logits = self._address_content(x)
+     weights = tf.nn.softmax(access_logits)
+     retrieved_mem = tf.reduce_sum(
+         tf.multiply(tf.expand_dims(weights, 3),
+                     tf.expand_dims(self.mem_vals, axis=1)), axis=2)
+     return access_logits, retrieved_mem
+
+   def write(self, x, access_logits):
+     """Write to the memory based on a combination of similarity and least used.
+
+     Based on arXiv:1607.00036v2 [cs.LG].
+
+     Args:
+       x: a tensor in the shape of [batch_size, length, depth].
+       access_logits: the logits for accessing the memory.
+     Returns:
+       the update op.
+     """
+     gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name="gamma")
+     write_logits = access_logits - gamma * tf.expand_dims(self.mean_logits, 1)
+     candidate_value = tf.layers.dense(x, self.val_depth,
+                                       activation=tf.nn.relu,
+                                       name="candidate_value")
+     erase_gates = tf.layers.dense(x, self.memory_size,
+                                   activation=tf.nn.sigmoid,
+                                   name="erase")
+     write_weights = tf.nn.softmax(write_logits)
+     erase_weights = tf.expand_dims(1 - erase_gates * write_weights, 3)
+     erase = tf.multiply(erase_weights,
+                         tf.expand_dims(self.mem_vals, 1))
+     addition = tf.multiply(
+         tf.expand_dims(write_weights, 3),
+         tf.expand_dims(candidate_value, 2))
+     update_value_op = self.mem_vals.assign(
+         tf.reduce_mean(erase + addition, axis=1))
+     with tf.control_dependencies([update_value_op]):
+       write_op = self.mean_logits.assign(
+           self.mean_logits * 0.1 + tf.reduce_mean(write_logits * 0.9, axis=1))
+     return write_op
+
+   def set(self, mem_vals, mean_logits):
+     set_op = tf.group([
+         self.mem_vals.assign(mem_vals),
+         self.mean_logits.assign(mean_logits)])
+     return set_op
+
+   def get(self):
+     return self.mem_vals, self.mean_logits
+
+   def update_segment_number(self, segment_number):
+     return self.segment_number.assign(segment_number)
+
+   def reset(self, entries_to_reset):
+     """Reset the entries in the memory.
+
+     Args:
+       entries_to_reset: a 1D tensor.
+     Returns:
+       the reset op.
+     """
+     num_updates = tf.size(entries_to_reset)
+     update_vals = tf.scatter_update(
+         self.mem_vals, entries_to_reset,
+         tf.tile(tf.expand_dims(
+             tf.fill([self.memory_size, self.val_depth], .0), 0),
+             [num_updates, 1, 1]))
+     update_logits = tf.scatter_update(
+         self.mean_logits, entries_to_reset,
+         tf.tile(tf.expand_dims(
+             tf.fill([self.memory_size], .0), 0),
+             [num_updates, 1]))
+     reset_op = tf.group([update_vals, update_logits])
+     return reset_op
+
+   def pre_attention(self, segment_number, query_antecedent,
+                     memory_antecedent, bias):
+     """Called prior to self-attention, to incorporate memory items.
+
+     Args:
+       segment_number: an integer Tensor with shape [batch]
+       query_antecedent: a Tensor with shape [batch, length_q, channels]
+       memory_antecedent: must be None. Attention normally allows this to be a
+         Tensor with shape [batch, length_m, channels], but we currently only
+         support memory for decoder-side self-attention.
+       bias: bias Tensor (see attention_bias())
+     Returns:
+       (data, new_query_antecedent, new_memory_antecedent, new_bias)
+     """
+     with tf.variable_scope(self.name + "/pre_attention", reuse=tf.AUTO_REUSE):
+       assert memory_antecedent is None, "We only support language modeling"
+       with tf.control_dependencies([
+           tf.assert_greater_equal(self.batch_size, tf.size(segment_number))]):
+         difference = self.batch_size - tf.size(segment_number)
+         segment_number = tf.pad(segment_number, [[0, difference]])
+         reset_op = self.reset(tf.reshape(tf.where(
+             tf.less(segment_number, self.segment_number)), [-1]))
+       memory_results = {}
+       with tf.control_dependencies([reset_op]):
+         with tf.control_dependencies([
+             self.update_segment_number(segment_number)]):
+           x = tf.pad(query_antecedent, [
+               [0, difference], [0, 0], [0, 0]])
+           access_logits, retrieved_mem = self.read(x)
+       memory_results["x"] = x
+       memory_results["access_logits"] = access_logits
+       memory_results["retrieved_mem"] = retrieved_mem
+       return memory_results, query_antecedent, memory_antecedent, bias
+
+   def post_attention(self, token, x):
+     """Called after self-attention. The memory can be updated here.
+
+     Args:
+       token: Data returned by pre_attention, which can be used to carry over
+         state related to the current memory operation.
+       x: a Tensor of data after self-attention and feed-forward
+     Returns:
+       a (possibly modified) version of the input x
+     """
+     with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE):
+       depth = common_layers.shape_list(x)[-1]
+       actual_batch_size = common_layers.shape_list(x)[0]
+       memory_output = tf.gather(token["retrieved_mem"],
+                                 tf.range(actual_batch_size))
+       output = tf.add(tf.layers.dense(x, depth, use_bias=False),
+                       tf.layers.dense(memory_output, depth))
+       with tf.control_dependencies([output]):
+         with tf.control_dependencies([
+             self.write(token["x"], token["access_logits"])]):
+           return tf.identity(output)
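
TransformerMemory exposes the read/write cycle that pre_attention and post_attention wrap: read() addresses the memory by content similarity and returns access logits plus the retrieved values, while write() applies the erase/add update derived from those logits. A minimal sketch of driving the memory directly (not part of the committed file); the shapes, the graph-mode session setup, and the assumption that the fork's package root is on PYTHONPATH are illustrative only:

import numpy as np
import tensorflow.compat.v1 as tf
from TensorFlow.nlp.transformer.layers import transformer_memory

tf.disable_v2_behavior()  # the module targets TF1-style graphs and sessions

batch_size, length, depth, memory_size = 2, 8, 16, 4
memory = transformer_memory.TransformerMemory(
    batch_size=batch_size, key_depth=depth, val_depth=depth,
    memory_size=memory_size)

x = tf.placeholder(tf.float32, [batch_size, length, depth])
access_logits, retrieved_mem = memory.read(x)   # content-based addressing
write_op = memory.write(x, access_logits)       # erase/add update of mem_vals

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  feed = {x: np.random.randn(batch_size, length, depth).astype(np.float32)}
  _, mem = sess.run([write_op, retrieved_mem], feed_dict=feed)
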