applied-ai-018 committed on
Commit 9a09fb2 · verified · 1 Parent(s): 14a7d24

Add files using upload-large-folder tool
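As a point of reference, commits like this one are typically produced with the `upload_large_folder` utility from `huggingface_hub`. The sketch below is illustrative only and not part of this commit; the repo id, repo type, and folder path are placeholder assumptions, not values taken from this repository.

```python
# Minimal sketch (assumption: huggingface_hub's upload_large_folder API, not code
# from this commit). All identifiers below are placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="<namespace>/<repo-name>",  # hypothetical target repository
    repo_type="model",                  # assumption; could also be "dataset"
    folder_path="docker/bloom13b",      # local folder to upload in resumable chunks
)
```

The tool splits the folder into many small commits and resumes on failure, which is why large file trees such as this one arrive as a single "Add files" commit.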

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/CODEOWNERS +1 -0
  2. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/LICENSE +265 -0
  3. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/MANIFEST.in +2 -0
  4. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/SECURITY.md +41 -0
  5. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/README.md +5 -0
  6. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_books.sh +2 -0
  7. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_ckpt.sh +8 -0
  8. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_vocab.sh +2 -0
  9. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/README.md +15 -0
  10. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/create_embeddings.sh +32 -0
  11. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/evaluate_ict_zeroshot_nq.sh +36 -0
  12. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/finetune_mnli_distributed.sh +44 -0
  13. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/finetune_race_distributed.sh +47 -0
  14. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_bert.sh +34 -0
  15. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_t5_distributed.sh +47 -0
  16. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_t5_distributed_with_mp.sh +48 -0
  17. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_bert.py +159 -0
  18. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_gpt.py +335 -0
  19. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_ict.py +167 -0
  20. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_t5.py +134 -0
  21. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_vit.py +91 -0
  22. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/requirements.txt +10 -0
  23. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/run_gpt.sh +433 -0
  24. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/setup.py +91 -0
  25. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/__init__.py +0 -0
  26. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/convert_paxml_optimizer.py +426 -0
  27. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_checkpoint.py +196 -0
  28. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_to_megatron.py +150 -0
  29. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_to_transformers.py +83 -0
  30. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/ds_to_universal.py +290 -0
  31. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/megatron_optim_merge.py +340 -0
  32. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/megatron_optim_merged_to_ds_universal_convert.py +321 -0
  33. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/verify_checkpoint_non_tp_consistency.py +290 -0
  34. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/create_doc_index.py +33 -0
  35. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/generate_samples_gpt.py +173 -0
  36. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/merge_mp_partitions.py +352 -0
  37. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/add_id.py +67 -0
  38. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/cleanup_dataset.py +115 -0
  39. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/filter_ngrams.py +492 -0
  40. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/merge_jsons.py +55 -0
  41. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/__init__.py +1 -0
  42. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/utils.py +4 -0
  43. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/batch_256.cfg +39 -0
  44. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/launch_keras_resnet_hvd.sh +5 -0
  45. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/list_affinity_topology_bare_metal.sh +149 -0
  46. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/debug.py +107 -0
  47. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/modeling/__init__.py +0 -0
  48. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/modeling/performance.py +56 -0
  49. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/tb_utils.py +357 -0
  50. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/__init__.py +14 -0
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/CODEOWNERS ADDED
@@ -0,0 +1 @@
1
+ * @jeffra @samyam @tjruwase @ShadenSmith @conglongli @awan-10 @cli99 @eltonzheng @minjiaz @RezaYazdaniAminabadi @duli2012 @mrwyattii @yaozhewei @arashb @xiaoxiawu-microsoft @guanhuawang
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/LICENSE ADDED
@@ -0,0 +1,265 @@
1
+ The following applies to all files unless otherwise noted:
2
+ # Copyright (c) 2023 Habana Labs, Ltd. an Intel Company
3
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Redistribution and use in source and binary forms, with or without
6
+ # modification, are permitted provided that the following conditions
7
+ # are met:
8
+ # * Redistributions of source code must retain the above copyright
9
+ # notice, this list of conditions and the following disclaimer.
10
+ # * Redistributions in binary form must reproduce the above copyright
11
+ # notice, this list of conditions and the following disclaimer in the
12
+ # documentation and/or other materials provided with the distribution.
13
+ # * Neither the name of NVIDIA CORPORATION nor the names of its
14
+ # contributors may be used to endorse or promote products derived
15
+ # from this software without specific prior written permission.
16
+ #
17
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
18
+ # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20
+ # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21
+ # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22
+ # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23
+ # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
24
+ # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
25
+ # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26
+ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
+
29
+ --
30
+
31
+ This repository also contains code from Hugging Face Inc., Google Research,
32
+ Facebook (from their Fairseq project), and Philip Popien. Files from these
33
+ organizations have notices at the top of each file. Below are licenses
34
+ used in those files, as indicated.
35
+
36
+
37
+ ------------- LICENSE FOR huggingface and Google Research code --------------
38
+
39
+
40
+ Apache License
41
+ Version 2.0, January 2004
42
+ http://www.apache.org/licenses/
43
+
44
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
45
+
46
+ 1. Definitions.
47
+
48
+ "License" shall mean the terms and conditions for use, reproduction,
49
+ and distribution as defined by Sections 1 through 9 of this document.
50
+
51
+ "Licensor" shall mean the copyright owner or entity authorized by
52
+ the copyright owner that is granting the License.
53
+
54
+ "Legal Entity" shall mean the union of the acting entity and all
55
+ other entities that control, are controlled by, or are under common
56
+ control with that entity. For the purposes of this definition,
57
+ "control" means (i) the power, direct or indirect, to cause the
58
+ direction or management of such entity, whether by contract or
59
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
60
+ outstanding shares, or (iii) beneficial ownership of such entity.
61
+
62
+ "You" (or "Your") shall mean an individual or Legal Entity
63
+ exercising permissions granted by this License.
64
+
65
+ "Source" form shall mean the preferred form for making modifications,
66
+ including but not limited to software source code, documentation
67
+ source, and configuration files.
68
+
69
+ "Object" form shall mean any form resulting from mechanical
70
+ transformation or translation of a Source form, including but
71
+ not limited to compiled object code, generated documentation,
72
+ and conversions to other media types.
73
+
74
+ "Work" shall mean the work of authorship, whether in Source or
75
+ Object form, made available under the License, as indicated by a
76
+ copyright notice that is included in or attached to the work
77
+ (an example is provided in the Appendix below).
78
+
79
+ "Derivative Works" shall mean any work, whether in Source or Object
80
+ form, that is based on (or derived from) the Work and for which the
81
+ editorial revisions, annotations, elaborations, or other modifications
82
+ represent, as a whole, an original work of authorship. For the purposes
83
+ of this License, Derivative Works shall not include works that remain
84
+ separable from, or merely link (or bind by name) to the interfaces of,
85
+ the Work and Derivative Works thereof.
86
+
87
+ "Contribution" shall mean any work of authorship, including
88
+ the original version of the Work and any modifications or additions
89
+ to that Work or Derivative Works thereof, that is intentionally
90
+ submitted to Licensor for inclusion in the Work by the copyright owner
91
+ or by an individual or Legal Entity authorized to submit on behalf of
92
+ the copyright owner. For the purposes of this definition, "submitted"
93
+ means any form of electronic, verbal, or written communication sent
94
+ to the Licensor or its representatives, including but not limited to
95
+ communication on electronic mailing lists, source code control systems,
96
+ and issue tracking systems that are managed by, or on behalf of, the
97
+ Licensor for the purpose of discussing and improving the Work, but
98
+ excluding communication that is conspicuously marked or otherwise
99
+ designated in writing by the copyright owner as "Not a Contribution."
100
+
101
+ "Contributor" shall mean Licensor and any individual or Legal Entity
102
+ on behalf of whom a Contribution has been received by Licensor and
103
+ subsequently incorporated within the Work.
104
+
105
+ 2. Grant of Copyright License. Subject to the terms and conditions of
106
+ this License, each Contributor hereby grants to You a perpetual,
107
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
108
+ copyright license to reproduce, prepare Derivative Works of,
109
+ publicly display, publicly perform, sublicense, and distribute the
110
+ Work and such Derivative Works in Source or Object form.
111
+
112
+ 3. Grant of Patent License. Subject to the terms and conditions of
113
+ this License, each Contributor hereby grants to You a perpetual,
114
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
115
+ (except as stated in this section) patent license to make, have made,
116
+ use, offer to sell, sell, import, and otherwise transfer the Work,
117
+ where such license applies only to those patent claims licensable
118
+ by such Contributor that are necessarily infringed by their
119
+ Contribution(s) alone or by combination of their Contribution(s)
120
+ with the Work to which such Contribution(s) was submitted. If You
121
+ institute patent litigation against any entity (including a
122
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
123
+ or a Contribution incorporated within the Work constitutes direct
124
+ or contributory patent infringement, then any patent licenses
125
+ granted to You under this License for that Work shall terminate
126
+ as of the date such litigation is filed.
127
+
128
+ 4. Redistribution. You may reproduce and distribute copies of the
129
+ Work or Derivative Works thereof in any medium, with or without
130
+ modifications, and in Source or Object form, provided that You
131
+ meet the following conditions:
132
+
133
+ (a) You must give any other recipients of the Work or
134
+ Derivative Works a copy of this License; and
135
+
136
+ (b) You must cause any modified files to carry prominent notices
137
+ stating that You changed the files; and
138
+
139
+ (c) You must retain, in the Source form of any Derivative Works
140
+ that You distribute, all copyright, patent, trademark, and
141
+ attribution notices from the Source form of the Work,
142
+ excluding those notices that do not pertain to any part of
143
+ the Derivative Works; and
144
+
145
+ (d) If the Work includes a "NOTICE" text file as part of its
146
+ distribution, then any Derivative Works that You distribute must
147
+ include a readable copy of the attribution notices contained
148
+ within such NOTICE file, excluding those notices that do not
149
+ pertain to any part of the Derivative Works, in at least one
150
+ of the following places: within a NOTICE text file distributed
151
+ as part of the Derivative Works; within the Source form or
152
+ documentation, if provided along with the Derivative Works; or,
153
+ within a display generated by the Derivative Works, if and
154
+ wherever such third-party notices normally appear. The contents
155
+ of the NOTICE file are for informational purposes only and
156
+ do not modify the License. You may add Your own attribution
157
+ notices within Derivative Works that You distribute, alongside
158
+ or as an addendum to the NOTICE text from the Work, provided
159
+ that such additional attribution notices cannot be construed
160
+ as modifying the License.
161
+
162
+ You may add Your own copyright statement to Your modifications and
163
+ may provide additional or different license terms and conditions
164
+ for use, reproduction, or distribution of Your modifications, or
165
+ for any such Derivative Works as a whole, provided Your use,
166
+ reproduction, and distribution of the Work otherwise complies with
167
+ the conditions stated in this License.
168
+
169
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
170
+ any Contribution intentionally submitted for inclusion in the Work
171
+ by You to the Licensor shall be under the terms and conditions of
172
+ this License, without any additional terms or conditions.
173
+ Notwithstanding the above, nothing herein shall supersede or modify
174
+ the terms of any separate license agreement you may have executed
175
+ with Licensor regarding such Contributions.
176
+
177
+ 6. Trademarks. This License does not grant permission to use the trade
178
+ names, trademarks, service marks, or product names of the Licensor,
179
+ except as required for reasonable and customary use in describing the
180
+ origin of the Work and reproducing the content of the NOTICE file.
181
+
182
+ 7. Disclaimer of Warranty. Unless required by applicable law or
183
+ agreed to in writing, Licensor provides the Work (and each
184
+ Contributor provides its Contributions) on an "AS IS" BASIS,
185
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
186
+ implied, including, without limitation, any warranties or conditions
187
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
188
+ PARTICULAR PURPOSE. You are solely responsible for determining the
189
+ appropriateness of using or redistributing the Work and assume any
190
+ risks associated with Your exercise of permissions under this License.
191
+
192
+ 8. Limitation of Liability. In no event and under no legal theory,
193
+ whether in tort (including negligence), contract, or otherwise,
194
+ unless required by applicable law (such as deliberate and grossly
195
+ negligent acts) or agreed to in writing, shall any Contributor be
196
+ liable to You for damages, including any direct, indirect, special,
197
+ incidental, or consequential damages of any character arising as a
198
+ result of this License or out of the use or inability to use the
199
+ Work (including but not limited to damages for loss of goodwill,
200
+ work stoppage, computer failure or malfunction, or any and all
201
+ other commercial damages or losses), even if such Contributor
202
+ has been advised of the possibility of such damages.
203
+
204
+ 9. Accepting Warranty or Additional Liability. While redistributing
205
+ the Work or Derivative Works thereof, You may choose to offer,
206
+ and charge a fee for, acceptance of support, warranty, indemnity,
207
+ or other liability obligations and/or rights consistent with this
208
+ License. However, in accepting such obligations, You may act only
209
+ on Your own behalf and on Your sole responsibility, not on behalf
210
+ of any other Contributor, and only if You agree to indemnify,
211
+ defend, and hold each Contributor harmless for any liability
212
+ incurred by, or claims asserted against, such Contributor by reason
213
+ of your accepting any such warranty or additional liability.
214
+
215
+ END OF TERMS AND CONDITIONS
216
+
217
+ APPENDIX: How to apply the Apache License to your work.
218
+
219
+ To apply the Apache License to your work, attach the following
220
+ boilerplate notice, with the fields enclosed by brackets "[]"
221
+ replaced with your own identifying information. (Don't include
222
+ the brackets!) The text should be enclosed in the appropriate
223
+ comment syntax for the file format. We also recommend that a
224
+ file or class name and description of purpose be included on the
225
+ same "printed page" as the copyright notice for easier
226
+ identification within third-party archives.
227
+
228
+ Copyright [yyyy] [name of copyright owner]
229
+
230
+ Licensed under the Apache License, Version 2.0 (the "License");
231
+ you may not use this file except in compliance with the License.
232
+ You may obtain a copy of the License at
233
+
234
+ http://www.apache.org/licenses/LICENSE-2.0
235
+
236
+ Unless required by applicable law or agreed to in writing, software
237
+ distributed under the License is distributed on an "AS IS" BASIS,
238
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
239
+ See the License for the specific language governing permissions and
240
+ limitations under the License.
241
+
242
+ ------------- LICENSE FOR Facebook Fairseq code --------------
243
+
244
+ MIT License
245
+
246
+ Copyright (c) Facebook, Inc. and its affiliates.
247
+
248
+ Permission is hereby granted, free of charge, to any person obtaining a copy
249
+ of this software and associated documentation files (the "Software"), to deal
250
+ in the Software without restriction, including without limitation the rights
251
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
252
+ copies of the Software, and to permit persons to whom the Software is
253
+ furnished to do so, subject to the following conditions:
254
+
255
+ The above copyright notice and this permission notice shall be included in all
256
+ copies or substantial portions of the Software.
257
+
258
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
259
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
260
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
261
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
262
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
263
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
264
+ SOFTWARE.
265
+
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/MANIFEST.in ADDED
@@ -0,0 +1,2 @@
1
+ include megatron/data/Makefile
2
+ include megatron/data/helpers.cpp
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/SECURITY.md ADDED
@@ -0,0 +1,41 @@
1
+ <!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
2
+
3
+ ## Security
4
+
5
+ Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
6
+
7
+ If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
8
+
9
+ ## Reporting Security Issues
10
+
11
+ **Please do not report security vulnerabilities through public GitHub issues.**
12
+
13
+ Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
14
+
15
+ If you prefer to submit without logging in, send email to [[email protected]](mailto:[email protected]). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
16
+
17
+ You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
18
+
19
+ Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
20
+
21
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
22
+ * Full paths of source file(s) related to the manifestation of the issue
23
+ * The location of the affected source code (tag/branch/commit or direct URL)
24
+ * Any special configuration required to reproduce the issue
25
+ * Step-by-step instructions to reproduce the issue
26
+ * Proof-of-concept or exploit code (if possible)
27
+ * Impact of the issue, including how an attacker might exploit the issue
28
+
29
+ This information will help us triage your report more quickly.
30
+
31
+ If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
32
+
33
+ ## Preferred Languages
34
+
35
+ We prefer all communications to be in English.
36
+
37
+ ## Policy
38
+
39
+ Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
40
+
41
+ <!-- END MICROSOFT SECURITY.MD BLOCK -->
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/README.md ADDED
@@ -0,0 +1,5 @@
1
+ # Run the scripts below to set up the dataset
2
+
3
+ bash download_books.sh
4
+
5
+ bash download_vocab.sh
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_books.sh ADDED
@@ -0,0 +1,2 @@
1
+ wget https://the-eye.eu/public/AI/pile_neox/data/BookCorpusDataset_text_document.bin
2
+ wget https://the-eye.eu/public/AI/pile_neox/data/BookCorpusDataset_text_document.idx
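The downloaded `.bin`/`.idx` pair is a Megatron-style binary indexed dataset. The sketch below is a sanity check only, assuming this repository's `megatron.data.indexed_dataset` module follows upstream Megatron-DeepSpeed (class name and constructor are that assumption, not something shown in this commit).

```python
# Sketch: sanity-check the downloaded BookCorpus shard. Assumes an upstream-style
# MMapIndexedDataset that takes the path prefix shared by the .bin/.idx pair.
from megatron.data.indexed_dataset import MMapIndexedDataset

ds = MMapIndexedDataset("BookCorpusDataset_text_document")
print(f"documents: {len(ds)}")            # number of indexed documents
print(f"first doc tokens: {ds[0][:16]}")  # token ids of the first document
```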
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_ckpt.sh ADDED
@@ -0,0 +1,8 @@
1
+ mkdir -p checkpoints/gpt2_345m
2
+
3
+ cd checkpoints/gpt2_345m
4
+ wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_lm_345m_v0.0.zip
5
+ unzip megatron_lm_345m_v0.0.zip
6
+ rm megatron_lm_345m_v0.0.zip
7
+ cd ../..
8
+
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_vocab.sh ADDED
@@ -0,0 +1,2 @@
1
+ wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json
2
+ wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt
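These two files are the standard GPT-2 BPE vocabulary and merges assets consumed by the `GPT2BPETokenizer` that `pretrain_gpt.py` defaults to. As an illustration only (the training code builds Megatron's own tokenizer, not the Hugging Face one), the same files can be loaded with `transformers` to see what they contain:

```python
# Illustrative sketch, not part of this commit: load the downloaded GPT-2 BPE
# assets with Hugging Face transformers to inspect the tokenization they define.
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer(vocab_file="gpt2-vocab.json", merges_file="gpt2-merges.txt")
print(tok.encode("Hello world"))  # e.g. [15496, 995]
```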
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/README.md ADDED
@@ -0,0 +1,15 @@
1
+ ## Recipes and Scripts
2
+
3
+ ### Azure
4
+
5
+ We strongly recommend starting with the AzureML recipe in the ```azureml``` folder.
6
+
7
+ If you have custom infrastructure (e.g. HPC clusters) or Azure VM- and VMSS-based environments, please refer to the bash scripts in the ```azure``` folder.
8
+
9
+ ### MoE
10
+
11
+ Please see the ```MoE``` folder for different training recipes and scripts for Mixture-of-Experts based models.
12
+
13
+ ### Curriculum Learning
14
+
15
+ Curriculum learning recipes are in the ```curriculum_learning``` folder. Please refer to the detailed tutorials linked inside.
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/create_embeddings.sh ADDED
@@ -0,0 +1,32 @@
1
+ #!/bin/bash
2
+
3
+ # Compute embeddings for each entry of a given dataset (e.g. Wikipedia)
4
+
5
+ RANK=0
6
+ WORLD_SIZE=1
7
+
8
+ # Wikipedia data can be downloaded from the following link:
9
+ # https://github.com/facebookresearch/DPR/blob/master/data/download_data.py
10
+ EVIDENCE_DATA_DIR=<Specify path of Wikipedia dataset>
11
+ EMBEDDING_PATH=<Specify path to store embeddings>
12
+ CHECKPOINT_PATH=<Specify path of pretrained ICT model>
13
+
14
+ python tools/create_doc_index.py \
15
+ --num-layers 12 \
16
+ --hidden-size 768 \
17
+ --num-attention-heads 12 \
18
+ --tensor-model-parallel-size 1 \
19
+ --micro-batch-size 128 \
20
+ --checkpoint-activations \
21
+ --seq-length 512 \
22
+ --retriever-seq-length 256 \
23
+ --max-position-embeddings 512 \
24
+ --load ${CHECKPOINT_PATH} \
25
+ --evidence-data-path ${EVIDENCE_DATA_DIR} \
26
+ --embedding-path ${EMBEDDING_PATH} \
27
+ --indexer-log-interval 1000 \
28
+ --indexer-batch-size 128 \
29
+ --vocab-file bert-vocab.txt \
30
+ --num-workers 2 \
31
+ --fp16
32
+
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/evaluate_ict_zeroshot_nq.sh ADDED
@@ -0,0 +1,36 @@
1
+ #!/bin/bash
2
+
3
+ # Evaluate natural question test data given Wikipedia embeddings and pretrained
4
+ # ICT model
5
+
6
+ # Datasets can be downloaded from the following link:
7
+ # https://github.com/facebookresearch/DPR/blob/master/data/download_data.py
8
+
9
+ EVIDENCE_DATA_DIR=<Specify path of Wikipedia dataset>
10
+ EMBEDDING_PATH=<Specify path of the embeddings>
11
+ CHECKPOINT_PATH=<Specify path of pretrained ICT model>
12
+
13
+ QA_FILE=<Path of the natural question test dataset>
14
+
15
+ python tasks/main.py \
16
+ --task ICT-ZEROSHOT-NQ \
17
+ --tokenizer-type BertWordPieceLowerCase \
18
+ --num-layers 12 \
19
+ --hidden-size 768 \
20
+ --num-attention-heads 12 \
21
+ --tensor-model-parallel-size 1 \
22
+ --micro-batch-size 128 \
23
+ --checkpoint-activations \
24
+ --seq-length 512 \
25
+ --max-position-embeddings 512 \
26
+ --load ${CHECKPOINT_PATH} \
27
+ --evidence-data-path ${EVIDENCE_DATA_DIR} \
28
+ --embedding-path ${EMBEDDING_PATH} \
29
+ --retriever-seq-length 256 \
30
+ --vocab-file bert-vocab.txt\
31
+ --qa-data-test ${QA_FILE} \
32
+ --num-workers 2 \
33
+ --faiss-use-gpu \
34
+ --retriever-report-topk-accuracies 1 5 20 100 \
35
+ --fp16
36
+
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/finetune_mnli_distributed.sh ADDED
@@ -0,0 +1,44 @@
1
+ #!/bin/bash
2
+
3
+ WORLD_SIZE=8
4
+
5
+ DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \
6
+ --nnodes 1 \
7
+ --node_rank 0 \
8
+ --master_addr localhost \
9
+ --master_port 6000"
10
+
11
+ TRAIN_DATA="data/glue_data/MNLI/train.tsv"
12
+ VALID_DATA="data/glue_data/MNLI/dev_matched.tsv \
13
+ data/glue_data/MNLI/dev_mismatched.tsv"
14
+ PRETRAINED_CHECKPOINT=checkpoints/bert_345m
15
+ VOCAB_FILE=bert-vocab.txt
16
+ CHECKPOINT_PATH=checkpoints/bert_345m_mnli
17
+
18
+ python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \
19
+ --task MNLI \
20
+ --seed 1234 \
21
+ --train-data $TRAIN_DATA \
22
+ --valid-data $VALID_DATA \
23
+ --tokenizer-type BertWordPieceLowerCase \
24
+ --vocab-file $VOCAB_FILE \
25
+ --epochs 5 \
26
+ --pretrained-checkpoint $PRETRAINED_CHECKPOINT \
27
+ --tensor-model-parallel-size 1 \
28
+ --num-layers 24 \
29
+ --hidden-size 1024 \
30
+ --num-attention-heads 16 \
31
+ --micro-batch-size 8 \
32
+ --checkpoint-activations \
33
+ --lr 5.0e-5 \
34
+ --lr-decay-style linear \
35
+ --lr-warmup-fraction 0.065 \
36
+ --seq-length 512 \
37
+ --max-position-embeddings 512 \
38
+ --save-interval 500000 \
39
+ --save $CHECKPOINT_PATH \
40
+ --log-interval 10 \
41
+ --eval-interval 100 \
42
+ --eval-iters 50 \
43
+ --weight-decay 1.0e-1 \
44
+ --fp16
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/finetune_race_distributed.sh ADDED
@@ -0,0 +1,47 @@
1
+ #!/bin/bash
2
+
3
+ WORLD_SIZE=8
4
+
5
+ DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \
6
+ --nnodes 1 \
7
+ --node_rank 0 \
8
+ --master_addr localhost \
9
+ --master_port 6000"
10
+
11
+ TRAIN_DATA="data/RACE/train/middle"
12
+ VALID_DATA="data/RACE/dev/middle \
13
+ data/RACE/dev/high"
14
+ VOCAB_FILE=bert-vocab.txt
15
+ PRETRAINED_CHECKPOINT=checkpoints/bert_345m
16
+ CHECKPOINT_PATH=checkpoints/bert_345m_race
17
+
18
+ python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \
19
+ --task RACE \
20
+ --seed 1234 \
21
+ --train-data $TRAIN_DATA \
22
+ --valid-data $VALID_DATA \
23
+ --tokenizer-type BertWordPieceLowerCase \
24
+ --vocab-file $VOCAB_FILE \
25
+ --epochs 3 \
26
+ --pretrained-checkpoint $PRETRAINED_CHECKPOINT \
27
+ --tensor-model-parallel-size 1 \
28
+ --num-layers 24 \
29
+ --hidden-size 1024 \
30
+ --num-attention-heads 16 \
31
+ --micro-batch-size 4 \
32
+ --checkpoint-activations \
33
+ --lr 1.0e-5 \
34
+ --lr-decay-style linear \
35
+ --lr-warmup-fraction 0.06 \
36
+ --seq-length 512 \
37
+ --max-position-embeddings 512 \
38
+ --save-interval 100000 \
39
+ --save $CHECKPOINT_PATH \
40
+ --log-interval 10 \
41
+ --eval-interval 100 \
42
+ --eval-iters 50 \
43
+ --weight-decay 1.0e-1 \
44
+ --clip-grad 1.0 \
45
+ --hidden-dropout 0.1 \
46
+ --attention-dropout 0.1 \
47
+ --fp16
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_bert.sh ADDED
@@ -0,0 +1,34 @@
1
+ #!/bin/bash
2
+
3
+ RANK=0
4
+ WORLD_SIZE=1
5
+ DATA_PATH=<Specify path and file prefix>_text_sentence
6
+ CHECKPOINT_PATH=<Specify path>
7
+
8
+ python pretrain_bert.py \
9
+ --num-layers 24 \
10
+ --hidden-size 1024 \
11
+ --num-attention-heads 16 \
12
+ --micro-batch-size 4 \
13
+ --global-batch-size 8 \
14
+ --seq-length 512 \
15
+ --max-position-embeddings 512 \
16
+ --train-iters 2000000 \
17
+ --lr-decay-iters 990000 \
18
+ --save $CHECKPOINT_PATH \
19
+ --load $CHECKPOINT_PATH \
20
+ --data-path $DATA_PATH \
21
+ --vocab-file bert-vocab.txt \
22
+ --data-impl mmap \
23
+ --split 949,50,1 \
24
+ --lr 0.0001 \
25
+ --min-lr 0.00001 \
26
+ --lr-decay-style linear \
27
+ --lr-warmup-fraction .01 \
28
+ --weight-decay 1e-2 \
29
+ --clip-grad 1.0 \
30
+ --log-interval 100 \
31
+ --save-interval 10000 \
32
+ --eval-interval 1000 \
33
+ --eval-iters 10 \
34
+ --fp16
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_t5_distributed.sh ADDED
@@ -0,0 +1,47 @@
1
+ #!/bin/bash
2
+
3
+ GPUS_PER_NODE=8
4
+ # Change for multinode config
5
+ MASTER_ADDR=localhost
6
+ MASTER_PORT=6000
7
+ NNODES=1
8
+ NODE_RANK=0
9
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
10
+
11
+ DATA_PATH=<Specify path and file prefix>
12
+ VOCAB_FILE=<Specify path to vocab.txt>
13
+ CHECKPOINT_PATH=<Specify path>
14
+
15
+ DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
16
+
17
+ python -m torch.distributed.launch $DISTRIBUTED_ARGS \
18
+ pretrain_t5.py \
19
+ --num-layers 12 \
20
+ --hidden-size 768 \
21
+ --num-attention-heads 12 \
22
+ --kv-channels 64 \
23
+ --ffn-hidden-size 3072 \
24
+ --encoder-seq-length 512 \
25
+ --decoder-seq-length 128 \
26
+ --micro-batch-size 16 \
27
+ --global-batch-size 2048 \
28
+ --max-position-embeddings 512 \
29
+ --train-iters 1000000 \
30
+ --lr-decay-iters 1000000 \
31
+ --save $CHECKPOINT_PATH \
32
+ --load $CHECKPOINT_PATH \
33
+ --data-path $DATA_PATH \
34
+ --vocab-file $VOCAB_FILE \
35
+ --data-impl mmap \
36
+ --split 949,50,1 \
37
+ --lr 0.0001 \
38
+ --min-lr 0.00001 \
39
+ --lr-decay-style linear \
40
+ --lr-warmup-fraction .01 \
41
+ --weight-decay 1e-2 \
42
+ --clip-grad 1.0 \
43
+ --log-interval 100 \
44
+ --save-interval 10000 \
45
+ --eval-interval 1000 \
46
+ --eval-iters 10 \
47
+ --fp16
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_t5_distributed_with_mp.sh ADDED
@@ -0,0 +1,48 @@
1
+ #!/bin/bash
2
+
3
+ GPUS_PER_NODE=8
4
+ # Change for multinode config
5
+ MASTER_ADDR=localhost
6
+ MASTER_PORT=6000
7
+ NNODES=1
8
+ NODE_RANK=0
9
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
10
+
11
+ DATA_PATH=<Specify path and file prefix>
12
+ CHECKPOINT_PATH=<Specify path>
13
+
14
+ DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
15
+
16
+ python -m torch.distributed.launch $DISTRIBUTED_ARGS \
17
+ pretrain_t5.py \
18
+ --tensor-model-parallel-size 2 \
19
+ --num-layers 12 \
20
+ --hidden-size 768 \
21
+ --num-attention-heads 12 \
22
+ --kv-channels 64 \
23
+ --ffn-hidden-size 3072 \
24
+ --encoder-seq-length 512 \
25
+ --decoder-seq-length 128 \
26
+ --micro-batch-size 16 \
27
+ --global-batch-size 2048 \
28
+ --seq-length 512 \
29
+ --max-position-embeddings 512 \
30
+ --train-iters 1000000 \
31
+ --lr-decay-iters 1000000 \
32
+ --save $CHECKPOINT_PATH \
33
+ --load $CHECKPOINT_PATH \
34
+ --data-path $DATA_PATH \
35
+ --vocab-file t5-vocab.txt \
36
+ --data-impl mmap \
37
+ --split 949,50,1 \
38
+ --lr 0.0001 \
39
+ --min-lr 0.00001 \
40
+ --lr-decay-style linear \
41
+ --lr-warmup-fraction .01 \
42
+ --weight-decay 1e-2 \
43
+ --clip-grad 1.0 \
44
+ --log-interval 100 \
45
+ --save-interval 10000 \
46
+ --eval-interval 1000 \
47
+ --eval-iters 10 \
48
+ --fp16
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_bert.py ADDED
@@ -0,0 +1,159 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2023 Habana Labs, Ltd. an Intel Company.
3
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Pretrain BERT"""
18
+
19
+ from functools import partial
20
+
21
+ import torch
22
+ import torch.nn.functional as F
23
+
24
+ from megatron import get_args
25
+ from megatron import print_rank_0
26
+ from megatron import get_timers
27
+ from megatron import mpu
28
+ from megatron.data.dataset_utils import build_train_valid_test_datasets
29
+ from megatron.model import BertModel
30
+ from megatron.training import pretrain
31
+ from megatron.utils import average_losses_across_data_parallel_group
32
+
33
+
34
+ def model_provider(pre_process=True, post_process=True):
35
+ """Build the model."""
36
+
37
+ print_rank_0('building BERT model ...')
38
+
39
+ args = get_args()
40
+ num_tokentypes = 2 if args.bert_binary_head else 0
41
+ model = BertModel(
42
+ num_tokentypes=num_tokentypes,
43
+ add_binary_head=args.bert_binary_head,
44
+ parallel_output=True,
45
+ pre_process=pre_process,
46
+ post_process=post_process)
47
+
48
+ return model
49
+
50
+
51
+ def get_batch(data_iterator):
52
+ """Build the batch."""
53
+
54
+ # Items and their type.
55
+ keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask']
56
+ datatype = torch.int64 if get_args().device.type == "cuda" else torch.int32
57
+
58
+ # Broadcast data.
59
+ if data_iterator is not None:
60
+ data = next(data_iterator)
61
+ # TODO (SW-62395): Implement proper Long -> Int casting
62
+ for key, val in data.items():
63
+ data[key] = val.to(datatype)
64
+ else:
65
+ data = None
66
+ data_b = mpu.broadcast_data(keys, data, datatype)
67
+
68
+ # Unpack.
69
+ if (datatype == torch.int64):
70
+ tokens = data_b['text'].long()
71
+ types = data_b['types'].long()
72
+ sentence_order = data_b['is_random'].long()
73
+ loss_mask = data_b['loss_mask'].float()
74
+ lm_labels = data_b['labels'].long()
75
+ padding_mask = data_b['padding_mask'].long()
76
+ else:
77
+ tokens = data_b['text'].int()
78
+ types = data_b['types'].int()
79
+ sentence_order = data_b['is_random'].int()
80
+ loss_mask = data_b['loss_mask'].float()
81
+ lm_labels = data_b['labels'].int()
82
+ padding_mask = data_b['padding_mask'].int()
83
+
84
+ return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
85
+
86
+
87
+ def loss_func(loss_mask, sentence_order, output_tensor):
88
+ lm_loss_, sop_logits = output_tensor
89
+
90
+ lm_loss_ = lm_loss_.float()
91
+ loss_mask = loss_mask.float()
92
+ lm_loss = torch.sum(
93
+ lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
94
+
95
+ if sop_logits is not None:
96
+ sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(),
97
+ sentence_order.view(-1),
98
+ ignore_index=-1)
99
+ sop_loss = sop_loss.float()
100
+ loss = lm_loss + sop_loss
101
+ averaged_losses = average_losses_across_data_parallel_group(
102
+ [lm_loss, sop_loss])
103
+ return loss, {'lm loss': averaged_losses[0],
104
+ 'sop loss': averaged_losses[1]}
105
+
106
+ else:
107
+ loss = lm_loss
108
+ averaged_losses = average_losses_across_data_parallel_group(
109
+ [lm_loss])
110
+ return loss, {'lm loss': averaged_losses[0]}
111
+
112
+
113
+ def forward_step(data_iterator, model):
114
+ """Forward step."""
115
+ args = get_args()
116
+ timers = get_timers()
117
+
118
+ # Get the batch.
119
+ timers('batch-generator').start()
120
+ tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = get_batch(
121
+ data_iterator)
122
+ timers('batch-generator').stop()
123
+
124
+ if not args.bert_binary_head:
125
+ types = None
126
+
127
+ # Forward pass through the model.
128
+ output_tensor = model(tokens, padding_mask, tokentype_ids=types,
129
+ lm_labels=lm_labels)
130
+
131
+ return output_tensor, partial(loss_func, loss_mask, sentence_order)
132
+
133
+
134
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
135
+ """Build train, valid, and test datasets."""
136
+ args = get_args()
137
+
138
+ print_rank_0('> building train, validation, and test datasets '
139
+ 'for BERT ...')
140
+ train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
141
+ data_prefix=args.data_path,
142
+ data_impl=args.data_impl,
143
+ splits_string=args.split,
144
+ train_valid_test_num_samples=train_val_test_num_samples,
145
+ max_seq_length=args.seq_length,
146
+ masked_lm_prob=args.mask_prob,
147
+ short_seq_prob=args.short_seq_prob,
148
+ seed=args.seed,
149
+ skip_warmup=(not args.mmap_warmup),
150
+ binary_head=args.bert_binary_head)
151
+ print_rank_0("> finished creating BERT datasets ...")
152
+
153
+ return train_ds, valid_ds, test_ds
154
+
155
+
156
+ if __name__ == "__main__":
157
+
158
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
159
+ args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
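The `loss_func` above averages the per-token LM loss only over non-masked positions and, when the binary head is enabled, adds the sentence-order (SOP) loss. A self-contained sketch of the masked-mean pattern, using toy tensors rather than real model output:

```python
# Sketch of the masked-mean loss computation used in loss_func above:
# per-token losses are summed where loss_mask is 1 and divided by the
# number of unmasked tokens.
import torch

per_token_loss = torch.tensor([[2.0, 1.0, 4.0],
                               [3.0, 0.5, 6.0]])
loss_mask = torch.tensor([[1.0, 1.0, 0.0],
                          [1.0, 0.0, 0.0]])  # 0 = padded / non-predicted token
lm_loss = torch.sum(per_token_loss.view(-1) * loss_mask.view(-1)) / loss_mask.sum()
print(lm_loss)  # (2.0 + 1.0 + 3.0) / 3 = 2.0
```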
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_gpt.py ADDED
@@ -0,0 +1,335 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2023 Habana Labs, Ltd. an Intel Company.
3
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Pretrain GPT"""
18
+
19
+ import torch
20
+ from functools import partial
21
+ from megatron import get_args
22
+ from megatron import print_rank_0
23
+ from megatron import get_timers
24
+ from megatron import get_tokenizer
25
+ from megatron import mpu
26
+ from megatron.data.gpt_dataset import build_train_valid_test_datasets
27
+ from megatron.model import GPTModel, GPTModelPipe
28
+ from megatron.training import pretrain
29
+ from megatron.utils import get_ltor_masks_and_position_ids
30
+ from megatron.utils import average_losses_across_data_parallel_group
31
+ from megatron.global_vars import get_current_device
32
+ from megatron.enums import PositionEmbeddingType
33
+ import deepspeed
34
+ from deepspeed.runtime.utils import see_memory_usage
35
+ import os
36
+ import subprocess
37
+
38
+ from torch import nn
39
+ import torch.nn.functional as F
40
+
41
+ def model_provider(pre_process=True, post_process=True, parallel_output=True):
42
+ """Build the model."""
43
+
44
+ print_rank_0('building GPT model ...')
45
+ see_memory_usage(f"Before Building Model", force=True)
46
+
47
+ args = get_args()
48
+ with deepspeed.zero.Init(data_parallel_group=mpu.get_data_parallel_group(),
49
+ remote_device=None if args.remote_device == 'none' else args.remote_device,
50
+ config_dict_or_path=args.deepspeed_config,
51
+ enabled=args.zero_stage == 3,
52
+ mpu=mpu):
53
+ current_device = get_current_device()
54
+ if args.deepspeed and not args.no_pipeline_parallel:
55
+
56
+ # verify --deepspeed_activation_checkpointing
57
+ # mandatory! otherwise the model uses fork() mapping to Megatron's RNGStatesTrackerSingleton
58
+ # while GPTModelPipe uses DS checkpoint activations that uses DS's RNGStatesTracker
59
+ if args.checkpoint_activations and args.checkpoint_activations_granularity == "full":
60
+ assert args.deepspeed_activation_checkpointing, \
61
+ "Flag --deepspeed_activation_checkpointing is mandatory when using GPTModelPipe" \
62
+ " with checkpoint activations granularity full."
63
+
64
+ model = GPTModelPipe(
65
+ num_tokentypes=0,
66
+ parallel_output=parallel_output,
67
+ )
68
+ # This is a hack to give us a reference to get_batch_pipe from within training.py
69
+ # We need to call model.set_batch_fn after deepspeed.initialize
70
+ model._megatron_batch_fn = get_batch_pipe
71
+
72
+ # Precompute the attention mask and store it in args. This avoids having to
73
+ # pipeline it as an activation during training. The mask is constant, and thus
74
+ # we can reuse it.
75
+ attention_mask = torch.tril(torch.ones(
76
+ (1, args.seq_length, args.seq_length), device=current_device)).view(
77
+ 1, 1, args.seq_length, args.seq_length)
78
+
79
+ # Convert attention mask to binary:
80
+ attention_mask = (attention_mask < 0.5)
81
+ if args.fp16:
82
+ attention_mask = attention_mask.half()
83
+ elif args.bf16:
84
+ attention_mask = attention_mask.bfloat16()
85
+
86
+ if args.mask_tensor_adding:
87
+ args.attn_mask = attention_mask * -10000.0
88
+ else:
89
+ args.attn_mask = attention_mask.to(torch.bool)
90
+
91
+ else:
92
+ assert args.position_embedding_type != PositionEmbeddingType.alibi, \
93
+ "GPTModel doesn't yet support ALiBi positional encoding"
94
+ model = GPTModel(
95
+ num_tokentypes=0,
96
+ parallel_output=parallel_output,
97
+ pre_process=pre_process,
98
+ post_process=post_process
99
+ ).to(current_device)
100
+ see_memory_usage(f"After Building Model", force=True)
101
+ return model
102
+
103
+
104
+ def get_batch(data_iterator):
105
+ """Generate a batch"""
106
+ args = get_args()
107
+ tokenizer = get_tokenizer()
108
+
109
+ # Items and their type.
110
+ keys = ['text']
111
+ datatype = torch.int64
112
+
113
+ # Broadcast data.
114
+ if data_iterator is not None:
115
+ data = next(data_iterator)
116
+ else:
117
+ data = None
118
+ data_b = mpu.broadcast_data(keys, data, datatype)
119
+
120
+ # Unpack.
121
+ tokens_ = data_b['text'].long()
122
+ if not args.use_seq_len_plus_one_tokens:
123
+ labels = torch.roll(tokens_, shifts=-1, dims=1)
124
+ labels[:, -1] = -1
125
+ tokens = tokens_
126
+ else:
127
+ labels = tokens_[:, 1:].contiguous()
128
+ tokens = tokens_[:, :-1].contiguous()
129
+
130
+ # Get the masks and position ids.
131
+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
132
+ tokens,
133
+ tokenizer.eod,
134
+ args.reset_position_ids,
135
+ args.reset_attention_mask,
136
+ args.eod_mask_loss,
137
+ labels = labels,
138
+ dummy_sample= None,)
139
+
140
+ tokens[tokens == -1] = 0
141
+ labels[labels == -1] = 0
142
+
143
+ return tokens, labels, loss_mask, attention_mask, position_ids
144
+
145
+
146
+ def get_batch_pipe(data):
147
+ """Modification of `get_batch` to work on `next(data_iterator)` instead of `data_iterator`"""
148
+ args = get_args()
149
+ tokenizer = get_tokenizer()
150
+
151
+ # Items and their type.
152
+ keys = ['text']
153
+ datatype = torch.int64
154
+
155
+ # Broadcast data.
156
+ data_b = mpu.broadcast_data(keys, data, datatype)
157
+
158
+ # Unpack.
159
+ tokens_ = data_b['text'].long()
160
+ if not args.use_seq_len_plus_one_tokens:
161
+ labels = torch.roll(tokens_, shifts=-1, dims=1)
162
+ labels[:, -1] = -1
163
+ tokens = tokens_
164
+ else:
165
+ labels = tokens_[:, 1:].contiguous()
166
+ tokens = tokens_[:, :-1].contiguous()
167
+
168
+ # Get the masks and position ids.
169
+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
170
+ tokens,
171
+ tokenizer.eod,
172
+ args.reset_position_ids,
173
+ args.reset_attention_mask,
174
+ args.eod_mask_loss,
175
+ labels = labels,
176
+ dummy_sample = None,
177
+ )
178
+ tokens[tokens == -1] = 0
179
+ labels[labels == -1] = 0
180
+
181
+
182
+ if args.curriculum_learning and args.curriculum_seqlen < tokens.size()[1]:
183
+ # seqlen-based curriculum learning
184
+ # tokens, position_ids, labels, loss_mask have size [batch size, seqlen]
185
+ tokens = tokens[:, :args.curriculum_seqlen].contiguous()
186
+ position_ids = position_ids[:, :args.curriculum_seqlen].contiguous()
187
+ if labels is not None:
188
+ labels = labels[:, :args.curriculum_seqlen].contiguous()
189
+ loss_mask = loss_mask[:, :args.curriculum_seqlen].contiguous()
190
+
191
+ return (tokens, position_ids, attention_mask), (labels, loss_mask)
192
+
193
+
194
+ def loss_func(loss_mask, moe_loss, mos_loss, output_tensor):
195
+ args = get_args()
196
+ losses = output_tensor.float()
197
+ loss_mask = loss_mask.view(-1).float()
198
+ loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
199
+
200
+ # Reduce loss for logging.
201
+ averaged_loss = average_losses_across_data_parallel_group([loss])
202
+ if args.mos or args.kd:
203
+ # assert max(args.num_experts) >= 1
204
+ loss = loss + moe_loss + mos_loss
205
+ if args.mos:
206
+ return loss, {'total loss': loss, 'lm loss': averaged_loss[0], 'moe loss': moe_loss, 'mos loss': mos_loss}
207
+ elif args.kd:
208
+ return loss, {'total loss': loss, 'lm loss': averaged_loss[0], 'moe loss': moe_loss, 'kd loss': mos_loss}
209
+ print_rank_0('>>> total loss: {}, lm loss {}, kd loss {}'.format(loss, averaged_loss[0], mos_loss))
210
+ else:
211
+ if max(args.num_experts) <= 1:
212
+ return loss, {'lm loss': averaged_loss[0]}
213
+ else:
214
+ loss = loss + moe_loss
215
+ return loss, {'lm loss': averaged_loss[0], 'moe loss': moe_loss}
216
+
217
+ def calculate_mos_loss(args, stu_output, teacher_model, tokens, position_ids, attention_mask):
218
+ mos_loss = 0
219
+ alpha = args.kd_alpha_ce
220
+ beta = args.kd_beta_ce
221
+ kd_temp = args.kd_temp
222
+
223
+ if teacher_model:
224
+ with torch.no_grad():
225
+ if args.curriculum_learning and args.curriculum_seqlen < args.seq_length:
226
+ assert args.curriculum_seqlen is not None
227
+ curriculum_seqlen = args.curriculum_seqlen
228
+ tokens = tokens[:, :curriculum_seqlen].contiguous()
229
+ position_ids = position_ids[:, :curriculum_seqlen].contiguous()
230
+ attention_mask = attention_mask[:, :, :curriculum_seqlen, :curriculum_seqlen].contiguous()
231
+ # No need to truncate labels as we do not need it for the teacher logits
232
+ tea_output, *tea_other_losses = teacher_model(tokens, position_ids, attention_mask)
233
+ assert stu_output.size() == tea_output.size(), 'teacher and student output should match in size. Student: {}, Teacher: {}, CL seq length {}'.format(stu_output.size(), tea_output.size(), args.curriculum_seqlen)
234
+
235
+ student_logits = F.log_softmax(stu_output / kd_temp, dim=2)
236
+ tea_logits = F.softmax(tea_output / kd_temp, dim=2) # The target logits are expected to be probabilities. If we use log_softmax, then we need to set log_target=True when initializing the KLDivLoss.
237
+
238
+ mos_loss = kd_temp * kd_temp * nn.KLDivLoss(reduction='batchmean')(student_logits, tea_logits)
239
+
240
+ mos_loss = mos_loss.div(args.seq_length) * beta
241
+ return mos_loss
242
+
243
+ def forward_step(data_iterator, model, teacher_model=None):
244
+ """Forward step."""
245
+ args = get_args()
246
+ timers = get_timers()
247
+
248
+ # Get the batch.
249
+ timers('batch-generator').start()
250
+ tokens, labels, loss_mask, attention_mask, position_ids = get_batch(
251
+ data_iterator)
252
+ timers('batch-generator').stop()
253
+
254
+ if args.mos or args.kd:
255
+ # The forward func can return either the loss or the logits, depending on whether passing in the labels or not.
256
+ stu_output, *other_losses = model(tokens, position_ids, attention_mask)
257
+ if args.curriculum_learning and args.curriculum_seqlen < args.seq_length:
258
+ assert args.curriculum_seqlen is not None
259
+ labels = labels[:, :args.curriculum_seqlen].contiguous()
260
+ output_tensor = mpu.vocab_parallel_cross_entropy(stu_output.contiguous().float(), labels)
261
+ else:
262
+ output_tensor, *other_losses = model(tokens, position_ids, attention_mask,
263
+ labels=labels)
264
+ if args.curriculum_learning and args.curriculum_seqlen < args.seq_length:
265
+ loss_mask = loss_mask[:, :args.curriculum_seqlen].contiguous()
266
+
267
+ moe_losses = []
268
+ for moe_loss in other_losses:
269
+ if moe_loss is not None:
270
+ moe_losses.append(moe_loss)
271
+ moe_loss = sum(moe_losses) * args.moe_loss_coeff
272
+
273
+ mos_loss = 0
274
+ if args.mos or args.kd:
275
+ assert model.training
276
+ mos_loss = calculate_mos_loss(args, stu_output, teacher_model, tokens, position_ids, attention_mask)
277
+
278
+ # Output_tensor stores the standard loss, loss_func calculates the total loss.
279
+ return output_tensor, partial(loss_func, loss_mask, moe_loss, mos_loss)
280
+
281
+
282
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
283
+ """Build train, valid, and test datasets."""
284
+ args = get_args()
285
+
286
+ print_rank_0('> building train, validation, and test datasets '
287
+ 'for GPT ...')
288
+ train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
289
+ data_prefix=args.data_path,
290
+ train_data_prefix=args.train_data_path,
291
+ valid_data_prefix=args.valid_data_path,
292
+ test_data_prefix=args.test_data_path,
293
+ data_impl=args.data_impl,
294
+ splits_string=args.split,
295
+ train_valid_test_num_samples=train_val_test_num_samples,
296
+ seq_length=args.seq_length,
297
+ seed=args.seed,
298
+ skip_warmup=(not args.mmap_warmup),
299
+ use_seq_len_plus_one_tokens=args.use_seq_len_plus_one_tokens)
300
+ print_rank_0("> finished creating GPT datasets ...")
301
+
302
+ return train_ds, valid_ds, test_ds
303
+
304
+
305
+ def command_exists(cmd):
306
+ result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
307
+ return result.wait() == 0
308
+
309
+
310
+ def git_ds_info():
311
+ from deepspeed.env_report import main as ds_report
312
+ ds_report()
313
+
314
+ # Write out version/git info
315
+ git_hash_cmd = "git rev-parse --short HEAD"
316
+ git_branch_cmd = "git rev-parse --abbrev-ref HEAD"
317
+ if command_exists('git'):
318
+ try:
319
+ result = subprocess.check_output(git_hash_cmd, shell=True)
320
+ git_hash = result.decode('utf-8').strip()
321
+ result = subprocess.check_output(git_branch_cmd, shell=True)
322
+ git_branch = result.decode('utf-8').strip()
323
+ except subprocess.CalledProcessError:
324
+ git_hash = "unknown"
325
+ git_branch = "unknown"
326
+ else:
327
+ git_hash = "unknown"
328
+ git_branch = "unknown"
329
+ print(f'**** Git info for Megatron: git_hash={git_hash} git_branch={git_branch} ****')
330
+
331
+
332
+ if __name__ == "__main__":
333
+ git_ds_info()
334
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
335
+ args_defaults={'tokenizer_type': 'GPT2BPETokenizer'})
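Note on the forward_step pattern above: forward_step returns the model output together with a partially applied loss function, and the training loop later invokes that closure on the output tensor. A minimal, self-contained sketch of the same functools.partial idiom (tensor shapes are made up for illustration, not taken from the benchmark):

    import torch
    from functools import partial

    def loss_func(loss_mask, output_tensor):
        # Mean of the per-token losses over the positions selected by the mask.
        losses = output_tensor.float().view(-1)
        return (losses * loss_mask.view(-1)).sum() / loss_mask.sum()

    output_tensor = torch.rand(2, 4)      # stand-in for the per-token LM loss
    loss_mask = torch.ones(2, 4)
    loss_closure = partial(loss_func, loss_mask)
    print(loss_closure(output_tensor))    # the training loop calls the closure later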
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_ict.py ADDED
@@ -0,0 +1,167 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Pretrain BERT for Inverse Cloze Task"""
17
+ import math
18
+
19
+ import torch
20
+ import torch.distributed as dist
21
+ import torch.nn.functional as F
22
+
23
+ from megatron import get_args
24
+ from megatron import print_rank_0
25
+ from megatron import get_timers
26
+ from megatron import mpu
27
+ from megatron.data.biencoder_dataset_utils import get_ict_batch
28
+ from megatron.data.dataset_utils import build_train_valid_test_datasets
29
+ from megatron.model.biencoder_model import biencoder_model_provider
30
+ from megatron.training import pretrain
31
+ from megatron.utils import average_losses_across_data_parallel_group
32
+
33
+
34
+ def pretrain_ict_model_provider():
35
+ args = get_args()
36
+ model = biencoder_model_provider(
37
+ only_context_model=False,
38
+ only_query_model=False,
39
+ biencoder_shared_query_context_model=\
40
+ args.biencoder_shared_query_context_model)
41
+ return model
42
+
43
+ def get_group_world_size_rank():
44
+
45
+ group = mpu.get_data_parallel_group()
46
+ rank = torch.distributed.get_rank(group=group)
47
+ world_size = torch.distributed.get_world_size(group=group)
48
+
49
+ return group, rank, world_size
50
+
51
+
52
+ class AllgatherFromDataParallelRegion(torch.autograd.Function):
53
+
54
+ @staticmethod
55
+ def forward(ctx, input_):
56
+ assert input_.dim() == 2
57
+ group, rank, world_size = get_group_world_size_rank()
58
+
59
+ tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
60
+ tensor_list[rank] = input_
61
+ torch.distributed.all_gather(tensor_list, input_, group=group)
62
+
63
+ output = torch.cat(tensor_list, dim=0).contiguous()
64
+
65
+ return output
66
+
67
+
68
+ @staticmethod
69
+ def backward(ctx, grad_output):
70
+ group, rank, world_size = get_group_world_size_rank()
71
+
72
+ assert grad_output.shape[0] % world_size == 0
73
+ dim_size = grad_output.shape[0] // world_size
74
+ output_list = torch.split(grad_output, dim_size, dim=0)
75
+
76
+ # get chunk from this rank
77
+ output = output_list[rank].contiguous()
78
+ return output
79
+
80
+ def forward_step(data_iterator, model, input_tensor):
81
+ """Forward step."""
82
+ args = get_args()
83
+ timers = get_timers()
84
+
85
+ # Get the batch.
86
+ timers('batch-generator').start()
87
+ query_tokens, query_mask, \
88
+ context_tokens, context_mask, context_indices = get_ict_batch(data_iterator)
89
+ timers('batch-generator').stop()
90
+
91
+ # Query and Context Types
92
+ query_types = torch.cuda.LongTensor(*query_tokens.shape).fill_(0)
93
+ context_types = torch.cuda.LongTensor(*context_tokens.shape).fill_(0)
94
+
95
+ # Forward model.
96
+ query_logits, context_logits = model(query_tokens, query_mask,
97
+ query_types, context_tokens,
98
+ context_mask, context_types)
99
+
100
+ micro_batch_size = query_logits.shape[0]
101
+ # recall we assert that tensor_model_parallel_size == 1
102
+ assert mpu.get_tensor_model_parallel_world_size() == 1, \
103
+ "Model parallel size > 1 not supported for ICT"
104
+
105
+ global_batch_size = dist.get_world_size() * micro_batch_size
106
+ all_query_logits = AllgatherFromDataParallelRegion.apply(query_logits)
107
+ all_context_logits = AllgatherFromDataParallelRegion.apply(context_logits)
108
+
109
+ # scores are inner products between query and context embeddings
110
+ retrieval_scores = torch.matmul(all_query_logits,
111
+ torch.transpose(all_context_logits, 0, 1))
112
+ # scaling the retriever scores
113
+ if args.retriever_score_scaling:
114
+ retrieval_scores = retrieval_scores / math.sqrt(args.hidden_size)
115
+
116
+ softmax_scores = F.log_softmax(retrieval_scores, dim=1)
117
+ sorted_vals, sorted_indices = torch.topk(softmax_scores,
118
+ k=softmax_scores.shape[1], sorted=True)
119
+
120
+ def topk_accuracy(k):
121
+ return torch.cuda.FloatTensor([sum([int(i in sorted_indices[i, :k]) \
122
+ for i in range(global_batch_size)]) / global_batch_size])
123
+
124
+ topk_accs = [topk_accuracy(int(k)) for k in args.retriever_report_topk_accuracies]
125
+
126
+ labels = torch.arange(global_batch_size).long().cuda()
127
+ loss = F.nll_loss(softmax_scores, labels, reduction='mean')
128
+ reduced_losses = average_losses_across_data_parallel_group([loss, *topk_accs])
129
+
130
+ # Scale the retrieval loss
131
+ loss = loss * mpu.get_data_parallel_world_size()
132
+
133
+ # create stats_dict with retrieval loss and all specified top-k accuracies
134
+ topk_acc_dict = {'top{}_acc'.format(k): v * 100 for k, v in \
135
+ zip(args.retriever_report_topk_accuracies, reduced_losses[1:])}
136
+ stats_dict = dict(loss=reduced_losses[0], **topk_acc_dict)
137
+ return loss, stats_dict
138
+
139
+
140
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
141
+ """Build train, valid and test datasets."""
142
+ args = get_args()
143
+ print_rank_0('> building train, validation, and test datasets '
144
+ 'for BERT ICT...')
145
+
146
+ train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
147
+ data_prefix=args.data_path,
148
+ data_impl=args.data_impl,
149
+ splits_string=args.split,
150
+ train_valid_test_num_samples=train_val_test_num_samples,
151
+ max_seq_length=args.seq_length,
152
+ masked_lm_prob=args.mask_prob,
153
+ short_seq_prob=args.short_seq_prob,
154
+ seed=args.seed,
155
+ skip_warmup=(not args.mmap_warmup),
156
+ binary_head=False,
157
+ dataset_type='ict')
158
+ print_rank_0("> finished creating BERT ICT datasets ...")
159
+
160
+ return train_ds, valid_ds, test_ds
161
+
162
+
163
+ if __name__ == "__main__":
164
+ pretrain(train_valid_test_datasets_provider,
165
+ pretrain_ict_model_provider,
166
+ forward_step,
167
+ args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
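Note on the ICT forward_step above: every query in the global batch is scored against every context, and top-k retrieval accuracy is read off the ranked scores, with the matching context for query i sitting at index i. A small CPU-only sketch of that accuracy computation (values are illustrative, not from the benchmark):

    import torch

    scores = torch.randn(8, 8)   # query x context similarity for an 8-sample global batch
    _, ranked = torch.topk(scores, k=scores.shape[1], dim=1, sorted=True)

    def topk_accuracy(k):
        # The matching context for query i is context i (in-batch negatives).
        hits = sum(int(i in ranked[i, :k]) for i in range(scores.shape[0]))
        return hits / scores.shape[0]

    print(topk_accuracy(1), topk_accuracy(5))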
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_t5.py ADDED
@@ -0,0 +1,134 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Pretrain T5"""
17
+
18
+ from functools import partial
19
+
20
+ import torch
21
+
22
+ from megatron import (
23
+ get_args,
24
+ get_timers,
25
+ mpu,
26
+ print_rank_0
27
+ )
28
+ from megatron.data.dataset_utils import build_train_valid_test_datasets
29
+ from megatron.model import T5Model
30
+ from megatron.training import pretrain
31
+ from megatron.utils import average_losses_across_data_parallel_group
32
+
33
+
34
+ def model_provider(pre_process=True, post_process=True):
35
+ """Build the model."""
36
+ assert pre_process and post_process, "T5 doesn't yet support pipelining"
37
+
38
+ print_rank_0('building T5 model ...')
39
+ model = T5Model(num_tokentypes=0,
40
+ parallel_output=True)
41
+ return model
42
+
43
+
44
+ def get_batch(data_iterator):
45
+ """Build the batch."""
46
+
47
+ keys = ['text_enc', 'text_dec', 'labels', 'loss_mask',
48
+ 'enc_mask', 'dec_mask', 'enc_dec_mask']
49
+ datatype = torch.int64
50
+
51
+ # Broadcast data.
52
+ if data_iterator is not None:
53
+ data = next(data_iterator)
54
+ else:
55
+ data = None
56
+ data_b = mpu.broadcast_data(keys, data, datatype)
57
+
58
+ # Unpack.
59
+ tokens_enc = data_b['text_enc'].long()
60
+ tokens_dec = data_b['text_dec'].long()
61
+ labels = data_b['labels'].long()
62
+ loss_mask = data_b['loss_mask'].float()
63
+
64
+ enc_mask = (data_b['enc_mask'] < 0.5)
65
+ dec_mask = (data_b['dec_mask'] < 0.5)
66
+ enc_dec_mask = (data_b['enc_dec_mask'] < 0.5)
67
+
68
+ return tokens_enc, tokens_dec, loss_mask, labels, \
69
+ enc_mask, dec_mask, enc_dec_mask
70
+
71
+
72
+ def loss_func(loss_mask, output_tensor):
73
+ lm_loss_, _ = output_tensor
74
+
75
+ lm_loss_ = lm_loss_.float()
76
+ lm_loss = torch.sum(
77
+ lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
78
+
79
+ loss = lm_loss
80
+ averaged_losses = average_losses_across_data_parallel_group([lm_loss])
81
+
82
+ return loss, {'lm loss': averaged_losses[0]}
83
+
84
+
85
+ def forward_step(data_iterator, model):
86
+ """Forward step."""
87
+ args = get_args()
88
+ timers = get_timers()
89
+
90
+ # Get the batch.
91
+ timers('batch generator').start()
92
+ tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask \
93
+ = get_batch(data_iterator)
94
+ timers('batch generator').stop()
95
+
96
+ # Forward the model with lm_labels.
97
+ output_tensor = model(tokens_enc,
98
+ tokens_dec,
99
+ enc_mask,
100
+ dec_mask,
101
+ enc_dec_mask,
102
+ tokentype_ids=None,
103
+ lm_labels=lm_labels)
104
+
105
+ return output_tensor, partial(loss_func, loss_mask)
106
+
107
+
108
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
109
+ """Build train, valid, and test datasets."""
110
+ args = get_args()
111
+
112
+ print_rank_0('> building train, validation, and test datasets '
113
+ 'for T5 ...')
114
+ train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
115
+ data_prefix=args.data_path,
116
+ data_impl=args.data_impl,
117
+ splits_string=args.split,
118
+ train_valid_test_num_samples=train_val_test_num_samples,
119
+ max_seq_length=args.encoder_seq_length,
120
+ max_seq_length_dec=args.decoder_seq_length,
121
+ masked_lm_prob=args.mask_prob,
122
+ short_seq_prob=args.short_seq_prob,
123
+ seed=args.seed,
124
+ skip_warmup=(not args.mmap_warmup),
125
+ dataset_type='t5')
126
+ print_rank_0("> finished creating T5 datasets ...")
127
+
128
+ return train_ds, valid_ds, test_ds
129
+
130
+
131
+ if __name__ == "__main__":
132
+
133
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
134
+ args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
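Note on loss_func in pretrain_t5.py above: the per-token LM loss is averaged only over the positions selected by loss_mask. A minimal sketch with made-up shapes:

    import torch

    lm_loss_ = torch.rand(2, 5)                      # per-token loss, [batch, seq]
    loss_mask = torch.tensor([[1., 1., 0., 0., 0.],
                              [1., 1., 1., 0., 0.]])
    lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
    print(lm_loss)                                   # mean over the 5 unmasked positions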
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_vit.py ADDED
@@ -0,0 +1,91 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Pretrain VIT"""
17
+
18
+ import torch
19
+ import torch.nn.functional as F
20
+ from megatron import get_args, get_timers, mpu, print_rank_0
21
+ from megatron.data.vit_dataset import build_train_valid_datasets
22
+ from megatron.model.vit_model import VitModel
23
+ from megatron.training import pretrain
24
+ from megatron.utils import average_losses_across_data_parallel_group
25
+
26
+ def model_provider():
27
+ """Build the model."""
28
+
29
+ print_rank_0("building VIT model ...")
30
+ args = get_args()
31
+
32
+ model = VitModel(num_classes=args.num_classes)
33
+ return model
34
+
35
+ def get_batch(data_iterator):
36
+ """Build the batch."""
37
+ data = next(data_iterator)
38
+
39
+ # only data parallelism; no need for broadcast
40
+ images = data[0].cuda()
41
+ labels = data[1].cuda()
42
+
43
+ return images, labels
44
+
45
+ def forward_step(data_iterator, model, input_tensor):
46
+ """Forward step."""
47
+ timers = get_timers()
48
+ assert input_tensor is None
49
+
50
+ # Get the batch.
51
+ timers("batch-generator").start()
52
+ (
53
+ images,
54
+ labels,
55
+ ) = get_batch(data_iterator)
56
+ timers("batch-generator").stop()
57
+
58
+ # Forward model.
59
+ logits = model(images).contiguous().float()
60
+ loss = F.cross_entropy(logits, labels)
61
+
62
+ outputs = torch.argmax(logits, -1)
63
+ correct = (outputs == labels).float()
64
+ accuracy = torch.mean(correct)
65
+
66
+ averaged_loss = average_losses_across_data_parallel_group([loss, accuracy])
67
+
68
+ return loss, {"loss": averaged_loss[0], "accuracy": averaged_loss[1]}
69
+
70
+
71
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
72
+ """Build train, valid, and test datasets."""
73
+ args = get_args()
74
+
75
+ print_rank_0(
76
+ "> building train, validation, and test datasets " "for VIT ..."
77
+ )
78
+ train_ds, valid_ds = build_train_valid_datasets(data_path=args.data_path)
79
+ print_rank_0("> finished creating VIT datasets ...")
80
+
81
+ return train_ds, valid_ds, None
82
+
83
+
84
+ if __name__ == "__main__":
85
+
86
+ pretrain(
87
+ train_valid_test_datasets_provider,
88
+ model_provider,
89
+ forward_step,
90
+ args_defaults={'dataloader_type': 'cyclic'}
91
+ )
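Note on the ViT forward_step above: loss and accuracy are derived directly from the classifier logits with cross-entropy and argmax. A toy sketch of the same computation (random values, illustration only):

    import torch
    import torch.nn.functional as F

    logits = torch.randn(4, 10)                      # [batch, num_classes]
    labels = torch.tensor([1, 3, 3, 7])
    loss = F.cross_entropy(logits, labels)
    accuracy = (torch.argmax(logits, -1) == labels).float().mean()
    print(loss.item(), accuracy.item())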
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/requirements.txt ADDED
@@ -0,0 +1,10 @@
1
+ git+https://github.com/mlcommons/[email protected]
2
+ git+https://github.com/NVIDIA/mlperf-common.git
3
+ pybind11
4
+ torch
5
+ six
6
+ regex
7
+ numpy
8
+ sentencepiece
9
+ torchmetrics>=0.8.0
10
+ einops
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/run_gpt.sh ADDED
@@ -0,0 +1,433 @@
1
+ #!/bin/bash
2
+ # Copyright (c) 2023 Habana Labs, Ltd. an Intel Company.
3
+
4
+ set -ex
5
+ function parse_args()
6
+ {
7
+ while true; do
8
+ case "$1" in
9
+ --data-dir )
10
+ DATA_DIR="$2"
11
+ shift 2 ;;
12
+ --num-nodes )
13
+ NUM_NODES="$2"
14
+ shift 2 ;;
15
+ --devices-per-node )
16
+ DEVICES_PER_NODE="$2"
17
+ shift 2 ;;
18
+ --data-parallel-size )
19
+ DP="$2"
20
+ shift 2 ;;
21
+ --tensor-model-parallel-size )
22
+ TP="$2"
23
+ shift 2 ;;
24
+ --pipeline-model-parallel-size )
25
+ PP="$2"
26
+ shift 2 ;;
27
+ --num-layers )
28
+ NUM_LAYERS="$2"
29
+ shift 2 ;;
30
+ --hidden-size )
31
+ HIDDEN_SIZE="$2"
32
+ shift 2 ;;
33
+ --num-attention-heads )
34
+ NUM_ATTENTION_HEADS="$2"
35
+ shift 2 ;;
36
+ --seq-length )
37
+ SEQ_LENGTH="$2"
38
+ shift 2 ;;
39
+ --dropout )
40
+ DROPOUT="$2"
41
+ shift 2 ;;
42
+ --micro-batch-size )
43
+ MICRO_BATCH="$2"
44
+ shift 2 ;;
45
+ --eval-micro-batch-size )
46
+ EVAL_MICRO_BATCH="$2"
47
+ shift 2 ;;
48
+ --global-batch-size )
49
+ GLOBAL_BATCH="$2"
50
+ shift 2 ;;
51
+ --train-samples )
52
+ TRAIN_SAMPLES="$2"
53
+ shift 2 ;;
54
+ --lr )
55
+ LR="$2"
56
+ shift 2 ;;
57
+ --min-lr )
58
+ MIN_LR="$2"
59
+ shift 2 ;;
60
+ --lr-decay-samples )
61
+ LR_DECAY_SAMPLES="$2"
62
+ shift 2 ;;
63
+ --lr-warmup-samples )
64
+ LR_WARMUP_SAMPLES="$2"
65
+ shift 2 ;;
66
+ --seed )
67
+ SEED="$2"
68
+ shift 2 ;;
69
+ --eval-iters )
70
+ EVAL_ITERS="$2"
71
+ shift 2 ;;
72
+ --eval-interval )
73
+ EVAL_INTERVAL="$2"
74
+ shift 2 ;;
75
+ --exit-interval )
76
+ EXIT_INTERVAL="$2"
77
+ shift 2 ;;
78
+ --output-dir )
79
+ OUTPUT_DIR="$2"
80
+ shift 2 ;;
81
+ --start-from-ckpt )
82
+ START_FROM_CKPT="$2"
83
+ shift 2 ;;
84
+ --universal-ckpt-path )
85
+ UNIVERSAL_CKPT_PATH="$2"
86
+ shift 2 ;;
87
+ --save-checkpoints )
88
+ SAVE_CKPT="$2"
89
+ shift 2 ;;
90
+ --save-checkpoints-dir )
91
+ SAVE_CKPT_DIR="$2"
92
+ shift 2 ;;
93
+ --save-interval )
94
+ SAVE_INTERVAL="$2"
95
+ shift 2 ;;
96
+ --log-interval )
97
+ LOG_INTERVAL="$2"
98
+ shift 2 ;;
99
+ --tensorboard-dir )
100
+ TENSORBOARD_DIR="$2"
101
+ shift 2 ;;
102
+ --kill-switch-file )
103
+ KILL_SWITCH_FILE="$2"
104
+ shift 2 ;;
105
+ --hosts )
106
+ HOSTS="$2"
107
+ shift 2 ;;
108
+ --hostsfile )
109
+ HOSTSFILE="$2"
110
+ shift 2 ;;
111
+ --mllog-output-path )
112
+ MLLOG_FILE="$2"
113
+ shift 2 ;;
114
+ --eval-loss-exit-value )
115
+ EVAL_LOSS_EXIT_VALUE="$2"
116
+ shift 2 ;;
117
+ --profile )
118
+ PROFILE_FLAG="--profile $2"
119
+ shift 2 ;;
120
+ --profile-steps )
121
+ PROFILE_STEPS_FLAG="--profile-steps $2"
122
+ shift 2 ;;
123
+ -te | --use-fp8-transformer-engine )
124
+ TRANSFORMER_ENGINE_FLAG="--use-hpu-fp8-transformer-engine"
125
+ shift 1 ;;
126
+ -fsdpa | --use-fused-sdpa )
127
+ USE_FUSED_SDPA="--use-fused-sdpa $2"
128
+ shift 2 ;;
129
+ -fsdpa-recompute | --use-fused-sdpa-with-recompute )
130
+ USE_FUSED_SDPA_WITH_RECOMPUTE_ARG="$2"
131
+ shift 2 ;;
132
+ --fp8-measure-interval )
133
+ FP8_MEASURE_INTERVAL="$2"
134
+ shift 2 ;;
135
+ --use-hpu-graphs )
136
+ HPU_GRAPHS_FLAG="--use-hpu-graphs $2"
137
+ shift 2 ;;
138
+ --cache-fp8-weight-fwd )
139
+ HPU_GRAPHS_FLAG="--cache-fp8-weight-fwd $2"
140
+ shift 2 ;;
141
+ --ext-train-iters )
142
+ EXTERNAL_TRAINING_ITERATIONS="$2"
143
+ shift 2 ;;
144
+ -sp | --sequence-parallel )
145
+ SEQUENCE_PARALLEL="$2"
146
+ shift 2 ;;
147
+ --device-warmup )
148
+ DEVICE_WARMUP=$2
149
+ shift 2 ;;
150
+ --device-warmup-dataset-path )
151
+ WARMUP_DATASET_PATH=$2
152
+ shift 2 ;;
153
+ --device-warmup-iterations )
154
+ WARMUP_ITERATIONS=$2
155
+ shift 2 ;;
156
+ -- )
157
+ shift
158
+ break ;;
159
+ * )
160
+ if [[ -n "$1" ]]; then
161
+ echo "error: invalid parameter: $1"
162
+ exit -1
163
+ fi
164
+ break ;;
165
+ esac
166
+ done
167
+
168
+ }
169
+
170
+ function generate_hostsfile()
171
+ {
172
+ HOSTS_PATH=$1
173
+ HOSTSFILE_PATH=$2
174
+ local num_nodes=${3:-8}
175
+
176
+ rm -rf $HOSTSFILE_PATH
177
+ touch $HOSTSFILE_PATH
178
+
179
+ while IFS= read -r ip; do
180
+ echo "$ip slots=$num_nodes" >> $HOSTSFILE_PATH
181
+ done < "$HOSTS_PATH"
182
+
183
+ echo "hostsfile: "
184
+ cat $HOSTSFILE_PATH
185
+ }
186
+
187
+
188
+ # Default argument values; these can be overridden on the command line (parsed by parse_args) or via environment variables
189
+ DATA_DIR="/mnt/weka/data/mlperf_datasets/gpt-3/c4_mlperf_19_12_2022/preprocessed_c4_spm"
190
+ NUM_NODES=8
191
+ DEVICES_PER_NODE=8
192
+ DP=1
193
+ TP=8
194
+ PP=8
195
+ NUM_LAYERS=96
196
+ HIDDEN_SIZE=12288
197
+ NUM_ATTENTION_HEADS=96
198
+ SEQ_LENGTH=2048
199
+ DROPOUT=0.0
200
+ MICRO_BATCH=2
201
+ EVAL_MICRO_BATCH=8
202
+ GLOBAL_BATCH=2048
203
+ CLIP_GRAD=1.0
204
+ ZERO_STAGE=0
205
+ TRAIN_SAMPLES=84500000
206
+ LR=2.0e-5
207
+ MIN_LR=2.0e-6
208
+ LR_DECAY_SAMPLES=166809600
209
+ LR_WARMUP_SAMPLES=407040
210
+ SEED=${RANDOM}
211
+ EVAL_ITERS=-1
212
+ EVAL_INTERVAL=12
213
+ EXIT_INTERVAL=500
214
+ START_FROM_CKPT=true
215
+ SAVE_CKPT=true
216
+ SAVE_INTERVAL=500
217
+ LOG_INTERVAL=1
218
+ UNIVERSAL_CKPT_PATH="/mnt/weka/data/pytorch/gpt3/gpt3_spmd1x64x24_tpuv4-3072_v84_20221101_universal4000"
219
+ OUTPUT_DIR=${OUTPUT_DIR:-"/tmp"}
220
+ HOSTS=""
221
+ HOSTSFILE="/root/shared/hostsfile"
222
+ MLLOG_FILE="/tmp/result_0.txt"
223
+ EVAL_LOSS_EXIT_VALUE=2.69
224
+ TRANSFORMER_ENGINE_FLAG=""
225
+ USE_FUSED_SDPA="--use-fused-sdpa true"
226
+ USE_FUSED_SDPA_WITH_RECOMPUTE_ARG="false"
227
+ FP8_MEASURE_INTERVAL=16
228
+ CACHE_FP8_WEIGHT_FWD_FLAG="--cache-fp8-weight-fwd true"
229
+ HPU_GRAPHS_FLAG="--use-hpu-graphs false"
230
+ ACCUMULATE_GRADS_VIA_HOOKS="true"
231
+ EXTERNAL_TRAINING_ITERATIONS=4000
232
+ EXTERNAL_GBS=1536
233
+ SEQUENCE_PARALLEL=true
234
+ DEVICE_WARMUP=true
235
+ WARMUP_DATASET_PATH="/mnt/weka/data/mlperf_datasets/gpt-3/synthetic_dataset/warmup_dataset"
236
+ WARMUP_ITERATIONS=5
237
+ CACHE_FP8_WEIGHT_FLAG="--cache-fp8-weight"
238
+
239
+ parse_args "$@"
240
+
241
+ if [ -f "$HOSTS" ]; then
242
+ generate_hostsfile $HOSTS $HOSTSFILE 8
243
+ fi
244
+
245
+ # data and model dir paths
246
+ DATA_PATH_6=$DATA_DIR/c4_en_6_c4_spm_text_document
247
+ DATA_PATH_7=$DATA_DIR/c4_en_7_c4_spm_text_document
248
+ VALID_DATA_PATH=$DATA_DIR/c4_en_validation_c4_spm_text_document
249
+ MODEL_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
250
+ # Allow overriding the /proc file system path in case it is mounted under a different name in the Docker container
251
+ PROC_FS=${PROC_FS:-"/proc"}
252
+
253
+ # output log path
254
+ if [ -z "$OUTPUT_DIR" ]; then
255
+ RUNTIME=`date +"%Y%m%d_%H%M"`
256
+ OUTPUT_DIR=out/gpt3/ds_z${ZERO_STAGE}_nl${NUM_LAYERS}_hs${HIDDEN_SIZE}_gb${GLOBAL_BATCH}_mb${MICRO_BATCH}_D${DP}_T${TP}_P${PP}_${RUNTIME}
257
+ fi
258
+ if [ -z "$TENSORBOARD_DIR" ]; then
259
+ TENSORBOARD_DIR=$OUTPUT_DIR/tensorboard
260
+ fi
261
+
262
+ # saving checkpoint args
263
+ if [ $SAVE_CKPT = true ] || [ $SAVE_CKPT = 1 ]; then
264
+ if [ -z "$SAVE_CKPT_DIR" ]; then
265
+ SAVE_CKPT_DIR=$OUTPUT_DIR/checkpoints
266
+ fi
267
+ SAVE_CKPT_ARGS=" --save $SAVE_CKPT_DIR --save-interval $SAVE_INTERVAL "
268
+ fi
269
+
270
+ if [ "$DEVICE_WARMUP" == "true" ]; then
271
+ DEVICE_WARMUP_ARG=" --device-warmup --warmup-dataset-path $WARMUP_DATASET_PATH --device-warmup-iterations $WARMUP_ITERATIONS"
272
+ fi
273
+
274
+ # handle kill switch argument
275
+ if [ -n "$KILL_SWITCH_FILE" ]; then
276
+ KILL_SWITCH_ARG="--kill-switch-path $KILL_SWITCH_FILE"
277
+ fi
278
+
279
+ # Checkpoint loading configure
280
+ LOAD_CHECKPOINT_ARGS=""
281
+ if [ $START_FROM_CKPT = true ] || [ $START_FROM_CKPT = 1 ]; then
282
+ CHECKPOINTS_BACKUP="$OUTPUT_DIR/../../checkpoints"
283
+ if [ "$(ls -A $CHECKPOINTS_BACKUP 2>/dev/null)" ]; then
284
+ LOAD_CHECKPOINT_ARGS=" --load $CHECKPOINTS_BACKUP "
285
+ else
286
+ LOAD_CHECKPOINT_ARGS=" --load $UNIVERSAL_CKPT_PATH --universal-checkpoint --no-load-rng "
287
+ fi
288
+ fi
289
+
290
+ # Sequence parallelism
291
+ SEQUENCE_PARALLEL_ARG="--sequence-parallel"
292
+ PARTITIONED_MODE="false"
293
+ if [ $SEQUENCE_PARALLEL = false ]; then
294
+ SEQUENCE_PARALLEL_ARG=""
295
+ PARTITIONED_MODE="true"
296
+ fi
297
+
298
+ # Activation checkpointing or recompute
299
+ if [[ $USE_FUSED_SDPA_WITH_RECOMPUTE_ARG == "false" ]]; then
300
+ ACTIVATION_CHECKPOINTING="--checkpoint-activations \
301
+ --checkpoint-activations-granularity=selective "
302
+ else
303
+ ACTIVATION_CHECKPOINTING=""
304
+ fi
305
+
306
+ mkdir -p ${OUTPUT_DIR}
307
+ # create DS config
308
+ DS_CONFIG=${OUTPUT_DIR}/ds_config.json
309
+ cat << EOT > $DS_CONFIG
310
+ {
311
+ "train_batch_size" : $GLOBAL_BATCH,
312
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH,
313
+ "steps_per_print": $LOG_INTERVAL,
314
+
315
+ "zero_optimization": {
316
+ "stage": $ZERO_STAGE
317
+ },
318
+ "gradient_clipping": $CLIP_GRAD,
319
+ "bf16": {
320
+ "enabled": true,
321
+ "accumulate_grads_via_hooks": $ACCUMULATE_GRADS_VIA_HOOKS
322
+ },
323
+
324
+ "wall_clock_breakdown" : false,
325
+
326
+ "pipeline": {
327
+ "pipe_partitioned": $PARTITIONED_MODE,
328
+ "grad_partitioned": $PARTITIONED_MODE
329
+ }
330
+ }
331
+ EOT
332
+
333
+ echo "*******************************************************"
334
+ echo "Deepspeed config:"
335
+ cat $DS_CONFIG
336
+ echo "*******************************************************"
337
+
338
+ # DeepSpeed args
339
+ ds_args=""
340
+ ds_args=" --deepspeed ${ds_args}"
341
+ ds_args=" --deepspeed_config=$DS_CONFIG ${ds_args}"
342
+ ds_args=" --zero-stage=$ZERO_STAGE ${ds_args}"
343
+ ds_args=" --deepspeed-activation-checkpointing ${ds_args}"
344
+
345
+ CMD="sync && \
346
+ if [ \"\$LOCAL_RANK\" -eq \"0\" ]; then echo 3 > $PROC_FS/sys/vm/drop_caches ; fi && \
347
+ python -u $MODEL_DIR/pretrain_gpt.py \
348
+ --use_hpu \
349
+ --distributed-backend=hccl \
350
+ --tensor-model-parallel-size $TP \
351
+ --pipeline-model-parallel-size $PP \
352
+ --optimizer fusedadamw \
353
+ --num-layers $NUM_LAYERS \
354
+ --hidden-size $HIDDEN_SIZE \
355
+ --num-attention-heads $NUM_ATTENTION_HEADS \
356
+ --seq-length $SEQ_LENGTH \
357
+ --loss-scale 1 \
358
+ --max-position-embeddings $SEQ_LENGTH \
359
+ --micro-batch-size $MICRO_BATCH \
360
+ --eval-micro-batch-size $EVAL_MICRO_BATCH \
361
+ --global-batch-size $GLOBAL_BATCH \
362
+ --lr $LR \
363
+ --min-lr $MIN_LR \
364
+ --lr-decay-style cosine \
365
+ --train-samples $TRAIN_SAMPLES \
366
+ --lr-decay-samples $LR_DECAY_SAMPLES \
367
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
368
+ --log-interval $LOG_INTERVAL \
369
+ --train-data-path 0.5 $DATA_PATH_6 0.5 $DATA_PATH_7 \
370
+ --valid-data-path 1.0 $VALID_DATA_PATH \
371
+ --eval-iters $EVAL_ITERS \
372
+ --eval-interval $EVAL_INTERVAL \
373
+ --vocab-file $DATA_DIR/vocab.json \
374
+ --merge-file $DATA_DIR/merges.txt \
375
+ --split 100,0,0 \
376
+ --clip-grad $CLIP_GRAD \
377
+ --attention-dropout $DROPOUT \
378
+ --hidden-dropout $DROPOUT \
379
+ --no-query-key-layer-scaling \
380
+ --adam-beta1 0.9 \
381
+ --adam-beta2 0.95 \
382
+ --weight-decay 0.1 \
383
+ --init-method-std 0.006 \
384
+ --seed $SEED \
385
+ --bf16 \
386
+ $ACTIVATION_CHECKPOINTING \
387
+ --tensorboard-dir $TENSORBOARD_DIR \
388
+ --log-validation-ppl-to-tensorboard \
389
+ --no-bias-gelu-fusion \
390
+ --no-masked-softmax-fusion \
391
+ --no-bias-dropout-fusion \
392
+ --mask-tensor-adding \
393
+ --fix-position-emb-redundant-alloc \
394
+ --no-scaled-init \
395
+ --no-seq-len-plus-one-tokens \
396
+ --apply-layernorm-weight-plus-one \
397
+ --do-layernorm-bias-weight-decay \
398
+ --exit-interval $EXIT_INTERVAL \
399
+ --DDP-impl local \
400
+ --mllog-output-path $MLLOG_FILE \
401
+ --eval-loss-exit-value $EVAL_LOSS_EXIT_VALUE \
402
+ --ext-lr-steps $(($EXTERNAL_TRAINING_ITERATIONS*$EXTERNAL_GBS)) \
403
+ $LOAD_CHECKPOINT_ARGS \
404
+ $SAVE_CKPT_ARGS \
405
+ $KILL_SWITCH_ARG \
406
+ $TRANSFORMER_ENGINE_FLAG \
407
+ $USE_FUSED_SDPA \
408
+ $DEVICE_WARMUP_ARG \
409
+ --hpu-fp8-measure-interval $FP8_MEASURE_INTERVAL \
410
+ $CACHE_FP8_WEIGHT_FWD_FLAG \
411
+ $HPU_GRAPHS_FLAG \
412
+ $CACHE_FP8_WEIGHT_FLAG \
413
+ $PROFILE_FLAG \
414
+ $PROFILE_STEPS_FLAG \
415
+ $SEQUENCE_PARALLEL_ARG \
416
+ $ds_args"
417
+
418
+
419
+ # configure multinode
420
+ if [ "$NUM_NODES" -ne "1" -a -f "$HOSTSFILE" ]
421
+ then
422
+ MULTINODE_CMD="--force_multi \
423
+ --hostfile=$HOSTSFILE \
424
+ --master_addr $(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p) "
425
+ fi
426
+
427
+ # run gpt3
428
+ deepspeed --num_nodes ${NUM_NODES} \
429
+ --num_gpus ${DEVICES_PER_NODE} \
430
+ --no_local_rank \
431
+ --no_python \
432
+ $MULTINODE_CMD \
433
+ /usr/bin/bash -c "$CMD"
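Note on the DeepSpeed config: run_gpt.sh emits ds_config.json via the heredoc above before launching. A sketch of producing an equivalent config from Python (field names follow the heredoc; the concrete values shown are just the script defaults):

    import json

    ds_config = {
        "train_batch_size": 2048,                 # $GLOBAL_BATCH
        "train_micro_batch_size_per_gpu": 2,      # $MICRO_BATCH
        "steps_per_print": 1,                     # $LOG_INTERVAL
        "zero_optimization": {"stage": 0},
        "gradient_clipping": 1.0,
        "bf16": {"enabled": True, "accumulate_grads_via_hooks": True},
        "wall_clock_breakdown": False,
        "pipeline": {"pipe_partitioned": False, "grad_partitioned": False},
    }
    with open("ds_config.json", "w") as f:
        json.dump(ds_config, f, indent=2)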
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/setup.py ADDED
@@ -0,0 +1,91 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Setup for pip package."""
17
+
18
+ import os
19
+ import sys
20
+ import setuptools
21
+
22
+ if sys.version_info < (3,):
23
+ raise Exception("Python 2 is not supported by Megatron.")
24
+
25
+ from megatron.package_info import (
26
+ __description__,
27
+ __contact_names__,
28
+ __url__,
29
+ __download_url__,
30
+ __keywords__,
31
+ __license__,
32
+ __package_name__,
33
+ __version__,
34
+ )
35
+
36
+ with open("README.md", "r") as fh:
37
+ long_description = fh.read()
38
+
39
+ ###############################################################################
40
+ # Dependency Loading #
41
+ # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
42
+
43
+
44
+ def req_file(filename):
45
+ with open(filename) as f:
46
+ content = f.readlines()
47
+ return [x.strip() for x in content]
48
+
49
+
50
+ install_requires = req_file("requirements.txt")
51
+
52
+ setuptools.setup(
53
+ name=__package_name__,
54
+ # Versions should comply with PEP440. For a discussion on single-sourcing
55
+ # the version across setup.py and the project code, see
56
+ # https://packaging.python.org/en/latest/single_source_version.html
57
+ version=__version__,
58
+ description=__description__,
59
+ long_description=long_description,
60
+ long_description_content_type="text/markdown",
61
+ # The project's main homepage.
62
+ url=__url__,
63
+ author=__contact_names__,
64
+ maintainer=__contact_names__,
65
+ # The licence under which the project is released
66
+ license=__license__,
67
+ classifiers=[
68
+ 'Intended Audience :: Developers',
69
+ 'Intended Audience :: Science/Research',
70
+ 'Intended Audience :: Information Technology',
71
+ # Indicate what your project relates to
72
+ 'Topic :: Scientific/Engineering :: Artificial Intelligence',
73
+ 'Topic :: Software Development :: Libraries :: Python Modules',
74
+ # Supported python versions
75
+ 'Programming Language :: Python :: 3.6',
76
+ 'Programming Language :: Python :: 3.7',
77
+ 'Programming Language :: Python :: 3.8',
78
+ # Additional Setting
79
+ 'Environment :: Console',
80
+ 'Natural Language :: English',
81
+ 'Operating System :: OS Independent',
82
+ ],
83
+ python_requires='>=3.6',
84
+ packages=setuptools.find_packages(),
85
+ install_requires=install_requires,
86
+ # Add in any packaged data.
87
+ include_package_data=True,
88
+ zip_safe=False,
89
+ # PyPI package information.
90
+ keywords=__keywords__
91
+ )
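Note on dependency loading in setup.py: req_file simply reads requirements.txt line by line into install_requires. A standalone sketch of the same helper (assumes a requirements.txt like the one above sits in the working directory):

    def req_file(filename):
        # One requirement per line, whitespace stripped.
        with open(filename) as f:
            return [line.strip() for line in f.readlines()]

    print(req_file("requirements.txt"))
    # e.g. ['git+https://github.com/mlcommons/[email protected]', ..., 'einops']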
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/__init__.py ADDED
File without changes
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/convert_paxml_optimizer.py ADDED
@@ -0,0 +1,426 @@
1
+ # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ ###############################################################################
15
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
16
+ ###############################################################################
17
+ """Script to load layer(s) of the LLM checkpoint using TensorStore.
18
+ For more details about TensorStore, please visit
19
+ https://github.com/google/tensorstore .
20
+ """
21
+
22
+ import argparse
23
+ import tensorstore as ts
24
+ import glob
25
+ import json
26
+ import torch
27
+ import os
28
+ import sys
29
+ import multiprocessing
30
+ from datetime import datetime
31
+
32
+ def get_numpy_array(filename):
33
+ spec = {'driver': 'zarr', 'metadata_key': '.zarray', 'kvstore': {}}
34
+ spec['kvstore'] = {
35
+ 'driver': 'file',
36
+ 'path': filename,
37
+ }
38
+
39
+ t = ts.open(ts.Spec(spec), open=True).result()
40
+ t_v = t.read().result()
41
+ return t_v
42
+
43
+ def get_torch_tensor(filename, dtype):
44
+ array = get_numpy_array(filename)
45
+ array_torch = torch.from_numpy(array)
46
+ array_torch = array_torch.to(dtype)
47
+ return array_torch
48
+
49
+ def get_layer_info(output_dir, lyr_num, nv_name):
50
+ lyr_dir = os.path.join(output_dir, F"layer_{str(lyr_num)}")
51
+ lyr_name = "language_model.encoder.layers."+str(lyr_num)+"."+nv_name
52
+ return lyr_dir, lyr_name
53
+
54
+ def store_tensor(save_tensor, lyr_dir, lyr_name, params_dict):
55
+ optim_state = {}
56
+ optim_state["state"] = {}
57
+ optim_state["state"]["exp_avg"] = save_tensor["m"]
58
+ optim_state["state"]["exp_avg_sq"] = save_tensor["v"]
59
+ optim_state["fp32_from_fp16_params"] = save_tensor["w"]
60
+ if params_dict is not None:
61
+ optim_state["param_groups"] = params_dict
62
+ torch.save(optim_state, os.path.join(lyr_dir, lyr_name + ".pt"))
63
+
64
+ def copy_layers(args, nv_name, g_name, prefix, params_dict):
65
+
66
+ array_torch = {}
67
+ g_name_path = os.path.join(args.google_ckpts, prefix + ".m." + g_name)
68
+ array_torch["m"] = get_torch_tensor(g_name_path, args.dtype)
69
+ g_name_path = os.path.join(args.google_ckpts, prefix + ".v." + g_name)
70
+ array_torch["v"] = get_torch_tensor(g_name_path, args.dtype)
71
+ g_name_path = os.path.join(args.google_ckpts, "mdl_vars." + g_name)
72
+ array_torch["w"] = get_torch_tensor(g_name_path, args.dtype)
73
+
74
+ print(F"G Name: {g_name}, shape: {array_torch['m'].shape}", flush=True)
75
+ save_tensor = {}
76
+ if nv_name == "language_model.embedding.position_embeddings.weight":
77
+ start_idx = 0
78
+ end_idx = 2048
79
+ for key in list(array_torch.keys()):
80
+ save_tensor[key] = array_torch[key][start_idx: end_idx, :].contiguous().detach().clone()
81
+ print(F"NV Name: {nv_name}, shape: {save_tensor['m'].shape}", flush=True)
82
+ store_tensor(save_tensor, args.output_dir, nv_name, params_dict)
83
+ elif nv_name == "language_model.embedding.word_embeddings.weight":
84
+ for key in list(array_torch.keys()):
85
+ save_tensor[key] = array_torch[key].transpose(0, 1).contiguous().detach().clone()
86
+ print(F"NV Name: {nv_name}, shape: {save_tensor['m'].shape}", flush=True)
87
+ store_tensor(save_tensor, args.output_dir, nv_name, params_dict)
88
+ store_tensor(save_tensor, args.output_dir, "word_embeddings.weight", params_dict)
89
+ else:
90
+ for key in list(array_torch.keys()):
91
+ save_tensor[key] = array_torch[key].detach().clone()
92
+ print(F"NV Name: {nv_name}, shape: {save_tensor['m'].shape}", flush=True)
93
+ store_tensor(save_tensor, args.output_dir, nv_name, params_dict)
94
+ del save_tensor
95
+ del array_torch
96
+
97
+ def split_encoder_layers(args, nv_name, g_name, prefix, params_dict):
98
+ array_torch = {}
99
+ g_name_path = os.path.join(args.google_ckpts, prefix + ".m." + g_name)
100
+ array_torch["m"] = get_torch_tensor(g_name_path, args.dtype)
101
+ g_name_path = os.path.join(args.google_ckpts, prefix + ".v." + g_name)
102
+ array_torch["v"] = get_torch_tensor(g_name_path, args.dtype)
103
+ g_name_path = os.path.join(args.google_ckpts, "mdl_vars." + g_name)
104
+ array_torch["w"] = get_torch_tensor(g_name_path, args.dtype)
105
+ print(F"G Name: {g_name}, shape: {array_torch['m'].shape}", flush=True)
106
+ save_tensor = {}
107
+ if (
108
+ nv_name == "mlp.dense_4h_to_h.bias"
109
+ or nv_name == "post_attention_layernorm.bias"
110
+ or nv_name == "post_attention_layernorm.weight"
111
+ or nv_name == "input_layernorm.bias"
112
+ or nv_name == "input_layernorm.weight"
113
+ or nv_name == "self_attention.dense.bias"
114
+ or nv_name == "mlp.dense_h_to_4h.bias"
115
+ or nv_name == "self_attention.dense.weight"
116
+ ):
117
+ print(F"1st Check: {nv_name}")
118
+ for lyr_num in range(args.num_layers):
119
+ print("layer_num=",lyr_num)
120
+ lyr_dir, lyr_name = get_layer_info(args.output_dir, lyr_num, nv_name)
121
+ for key in list(array_torch.keys()):
122
+ save_tensor[key] = array_torch[key][lyr_num].contiguous().detach().clone()
123
+ if lyr_num == (args.num_layers // 2):
124
+ print(F"NV Name: {nv_name}, shape: {save_tensor['m'].shape}", flush=True)
125
+ store_tensor(save_tensor, lyr_dir, lyr_name, params_dict)
126
+ save_tensor = {}
127
+
128
+ elif (
129
+ nv_name == "mlp.dense_h_to_4h.weight"
130
+ or nv_name == "mlp.dense_4h_to_h.weight"
131
+ ):
132
+ print(F"2nd Check: {nv_name}")
133
+ for lyr_num in range(args.num_layers):
134
+ print("layer_num=",lyr_num)
135
+ lyr_dir, lyr_name = get_layer_info(args.output_dir, lyr_num, nv_name)
136
+ for key in list(array_torch.keys()):
137
+ save_tensor[key] = array_torch[key][lyr_num].transpose(0, 1).contiguous().detach().clone()
138
+ #save_tensor = save_tensor.transpose(0, 1).clone()
139
+ if lyr_num == (args.num_layers // 2):
140
+ print(F"NV Name: {nv_name}, shape: {save_tensor['v'].shape}", flush=True)
141
+ store_tensor(save_tensor, lyr_dir, lyr_name, params_dict)
142
+ save_tensor = {}
143
+ elif nv_name == "self_attention.query_key_value.weight":
144
+ print(F"3nd Check: {nv_name}")
145
+ # nv shape [4608, 12288] => 4608 = 12 (heads) * 3 (qkv) * 128 (hidden_size / heads)
146
+ # google shape [96, 3, 12288, 96, 128]
147
+ for lyr_num in range(args.num_layers):
148
+ print("layer_num=",lyr_num)
149
+ lyr_dir, lyr_name = get_layer_info(args.output_dir, lyr_num, nv_name)
150
+ for key in list(array_torch.keys()):
151
+ save_tensor[key] = array_torch[key][lyr_num].permute(2, 0, 3, 1).contiguous().detach().clone()
152
+ #save_tensor = save_tensor.permute(2, 0, 3, 1).contiguous().clone()
153
+ if lyr_num == (args.num_layers // 2):
154
+ print(F"NV Name: {nv_name}, shape: {save_tensor['w'].shape}", flush=True)
155
+ store_tensor(save_tensor, lyr_dir, lyr_name, params_dict)
156
+ save_tensor = {}
157
+ elif nv_name == "self_attention.query_key_value.bias":
158
+ print(F"4rd Check: {nv_name}")
159
+ # nv shape [4608] => 4608 = 12 (heads) * 3 (qkv) * 128 (hidden_size / heads)
160
+ # google shape [96, 3, 96, 128]
161
+ for lyr_num in range(args.num_layers):
162
+ print("layer_num=",lyr_num)
163
+ lyr_dir, lyr_name = get_layer_info(args.output_dir, lyr_num, nv_name)
164
+ for key in list(array_torch.keys()):
165
+ save_tensor[key] = array_torch[key][lyr_num].permute(1, 0, 2).contiguous().detach().clone()
166
+ #save_tensor = save_tensor.permute(1, 0, 2).contiguous().clone()
167
+ if lyr_num == (args.num_layers // 2):
168
+ print(F"NV Name: {nv_name}, shape: {save_tensor['m'].shape}", flush=True)
169
+ store_tensor(save_tensor, lyr_dir, lyr_name, params_dict)
170
+ save_tensor = {}
171
+ else:
172
+ print(F"Not a valid layer name: {nv_name}", flush=True)
173
+ sys.exit()
174
+ del array_torch
175
+
176
+
177
+ def arrange_google_ckpts(args, prefix1, prefix2):
178
+
179
+ output_dir = args.output_dir
180
+ num_layers = args.num_layers
181
+
182
+ params_dict = None
183
+ if args.params_file is not None:
184
+ with open(args.params_file, 'r') as f:
185
+ params_dict = json.load(f)
186
+ else:
187
+ print(F"For Megatron-LM Optimizer to get the right optimizer params, provide params_file json", flush=True)
188
+
189
+ if args.dtype == "bf16":
190
+ args.dtype = torch.bfloat16
191
+ else:
192
+ args.dtype = torch.float
193
+
194
+ for lyr_num in range(num_layers):
195
+ pp_id_dir = os.path.join(output_dir, f"layer_{str(lyr_num)}")
196
+ os.makedirs(pp_id_dir, exist_ok=True)
197
+
198
+ #layers that are not part of encoder blocks.
199
+ torch.multiprocessing.set_start_method("spawn")
200
+ torch.multiprocessing.set_sharing_strategy("file_system")
201
+
202
+
203
+ nv_g_names_pairs = [
204
+ ("language_model.embedding.word_embeddings.weight", "params.lm.softmax.logits_ffn.linear.w"),
205
+ ("language_model.embedding.position_embeddings.weight", "params.lm.position_emb.emb_var"),
206
+ ("language_model.encoder.final_layernorm.weight", "params.lm.final_ln.scale"),
207
+ ("language_model.encoder.final_layernorm.bias", "params.lm.final_ln.bias"),
208
+ ]
209
+ pool = multiprocessing.Pool(args.pool)
210
+ pool.starmap(
211
+ copy_layers,
212
+ [
213
+ (
214
+ args,
215
+ nv_name,
216
+ g_name,
217
+ prefix1,
218
+ params_dict,
219
+ )
220
+ for (nv_name, g_name) in nv_g_names_pairs
221
+ ],
222
+ )
223
+ pool.close()
224
+ pool.join()
225
+
226
+
227
+
228
+ nv_g_names_pairs1 = [
229
+ ("mlp.dense_4h_to_h.bias", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer2.bias.b"),
230
+ ]
231
+
232
+ pool = multiprocessing.Pool(args.pool)
233
+ pool.starmap(
234
+ split_encoder_layers,
235
+ [
236
+ (
237
+ args,
238
+ nv_name,
239
+ g_name,
240
+ prefix2,
241
+ params_dict,
242
+ )
243
+ for (nv_name, g_name) in nv_g_names_pairs1
244
+ ],
245
+ )
246
+ pool.close()
247
+ pool.join()
248
+
249
+ nv_g_names_pairs2 = [
250
+ ("post_attention_layernorm.bias", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.layer_norm.bias"),
251
+ ("post_attention_layernorm.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.layer_norm.scale"),
252
+ ("input_layernorm.bias", "params.lm.transformer.repeat.sub.x_layers_0.layer_norm.bias"),
253
+ ("input_layernorm.weight", "params.lm.transformer.repeat.sub.x_layers_0.layer_norm.scale"),
254
+ ("self_attention.dense.bias", "params.lm.transformer.repeat.sub.x_layers_0.self_attention.post.b"),
255
+ ]
256
+
257
+ pool = multiprocessing.Pool(args.pool)
258
+ pool.starmap(
259
+ split_encoder_layers,
260
+ [
261
+ (
262
+ args,
263
+ nv_name,
264
+ g_name,
265
+ prefix2,
266
+ params_dict,
267
+ )
268
+ for (nv_name, g_name) in nv_g_names_pairs2
269
+ ],
270
+ )
271
+ pool.close()
272
+ pool.join()
273
+
274
+ nv_g_names_pairs3 = [
275
+ ("mlp.dense_h_to_4h.bias", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer1.bias.b"),
276
+ ]
277
+
278
+ pool = multiprocessing.Pool(args.pool)
279
+ pool.starmap(
280
+ split_encoder_layers,
281
+ [
282
+ (
283
+ args,
284
+ nv_name,
285
+ g_name,
286
+ prefix2,
287
+ params_dict,
288
+ )
289
+ for (nv_name, g_name) in nv_g_names_pairs3
290
+ ],
291
+ )
292
+ pool.close()
293
+ pool.join()
294
+
295
+ nv_g_names_pairs4 = [
296
+ ("mlp.dense_h_to_4h.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer1.linear.w"),
297
+ ]
298
+
299
+ pool = multiprocessing.Pool(args.pool)
300
+ pool.starmap(
301
+ split_encoder_layers,
302
+ [
303
+ (
304
+ args,
305
+ nv_name,
306
+ g_name,
307
+ prefix2,
308
+ params_dict,
309
+ )
310
+ for (nv_name, g_name) in nv_g_names_pairs4
311
+ ],
312
+ )
313
+ pool.close()
314
+ pool.join()
315
+
316
+ nv_g_names_pairs5 = [
317
+ ("mlp.dense_4h_to_h.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer2.linear.w"),
318
+ ("self_attention.dense.weight", "params.lm.transformer.repeat.sub.x_layers_0.self_attention.post.w"),
319
+ ("self_attention.query_key_value.weight",
320
+ "params.lm.transformer.repeat.sub.x_layers_0.self_attention.combined_qkv.w"),
321
+ ("self_attention.query_key_value.bias",
322
+ "params.lm.transformer.repeat.sub.x_layers_0.self_attention.combined_qkv.b"),
323
+ ]
324
+
325
+ pool = multiprocessing.Pool(args.pool)
326
+ pool.starmap(
327
+ split_encoder_layers,
328
+ [
329
+ (
330
+ args,
331
+ nv_name,
332
+ g_name,
333
+ prefix2,
334
+ params_dict,
335
+ )
336
+ for (nv_name, g_name) in nv_g_names_pairs5
337
+ ],
338
+ )
339
+ pool.close()
340
+ pool.join()
341
+
342
+ exit(0)
343
+
344
+ nv_g_names_pairs = [
345
+ ("mlp.dense_4h_to_h.bias", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer2.bias.b"),
346
+ ("post_attention_layernorm.bias", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.layer_norm.bias"),
347
+ ("post_attention_layernorm.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.layer_norm.scale"),
348
+ ("input_layernorm.bias", "params.lm.transformer.repeat.sub.x_layers_0.layer_norm.bias"),
349
+ ("input_layernorm.weight", "params.lm.transformer.repeat.sub.x_layers_0.layer_norm.scale"),
350
+ ("self_attention.dense.bias", "params.lm.transformer.repeat.sub.x_layers_0.self_attention.post.b"),
351
+ ("mlp.dense_h_to_4h.bias", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer1.bias.b"),
352
+ ("mlp.dense_h_to_4h.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer1.linear.w"),
353
+ ("mlp.dense_4h_to_h.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer2.linear.w"),
354
+ ("self_attention.dense.weight", "params.lm.transformer.repeat.sub.x_layers_0.self_attention.post.w"),
355
+ ("self_attention.query_key_value.weight",
356
+ "params.lm.transformer.repeat.sub.x_layers_0.self_attention.combined_qkv.w"),
357
+ ("self_attention.query_key_value.bias",
358
+ "params.lm.transformer.repeat.sub.x_layers_0.self_attention.combined_qkv.b"),
359
+ ]
360
+
361
+ pool = multiprocessing.Pool(args.pool)
362
+ pool.starmap(
363
+ split_encoder_layers,
364
+ [
365
+ (
366
+ args,
367
+ nv_name,
368
+ g_name,
369
+ prefix2,
370
+ params_dict,
371
+ )
372
+ for (nv_name, g_name) in nv_g_names_pairs
373
+ ],
374
+ )
375
+ pool.close()
376
+ pool.join()
377
+
378
+
379
+ if __name__ == "__main__":
380
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
381
+ parser.add_argument(
382
+ '--google_ckpts', "-gckpt",
383
+ type=str,
384
+ default='/workspace/data/checkpoint_00001300',
385
+ help='Google Checkpoint directory')
386
+ parser.add_argument(
387
+ '--output_dir', "-o",
388
+ type=str,
389
+ default='google_to_torch_output',
390
+ help='Output directory')
391
+ parser.add_argument(
392
+ '--dtype', "-dt",
393
+ type=str,
394
+ default="float",
395
+ help='datatype')
396
+ parser.add_argument(
397
+ '--num_layers', "-nl",
398
+ type=int,
399
+ default=96,
400
+ help='number of encoder layers')
401
+ parser.add_argument(
402
+ '--params_file', "-pl",
403
+ type=str,
404
+ default=None,
405
+ help='Json File for Param Groups')
406
+ parser.add_argument(
407
+ '--pool', "-p",
408
+ type=int,
409
+ default=4,
410
+ help='parallel processes')
411
+
412
+ args = parser.parse_args()
413
+ print("\n=============== Argument ===============")
414
+ for key in vars(args):
415
+ print(f"{key}: {vars(args)[key]}")
416
+ print("========================================")
417
+
418
+ param1 = "opt_states_0.no_prefix_2" #Assij
419
+ param2 = "opt_states_0.p#96#i-1_2"
420
+
421
+
422
+ start_time = datetime.now()
423
+ arrange_google_ckpts(args, param1, param2)
424
+ stop_time = datetime.now()
425
+ run_time = stop_time - start_time
426
+ print(f"[INFO] Spend {run_time} (h:m:s) to convert the model")
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_checkpoint.py ADDED
@@ -0,0 +1,196 @@
1
+ import os
2
+ from typing import Dict
3
+ import torch
4
+
5
+ ZERO_FILE_PREFIX = 'zero_pp_rank_'
6
+ LAYER_FILE_PREFIX = 'layer_'
7
+ MP_RANK_FILE_PREFIX = 'mp_rank_'
8
+ EMBEDDING_LAYER_INDEX = 0
9
+ FINAL_LAYER_NORM_INDEX = -1
10
+ ARGS_KEY = 'args'
11
+ ITERATION_KEY = 'iteration'
12
+ SEQUENTIAL_LAYERS = [
13
+ 'input_layernorm.weight', 'input_layernorm.bias',
14
+ 'self_attention.dense.bias',
15
+ 'post_attention_layernorm.weight', 'post_attention_layernorm.bias',
16
+ 'mlp.dense_4h_to_h.bias',
17
+ 'position_embeddings.weight'
18
+ ]
19
+
20
+ LAYER_CONCAT_DIM = {
21
+ 'self_attention.dense.weight': 1,
22
+ 'mlp.dense_4h_to_h.weight': 1
23
+ }
24
+
25
+ class DeepSpeedCheckpoint(object):
26
+ def __init__(self, dir, tp_degree=None, pp_degree=None, no_pp=False):
27
+ self.dir = dir
28
+ self.no_pp = no_pp
29
+ self.file_list = self._get_files(dir)
30
+ self.zero_files = self._get_files_with_prefix(self.file_list, ZERO_FILE_PREFIX)
31
+ self.layer_files = self._get_files_with_prefix(self.file_list, LAYER_FILE_PREFIX)
32
+ self.mp_rank_files = self._get_files_with_prefix(self.file_list, MP_RANK_FILE_PREFIX)
33
+ self.layer_keys = self._get_layer_keys()
34
+ self.layer_count = len(self.layer_keys)
35
+ if not self.no_pp:
36
+ self.original_tp_degree = len(self._get_files_with_prefix(self.layer_files, f'{LAYER_FILE_PREFIX}01'))
37
+ self.original_pp_degree = len(self.mp_rank_files) // self.original_tp_degree
38
+ else:
39
+ self.original_tp_degree = len(self.mp_rank_files)
40
+ self.original_pp_degree = 1
41
+ self.dp_degree = len(self.zero_files) // (self.original_pp_degree * self.original_tp_degree)
42
+ self.tp_degree = self.original_tp_degree if tp_degree is None else tp_degree
43
+ self.pp_degree = self.original_pp_degree if pp_degree is None else pp_degree
44
+ self.global_state = {}
45
+
46
+ self._sanity_check()
47
+ self.pp_to_transformer_map = self._build_pp_transformer_map()
48
+ self.transformer_file_map = self._build_transformer_file_map()
49
+ if not self.no_pp:
50
+ self.tp_to_embedding_map = self._build_tp_other_layer_map(EMBEDDING_LAYER_INDEX)
51
+ self.tp_to_final_norm_map = self._build_tp_other_layer_map(FINAL_LAYER_NORM_INDEX)
52
+ self._build_global_state()
53
+
54
+
55
+
56
+ def show_tp_embedding_map(self):
57
+ self._dump_mapping(self.tp_to_embedding_map, 'tp_to_embedding_layers')
58
+
59
+ def show_tp_final_norm_map(self):
60
+ self._dump_mapping(self.tp_to_final_norm_map, 'tp_to_final_norm_layers')
61
+
62
+ def show_pp_tranformer_map(self):
63
+ self._dump_mapping(self.pp_to_transformer_map, 'pp_to_tranformer_layers')
64
+
65
+ def show_transformer_file_map(self):
66
+ self._dump_mapping(self.transformer_file_map, 'rank_to_tranformer_files')
67
+
68
+ def _build_global_state(self):
69
+ sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
70
+ self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0)
71
+ self.global_state[ARGS_KEY] = sd.get(ARGS_KEY, None)
72
+
73
+ def get_iteration(self):
74
+ if not ITERATION_KEY in self.global_state:
75
+ sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
76
+ self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0)
77
+
78
+ return self.global_state[ITERATION_KEY]
79
+
80
+ def get_embedding_state(self, tp_index: int) -> Dict:
81
+ assert tp_index in self.tp_to_embedding_map.keys()
82
+ sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in self.tp_to_embedding_map[tp_index]]
83
+ sd = self._merge_state_dicts(sd_list)
84
+ return sd
85
+
86
+ def get_args(self):
87
+ if not ARGS_KEY in self.global_state:
88
+ sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
89
+ self.global_state[ARGS_KEY] = sd.get(ARGS_KEY, None)
90
+
91
+ return self.global_state[ARGS_KEY]
92
+
93
+
94
+ def get_transformer_state(self, tp_index: int, pp_index: int) -> list:
95
+ assert tp_index < self.tp_degree
96
+ assert pp_index < self.pp_degree
97
+ t_list = []
98
+ for fname_list in self.transformer_file_map[(tp_index, pp_index)]:
99
+ sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in fname_list]
100
+ sd = self._merge_state_dicts(sd_list)
101
+ t_list.append(sd)
102
+ return t_list
103
+
104
+ def get_final_norm_state(self, tp_index:int) -> Dict:
105
+ assert tp_index in self.tp_to_final_norm_map.keys()
106
+ sd = torch.load(self.tp_to_final_norm_map[tp_index][0], map_location=torch.device('cpu'))
107
+ return sd
108
+
109
+ def _build_tp_other_layer_map(self, layer_index:int):
110
+ assert layer_index < len(self.layer_files)
111
+ layer_files = self._get_files_with_prefix(self.layer_files, self.layer_keys[layer_index])
112
+ layer_file_partitions = self._partition_data(layer_files, self.tp_degree)
113
+ data_map = {i:flist for i, flist in enumerate(layer_file_partitions)}
114
+ return data_map
115
+
116
+ def _build_pp_transformer_map(self):
117
+ data_map = {}
118
+ transformer_layers = self.layer_keys[1:-1]
119
+ layers_per_pp = len(transformer_layers) // self.pp_degree
120
+ data_map = {i:transformer_layers[i*layers_per_pp:(i+1)*layers_per_pp] for i in range(0, self.pp_degree)}
121
+ return data_map
122
+
123
+ def _dump_mapping(self, data_map, map_tag = None):
124
+ if map_tag is not None:
125
+ print(f'Dump mapping: {map_tag}')
126
+ for k, v in data_map.items():
127
+ print(f'{k} = {v}')
128
+
129
+ def _build_transformer_file_map(self):
130
+ transformer_layer_keys = self.layer_keys[1:-1]
131
+ file_map = {}
132
+ layers_per_pp = len(transformer_layer_keys) // self.pp_degree
133
+ for key_index, layer_key in enumerate(transformer_layer_keys):
134
+ pp_index = key_index // layers_per_pp
135
+ layer_files = self._get_files_with_prefix(self.layer_files, layer_key)
136
+ layer_file_partitions = self._partition_data(layer_files, self.tp_degree)
137
+ for tp_index in range(self.tp_degree):
138
+ map_key = (tp_index, pp_index)
139
+ if not map_key in file_map.keys():
140
+ file_map[map_key] = []
141
+ file_map[map_key].append(layer_file_partitions[tp_index])
142
+
143
+ return file_map
144
+
145
+ def _sanity_check(self):
146
+ assert len(self.mp_rank_files) % self.tp_degree == 0
147
+ assert len(self.zero_files) % (self.pp_degree * self.tp_degree) == 0
148
+ if not self.no_pp:
149
+ assert len(self.layer_keys) > 2
150
+ assert (len(self.layer_keys) - 2) % self.pp_degree == 0
151
+
152
+ def _get_files_with_prefix(self, all_files, prefix):
153
+ file_list = []
154
+ for file_path in all_files:
155
+ _, fname = os.path.split(file_path)
156
+ if fname.startswith(prefix):
157
+ file_list.append(file_path)
158
+
159
+ return sorted(file_list)
160
+
161
+ def validate_files(self):
162
+ for file in self.file_list:
163
+ if not os.path.isfile(file):
164
+ print(f'Error: {file} does not exist')
165
+
166
+ def _get_files(self, dir):
167
+ file_list = []
168
+ for root, dirs, files in os.walk(dir):
169
+ for file in files:
170
+ file_list.append(os.path.join(root, file))
171
+ return file_list
172
+
173
+ def _get_layer_keys(self):
174
+ key_set = set()
175
+ key_len = len(LAYER_FILE_PREFIX) + 2
176
+ for file_path in self.layer_files:
177
+ _, fname = os.path.split(file_path)
178
+ key_set.add(fname[:key_len])
179
+ return sorted(list(key_set))
180
+
181
+ def _partition_data(self, data_list, num_partitions):
182
+ num_elems = len(data_list)
183
+ assert num_elems % num_partitions == 0
184
+ partition_size = num_elems // num_partitions
185
+ partitions_list = [data_list[i:i+partition_size] for i in range(0, num_elems, partition_size)]
186
+ return partitions_list
187
+
188
+ def _merge_state_dicts(self, sd_list):
189
+ merged_sd = {}
190
+ for key in sd_list[0].keys():
191
+ if not key in SEQUENTIAL_LAYERS:
192
+ cat_dim = LAYER_CONCAT_DIM.get(key, 0)
193
+ merged_sd[key] = torch.cat([sd[key] for sd in sd_list], dim=cat_dim)
194
+ else:
195
+ merged_sd[key] = sd_list[0][key]
196
+ return merged_sd
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_to_megatron.py ADDED
@@ -0,0 +1,150 @@
1
+ #!/usr/bin/env python
2
+
3
+ import argparse
4
+ import os
5
+ import torch
6
+ from collections import OrderedDict
7
+ from .deepspeed_checkpoint import ARGS_KEY, DeepSpeedCheckpoint
8
+
9
+ MODEL_KEY = 'model'
10
+ ARGS_KEY = 'args'
11
+ LANGUAGE_MODEL_KEY = 'language_model'
12
+ EMBEDDING_KEY = 'embedding'
13
+ ENCODER_KEY = 'encoder'
14
+ WORD_EMBEDDINGS_FOR_HEAD_KEY = 'word_embeddings_for_head'
15
+ WORD_EMBEDDINGS_KEY = 'word_embeddings'
16
+ FINAL_LAYER_NORM_KEY = 'final_layernorm'
17
+ CHECKPOINT_VERSION_KEY = 'checkpoint_version'
18
+ CHECKPOINT_VERSION_VALUE = 3.0
19
+ ITERATION_KEY = 'iteration'
20
+
21
+ def parse_arguments():
22
+ parser = argparse.ArgumentParser()
23
+ parser.add_argument('--input_folder', default=None, type=str, help='Input DeepSpeed Checkpoint folder')
24
+ parser.add_argument('--output_folder', default=None, type=str, help='Output Megatron checkpoint folder')
25
+ parser.add_argument('--target_tp', default=1, type=int, help='Target TP degree')
26
+ parser.add_argument('--target_pp', default=1, type=int, help='Target PP degree')
27
+ parser.add_argument('--for_release', action='store_true', help='Convert for release purpose, reset some (progress) counters.')
28
+ args = parser.parse_args()
29
+ print(f'args = {args}')
30
+ return args
31
+
32
+
33
+ def _convert_ds_transformer_state(sd_list):
34
+ new_sd = OrderedDict()
35
+ for i, sd in enumerate(sd_list):
36
+ for key, value in sd.items():
37
+ new_key = f'layers.{i}.{key}'
38
+ new_sd[new_key] = value
39
+
40
+ return new_sd
41
+
42
+ def _create_checkpoint_paths(base_folder, iteration, tp_degree, pp_degree):
43
+ path_list = []
44
+ iter_folder = f'iter_{iteration:07d}'
45
+ for i in range(0, tp_degree):
46
+ path_list.append([])
47
+ for j in range(0, pp_degree):
48
+ rank_folder = f'mp_rank_{i:02d}' if pp_degree == 1 else f'mp_rank_{i:02d}_{j:03d}'
49
+ ckpt_path = os.path.join(rank_folder, 'model_optim_rng.pt')
50
+ path_list[i].append(os.path.join(base_folder, iter_folder, ckpt_path))
51
+
52
+ return path_list
53
+
54
+
55
+ def _create_megatron_dict():
56
+ language_model_dict = {
57
+ EMBEDDING_KEY: {},
58
+ ENCODER_KEY: {}
59
+ }
60
+ megatron_dict = {
61
+ MODEL_KEY: {LANGUAGE_MODEL_KEY: language_model_dict},
62
+ CHECKPOINT_VERSION_KEY: CHECKPOINT_VERSION_VALUE
63
+ }
64
+ return megatron_dict
65
+
66
+
67
+ def _save_checkpoint(file_path, chkpt_sd):
68
+ dir, _ = os.path.split(file_path)
69
+ os.makedirs(dir, exist_ok=True)
70
+ torch.save(chkpt_sd, file_path)
71
+
72
+
73
+ def _renest_sd(sd):
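+ # turn flat keys such as 'word_embeddings.weight' into a nested dict {'word_embeddings': {'weight': value}}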
74
+ new_sd = OrderedDict()
75
+ for key, value in sd.items():
76
+ a, b = key.split('.')
77
+ new_sd[a] = {b: value}
78
+ return new_sd
79
+
80
+
81
+ def _create_rank_checkpoint(ds_checkpoint, tp_index, pp_index, for_release=False):
82
+ meg_encoder_sd = OrderedDict()
83
+ meg_embedding_sd = OrderedDict()
84
+ meg_embedding_for_head_sd = OrderedDict()
85
+
86
+ transformer_sd = ds_checkpoint.get_transformer_state(tp_index, pp_index)
87
+ meg_encoder_sd.update(_convert_ds_transformer_state(transformer_sd))
88
+
89
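+ # embeddings live on the first pp stage; the last stage additionally needs the word embeddings (for the tied LM head) and the final layernorm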
+ if pp_index in [0, ds_checkpoint.pp_degree - 1]:
90
+ embedding_sd = ds_checkpoint.get_embedding_state(tp_index)
91
+ nested_embedding_sd = _renest_sd(embedding_sd)
92
+ if pp_index == 0:
93
+ meg_embedding_sd.update(nested_embedding_sd)
94
+
95
+ if pp_index == ds_checkpoint.pp_degree -1:
96
+ for key, value in embedding_sd.items():
97
+ if key.startswith(WORD_EMBEDDINGS_KEY):
98
+ fields = key.split('.')
99
+ new_fields = fields[1:]
100
+ new_key = '.'.join(new_fields)
101
+ meg_embedding_for_head_sd[new_key] = value
102
+
103
+ final_norm_sd = ds_checkpoint.get_final_norm_state(tp_index)
104
+ new_final_norm_sd = {f'{FINAL_LAYER_NORM_KEY}.{key}': value for key, value in final_norm_sd.items()}
105
+ meg_encoder_sd.update(new_final_norm_sd)
106
+
107
+ checkpoint_sd = _create_megatron_dict()
108
+
109
+ iteration = ds_checkpoint.get_iteration()
110
+ checkpoint_sd[ITERATION_KEY] = iteration
111
+ if pp_index == 0:
112
+ checkpoint_sd[MODEL_KEY][LANGUAGE_MODEL_KEY][EMBEDDING_KEY] = meg_embedding_sd
113
+ checkpoint_sd[MODEL_KEY][LANGUAGE_MODEL_KEY][ENCODER_KEY] = meg_encoder_sd
114
+ if pp_index == ds_checkpoint.pp_degree -1:
115
+ checkpoint_sd[MODEL_KEY][WORD_EMBEDDINGS_FOR_HEAD_KEY] = meg_embedding_for_head_sd
116
+
117
+ checkpoint_sd[ARGS_KEY] = ds_checkpoint.get_args()
118
+ # Adjust specific fields
119
+ checkpoint_sd[ARGS_KEY].tensor_model_parallel_size = ds_checkpoint.tp_degree
120
+ checkpoint_sd[ARGS_KEY].pipeline_model_parallel_size = ds_checkpoint.pp_degree
121
+ if for_release:
122
+ checkpoint_sd[ARGS_KEY].consumed_train_samples = 0
123
+ checkpoint_sd[ARGS_KEY].consumed_valid_samples = 0
124
+
125
+ return checkpoint_sd
126
+
127
+
128
+ def _create_latest_file(base_folder, iteration):
129
+ file_path = os.path.join(base_folder, 'latest_checkpointed_iteration.txt')
130
+ os.makedirs(base_folder, exist_ok=True)
131
+ with open(file_path, 'w') as f:
132
+ f.write(str(iteration))
133
+
134
+ def main():
135
+ print('Convert DeepSpeed Checkpoint to Megatron Checkpoint')
136
+
137
+ args = parse_arguments()
138
+ print(f'Converting DeepSpeed checkpoint in {args.input_folder} to Megatron checkpoint in {args.output_folder}')
139
+
140
+ ds_checkpoint = DeepSpeedCheckpoint(args.input_folder, args.target_tp, args.target_pp)
141
+ iteration = ds_checkpoint.get_iteration()
142
+ _create_latest_file(args.output_folder, iteration)
143
+ checkpoint_paths = _create_checkpoint_paths(args.output_folder, iteration, ds_checkpoint.tp_degree, ds_checkpoint.pp_degree)
144
+ for i in range(0, ds_checkpoint.tp_degree):
145
+ for j in range(0, ds_checkpoint.pp_degree):
146
+ sd = _create_rank_checkpoint(ds_checkpoint, i, j, args.for_release)
147
+ _save_checkpoint(checkpoint_paths[i][j], sd)
148
+
149
+ if __name__ == "__main__":
150
+ main()
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_to_transformers.py ADDED
@@ -0,0 +1,83 @@
1
+ #!/usr/bin/env python
2
+
3
+ import os
4
+ import torch
5
+ import json
6
+
7
+ from deepspeed_checkpoint import DeepSpeedCheckpoint
8
+ from deepspeed_to_megatron import _create_rank_checkpoint, parse_arguments
9
+
10
+ # the import below was tested to work with this transformers version:
11
+ # https://github.com/huggingface/transformers/commit/0af901e83
12
+ # if it diverges, we may consider copying that version here instead
13
+ from transformers.models.megatron_gpt2.convert_megatron_gpt2_checkpoint import convert_megatron_checkpoint
14
+ from transformers import GPT2Config
15
+
16
+ def main():
17
+
18
+ # this first part comes mainly from deepspeed_to_megatron.main
19
+ args = parse_arguments()
20
+ print(f'Converting DeepSpeed checkpoint in {args.input_folder} to HF Transformers checkpoint in {args.output_folder}')
21
+
22
+ ds_checkpoint = DeepSpeedCheckpoint(args.input_folder, args.target_tp, args.target_pp)
23
+ iteration = ds_checkpoint.get_iteration()
24
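+ # with the default target_tp/target_pp of 1, rank (0, 0) holds the complete model state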
+ input_state_dict = _create_rank_checkpoint(ds_checkpoint, 0, 0, args.for_release)
25
+
26
+ # the 2nd part comes from transformers.models.megatron_gpt2.convert_megatron_gpt2_checkpoint.main
27
+ # Spell out all parameters in case the defaults change.
28
+ config = GPT2Config(
29
+ vocab_size=50257,
30
+ n_positions=1024,
31
+ n_ctx=1024,
32
+ n_embd=1024,
33
+ n_layer=24,
34
+ n_head=16,
35
+ n_inner=4096,
36
+ activation_function="gelu", # used to be "gelu_new" in earlier versions
37
+ resid_pdrop=0.1,
38
+ embd_pdrop=0.1,
39
+ attn_pdrop=0.1,
40
+ layer_norm_epsilon=1e-5,
41
+ initializer_range=0.02,
42
+ summary_type="cls_index",
43
+ summary_use_proj=True,
44
+ summary_activation=None,
45
+ summary_proj_to_labels=True,
46
+ summary_first_dropout=0.1,
47
+ scale_attn_weights=True,
48
+ gradient_checkpointing=False,
49
+ use_cache=True,
50
+ bos_token_id=50256,
51
+ eos_token_id=50256,
52
+ )
53
+
54
+ # Convert.
55
+ print("Converting to HF Checkpoint")
56
+ output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
57
+
58
+ basename = args.output_folder
59
+ os.makedirs(basename, exist_ok=True)
60
+
61
+ # Print the structure of converted state dict.
62
+ #if args.print_checkpoint_structure:
63
+ # recursive_print(None, output_state_dict)
64
+
65
+ # Store the config to file.
66
+ output_config_file = os.path.join(basename, "config.json")
67
+ output_config = config.to_dict()
68
+ output_config["architectures"] = ["GPT2LMHeadModel"]
69
+ output_config["model_type"] = "gpt2"
70
+ print(f'Saving config to "{output_config_file}"')
71
+ with open(output_config_file, "w") as f:
72
+ json.dump(output_config, f)
73
+
74
+ # Store the state_dict to file.
75
+ output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
76
+ print(f'Saving checkpoint to "{output_checkpoint_file}"')
77
+ torch.save(output_state_dict, output_checkpoint_file)
78
+
79
+ print("Now add tokenizer files and upload to the hub")
80
+
81
+
82
+ if __name__ == "__main__":
83
+ main()
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/ds_to_universal.py ADDED
@@ -0,0 +1,290 @@
1
+ #!/usr/bin/env python
2
+
3
+ from collections import OrderedDict
4
+ from functools import partial
5
+ import argparse
6
+ import glob
7
+ import itertools
8
+ import multiprocessing
9
+ import os
10
+ import re
11
+ import shutil
12
+ import torch
13
+ import tqdm
14
+
15
+ from deepspeed.checkpoint import DeepSpeedCheckpoint
16
+
17
+ MODEL_KEY = 'model'
18
+ ARGS_KEY = 'args'
19
+ LANGUAGE_MODEL_KEY = 'language_model'
20
+ EMBEDDING_KEY = 'embedding'
21
+ ENCODER_KEY = 'encoder'
22
+ WORD_EMBEDDINGS_FOR_HEAD_KEY = 'word_embeddings_for_head'
23
+ WORD_EMBEDDINGS_KEY = 'word_embeddings'
24
+ FINAL_LAYER_NORM_KEY = 'final_layernorm'
25
+ CHECKPOINT_VERSION_KEY = 'checkpoint_version'
26
+ CHECKPOINT_VERSION_VALUE = 3.0
27
+ ITERATION_KEY = 'iteration'
28
+ ORIGINAL_VOCAB_SIZE = 'original_vocab_size'
29
+
30
+
31
+ def parse_arguments():
32
+ parser = argparse.ArgumentParser()
33
+ parser.add_argument(
34
+ '--input_folder',
35
+ type=str,
36
+ help='Input DeepSpeed Checkpoint folder')
37
+ parser.add_argument(
38
+ '--output_folder',
39
+ type=str,
40
+ help='Output Megatron checkpoint folder')
41
+ parser.add_argument(
42
+ '--num_extract_workers',
43
+ default=4,
44
+ type=int,
45
+ help='How many parallel processes to extract zero shards')
46
+ parser.add_argument(
47
+ '--num_merge_workers',
48
+ default=2,
49
+ type=int,
50
+ help='How many parallel processes to merge tp slices '
51
+ '(more memory intensive, use much fewer than --num_extract_workers)')
52
+
53
+ args = parser.parse_args()
54
+ print(f'args = {args}')
55
+ return args
56
+
57
+
58
+ def _convert_ds_transformer_state(sd_list):
59
+ new_sd = OrderedDict()
60
+ for i, sd in enumerate(sd_list):
61
+ for key, value in sd.items():
62
+ new_key = f'layers.{i}.{key}'
63
+ new_sd[new_key] = value
64
+
65
+ return new_sd
66
+
67
+
68
+ def _create_megatron_dict():
69
+ language_model_dict = {EMBEDDING_KEY: {}, ENCODER_KEY: {}}
70
+ megatron_dict = {
71
+ MODEL_KEY: {
72
+ LANGUAGE_MODEL_KEY: language_model_dict
73
+ },
74
+ CHECKPOINT_VERSION_KEY: CHECKPOINT_VERSION_VALUE
75
+ }
76
+ return megatron_dict
77
+
78
+
79
+ def _save_checkpoint(file_path, chkpt_sd):
80
+ ckp_dir, _ = os.path.split(file_path)
81
+ os.makedirs(ckp_dir, exist_ok=True)
82
+ torch.save(chkpt_sd, file_path)
83
+
84
+
85
+ def extract_zero_shards(out_path, ds_checkpoint, indices_3d):
86
+ pp_index, tp_index, dp_index = indices_3d
87
+ sd = ds_checkpoint.get_zero_checkpoint_state(
88
+ pp_index=pp_index,
89
+ tp_index=tp_index,
90
+ dp_index=dp_index)
91
+
92
+ optim_sd = sd["optimizer_state_dict"]
93
+ param_slice_mappings = optim_sd["param_slice_mappings"]
94
+
95
+ # dict
96
+ state_groups = optim_sd["base_optimizer_state"]["state"]
97
+
98
+ # list
99
+ fp32_groups = optim_sd["single_partition_of_fp32_groups"]
100
+ param_groups_cnt = len(state_groups)
101
+
102
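+ # for each param group, dump the Adam moments and fp32 master weights of every parameter fragment owned by this (pp, tp, dp) rank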
+ for param_group_id in range(param_groups_cnt):
103
+ flat_state = dict(
104
+ exp_avg=state_groups[param_group_id]["exp_avg"],
105
+ exp_avg_sq=state_groups[param_group_id]["exp_avg_sq"],
106
+ fp32=fp32_groups[param_group_id],
107
+ )
108
+
109
+ for name, fragment_mapping in param_slice_mappings[param_group_id].items():
110
+ if "tied_modules.embed" in name and pp_index > 0:
111
+ # Skip word_embeddings.weight that is replicated in first and last pp stages
112
+ # Skip position_embeddings.weight that is only used in first pp stage
113
+ continue
114
+
115
+ for state_key in flat_state.keys():
116
+ dump_param_fragment(out_path, tp_index, dp_index, state_key,
117
+ flat_state[state_key], name,
118
+ fragment_mapping.start,
119
+ fragment_mapping.numel)
120
+
121
+
122
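+ # writes one file per (param, tp, dp, optimizer-state) fragment, named '<state>.<zero-padded dp index>', so shards can later be concatenated in dp order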
+ def dump_param_fragment(out_path, tp_index, dp_index, state_name,
123
+ state_flat_tensor, param_name, offset, numel):
124
+ param_base_path = os.path.join(out_path, param_name, str(tp_index))
125
+ os.makedirs(param_base_path, exist_ok=True)
126
+
127
+ counter = f"{dp_index:0>2d}"
128
+ path = os.path.join(param_base_path, f"{state_name}.{counter}")
129
+
130
+ # clone to force tensor storage to ignore views
131
+ t = state_flat_tensor.narrow(0, offset, numel).clone()
132
+ _save_checkpoint(path, t)
133
+
134
+
135
+ def _merge_zero_shards(param_base_path, state, tp_degree, slice_shape):
136
+ slices = []
137
+ for tp_index in range(tp_degree):
138
+ prefix_path = os.path.join(param_base_path, str(tp_index), f"{state}")
139
+ paths = sorted(list(glob.glob(f"{prefix_path}.*")))
140
+ shards = [torch.load(p) for p in paths]
141
+ param_slice = torch.cat(shards, dim=0).reshape(slice_shape)
142
+ slices.append(param_slice)
143
+
144
+ return slices
145
+
146
+
147
+ def _strip_vocab_padding(ds_checkpoint, padded_vocab_tensor):
148
+ checkpoint_info = ds_checkpoint.get_checkpoint_info()
149
+ return padded_vocab_tensor.narrow(0, 0, checkpoint_info[ORIGINAL_VOCAB_SIZE])
150
+
151
+
152
+ WEIGHTS_TO_AVERAGE_PATTERNS = [
153
+ r"tied_modules.embed.word_embeddings.norm.weight",
154
+ r"tied_modules.embed.word_embeddings.norm.bias",
155
+ r"tied_modules.embed.position_embeddings.weight",
156
+ r"\d+.input_layernorm.weight",
157
+ r"\d+.input_layernorm.bias",
158
+ r"\d+.post_attention_layernorm.weight",
159
+ r"\d+.post_attention_layernorm.bias",
160
+ r"\d+.self_attention.dense.bias",
161
+ r"\d+.attention.dense.bias",
162
+ r"\d+.mlp.dense_4h_to_h.bias",
163
+ r"\d+.weight",
164
+ r"\d+.bias",
165
+ ]
166
+
167
+ WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
168
+ "dense_4h_to_h.weight",
169
+ "self_attention.dense.weight",
170
+ "attention.dense.weight",
171
+ ]
172
+
173
+
174
+ def _get_vocab_divisibility_padding_tensor(ds_checkpoint, padded_vocab_tensor):
175
+ checkpoint_info = ds_checkpoint.get_checkpoint_info()
176
+ if checkpoint_info and padded_vocab_tensor.shape[0] > checkpoint_info[ORIGINAL_VOCAB_SIZE]:
177
+ return padded_vocab_tensor[-1]
178
+ else:
179
+ return torch.zeros(padded_vocab_tensor.shape[1])
180
+
181
+
182
+ def _all_same_tensor(arr):
183
+ assert len(arr) > 0
184
+ if len(arr) == 1:
185
+ return True
186
+ res = all([x.eq(arr[0]).all().item() for x in arr[1:]])
187
+ return res
188
+
189
+
190
+ def merge_tp_slices(ds_checkpoint, out_path, slice_dir, tp_degree, name_and_shape):
191
+ name, shape = name_and_shape
192
+ slice_base_path = os.path.join(slice_dir, name)
193
+ param_base_path = os.path.join(out_path, name)
194
+
195
+ for state in ("fp32", "exp_avg", "exp_avg_sq"):
196
+ slices = _merge_zero_shards(slice_base_path, state, tp_degree, shape)
197
+ final_path = os.path.join(param_base_path, f"{state}.pt")
198
+
199
+ ckpt_dict = {}
200
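+ # replicated (non tensor-parallel) weights must match across tp ranks and are taken from rank 0; sharded weights are concatenated along the recorded cat_dim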
+ if any(re.match(pattern, name) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS):
201
+ assert _all_same_tensor(slices), f'Checkpoint misalignment detected for parameter: {name}'
202
+ param = slices[0]
203
+ else:
204
+ cat_dim = 1 if any(text in name for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
205
+ param = torch.cat(slices, dim=cat_dim)
206
+ ckpt_dict['cat_dim'] = cat_dim
207
+
208
+ if "word_embeddings.weight" in name:
209
+ # strip padding
210
+ # param = _strip_vocab_padding(ds_checkpoint, param)
211
+ ckpt_dict['vocab_divisibility_padding_tensor'] = \
212
+ _get_vocab_divisibility_padding_tensor(ds_checkpoint, param)
213
+
214
+ ckpt_dict['param'] = param
215
+ _save_checkpoint(final_path, ckpt_dict)
216
+
217
+
218
+ def _get_chunks(l, n):
219
+ for i in range(0, len(l), n):
220
+ yield l[i:i + n]
221
+
222
+
223
+ def _do_parallel_work(do_work, work_chunks, num_workers):
224
+ pool = multiprocessing.Pool(num_workers)
225
+ for batch in tqdm.tqdm(work_chunks):
226
+ pool.map(do_work, batch)
227
+ pool.close()
228
+ pool.join()
229
+
230
+
231
+ def _extract_zero_shard_files(args, ds_checkpoint, temp_dir):
232
+ _3d_range_list = list(itertools.product(range(ds_checkpoint.pp_degree),
233
+ range(ds_checkpoint.tp_degree),
234
+ range(ds_checkpoint.dp_degree)))
235
+ work_chunks = list(_get_chunks(_3d_range_list, args.num_extract_workers))
236
+
237
+ do_work = partial(extract_zero_shards, temp_dir, ds_checkpoint)
238
+ _do_parallel_work(do_work, work_chunks, args.num_extract_workers)
239
+
240
+
241
+ def _merge_tp_slice_files(args, ds_checkpoint, slice_shapes, temp_dir):
242
+ work_chunks = list(_get_chunks(list(slice_shapes.items()), args.num_merge_workers))
243
+ zero_output_folder = os.path.join(args.output_folder, "zero")
244
+ do_work = partial(merge_tp_slices, ds_checkpoint, zero_output_folder, temp_dir, ds_checkpoint.tp_degree)
245
+ _do_parallel_work(do_work, work_chunks, args.num_merge_workers)
246
+
247
+
248
+ def main():
249
+ print('Convert DeepSpeed Checkpoint to Universal Checkpoint')
250
+
251
+ args = parse_arguments()
252
+ print(
253
+ f'Converting DeepSpeed checkpoint in {args.input_folder} '
254
+ f'to Universal checkpoint in {args.output_folder}'
255
+ )
256
+
257
+ ds_checkpoint = DeepSpeedCheckpoint(args.input_folder)
258
+
259
+ slice_shapes = []
260
+ for mp_rank_file in ds_checkpoint.mp_rank_files:
261
+ mp_sd = torch.load(mp_rank_file, map_location=torch.device('cpu'))
262
+ slice_shapes += mp_sd["param_shapes"]
263
+
264
+ # fix back to normal flat dict, merge duplicates for tp>1
265
+ slice_shapes = dict((k, v) for d in slice_shapes for k, v in d.items())
266
+ temp_dir = os.path.join(args.output_folder, 'tmp')
267
+
268
+ print('*** 1. Extracting ZeRO fragments')
269
+ _extract_zero_shard_files(args, ds_checkpoint, temp_dir)
270
+
271
+ print('*** 2. Merging slices')
272
+ _merge_tp_slice_files(args, ds_checkpoint, slice_shapes, temp_dir)
273
+
274
+ shutil.rmtree(temp_dir, ignore_errors=True)
275
+
276
+ # Copy mp* files into output folder
277
+ for f in glob.glob(os.path.join(args.input_folder, 'mp*')):
278
+ shutil.copy2(f, args.output_folder)
279
+
280
+ # Update latest to output folder
281
+ checkpoint_root_folder, step_folder = os.path.split(args.output_folder)
282
+ latest_file = os.path.join(checkpoint_root_folder, 'latest_universal')
283
+ with open(latest_file, "w") as f:
284
+ f.write(step_folder)
285
+
286
+ print('*** Done!')
287
+
288
+
289
+ if __name__ == "__main__":
290
+ main()
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/megatron_optim_merge.py ADDED
@@ -0,0 +1,340 @@
1
+ # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import multiprocessing
17
+ from datetime import datetime
18
+ from pathlib import Path
19
+ import os
20
+ import copy
21
+ import numpy as np
22
+ import torch # pytype: disable=import-error
23
+ import pickle
24
+
25
+ def save_numpy(optim_state, lyr_name, saved_dir):
26
+ for opt_key, opt_val in optim_state["state"].items():
27
+ np.save((saved_dir / F"{lyr_name}.{opt_key}.npy").as_posix(), opt_val.float().cpu().numpy().astype(np.float32))
28
+ np.save((saved_dir / F"{lyr_name}.fp32_from_fp16_params.npy").as_posix(), optim_state["fp32_from_fp16_params"].float().cpu().numpy().astype(np.float32))
29
+ with open((saved_dir / F"{lyr_name}.param.pickle").as_posix(), 'wb') as handle:
30
+ pickle.dump(optim_state["param_groups"], handle, protocol=pickle.HIGHEST_PROTOCOL)
31
+
32
+
33
+ # This tool supports the new Megatron model trained with pipeline parallelism + tensor parallelism
34
+ def merge(
35
+ key, pp_id, saved_dir, model_args, optim_states, ckpt_ver, is_save_numpy
36
+ ):
37
+ #i, pipeline_para_rank, saved_dir, factor, key, model_args, transformer_model_list, ckpt_ver
38
+ saved_dir = Path(saved_dir)
39
+ if key.find("layers.") != -1:
40
+ # key name: language_model.encoder.layers
41
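+ # offset 30 == len('language_model.encoder.layers.'); extract the local layer index from the key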
+ layer_index = (int)(key[30 : key.find(".", 30)])
42
+ saved_key = key.replace(
43
+ "layers.%d." % layer_index,
44
+ "layers.%d."
45
+ % (layer_index + pp_id * model_args.num_layers // model_args.pipeline_model_parallel_size),
46
+ )
47
+ abs_layer_index = "%d" % (layer_index + pp_id * model_args.num_layers // model_args.pipeline_model_parallel_size)
48
+ abs_layer_dir = "layer_" + abs_layer_index
49
+ saved_dir = saved_dir / abs_layer_dir
50
+ else:
51
+ saved_key = key
52
+ #major_device = transformer_model_list[0][key].device
53
+ #print(saved_key)
54
+ optim_state = copy.deepcopy(optim_states[key])
55
+ del optim_state['group_index']
56
+ del optim_state['index_within_group']
57
+
58
+ if (
59
+ key.find("input_layernorm.weight") != -1
60
+ or key.find("input_layernorm.bias") != -1
61
+ or key.find("attention.dense.bias") != -1
62
+ or key.find("post_attention_layernorm.weight") != -1
63
+ or key.find("post_attention_layernorm.bias") != -1
64
+ or key.find("mlp.dense_4h_to_h.bias") != -1
65
+ or key.find("final_layernorm.weight") != -1
66
+ or key.find("final_layernorm.bias") != -1
67
+ ):
68
+ # shared weights, only need to convert the weights from single tp instance
69
+ for opt_key, opt_val in optim_state["state"].items():
70
+ optim_state['state'][opt_key] = opt_val[0]
71
+ #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}")
72
+ optim_state["fp32_from_fp16_params"] = optim_state["fp32_from_fp16_params"][0]
73
+ #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}")
74
+ elif key.find("attention.dense.weight") != -1:
75
+ state_key = list(optim_state["state"].keys())[0]
76
+ head_num = model_args.num_attention_heads // model_args.tensor_model_parallel_size
77
+ hidden_dim = int(optim_state["state"][state_key][0].shape[0])
78
+ dim_per_head = int(optim_state["state"][state_key][0].shape[1] / head_num)
79
+ for opt_key, opt_val in optim_state["state"].items():
80
+ vals = []
81
+ for k in range(model_args.tensor_model_parallel_size):
82
+ val = opt_val[k]
83
+ val = val.reshape(hidden_dim, head_num, dim_per_head)
84
+ vals.append(val)
85
+ optim_state['state'][opt_key] = torch.cat(vals, dim=1)
86
+ #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}")
87
+ vals = []
88
+ for k in range(model_args.tensor_model_parallel_size):
89
+ val = optim_state["fp32_from_fp16_params"][k]
90
+ val = val.reshape(hidden_dim, head_num, dim_per_head)
91
+ vals.append(val)
92
+ optim_state["fp32_from_fp16_params"] = torch.cat(vals, dim=1)
93
+ #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}")
94
+ elif key.find("mlp.dense_4h_to_h.weight") != -1:
95
+ for opt_key, opt_val in optim_state["state"].items():
96
+ vals = []
97
+ for k in range(model_args.tensor_model_parallel_size):
98
+ vals.append(opt_val[k])
99
+ optim_state['state'][opt_key] = torch.cat(vals, dim=-1)
100
+ #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}")
101
+ vals = []
102
+ for k in range(model_args.tensor_model_parallel_size):
103
+ vals.append(optim_state["fp32_from_fp16_params"][k])
104
+ optim_state["fp32_from_fp16_params"] = torch.cat(vals, dim=-1)
105
+ #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}")
106
+ elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1:
107
+ for opt_key, opt_val in optim_state["state"].items():
108
+ vals = []
109
+ for k in range(model_args.tensor_model_parallel_size):
110
+ vals.append(opt_val[k])
111
+ optim_state['state'][opt_key] = torch.cat(vals, dim=0)
112
+ #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}")
113
+ vals = []
114
+ for k in range(model_args.tensor_model_parallel_size):
115
+ vals.append(optim_state["fp32_from_fp16_params"][k])
116
+ optim_state["fp32_from_fp16_params"] = torch.cat(vals, dim=0)
117
+ #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}")
118
+ elif key.find("attention.query_key_value.bias") != -1:
119
+ state_key = list(optim_state["state"].keys())[0]
120
+ num_splits = 3
121
+ head_num = model_args.num_attention_heads // model_args.tensor_model_parallel_size
122
+ size_per_head = int(optim_state["state"][state_key][0].shape[0] / num_splits / head_num)
123
+ for opt_key, opt_val in optim_state["state"].items():
124
+ vals = []
125
+ for k in range(model_args.tensor_model_parallel_size):
126
+ val = opt_val[k]
127
+ val = val.reshape(head_num, num_splits, size_per_head)
128
+ vals.append(val)
129
+ optim_state['state'][opt_key] = torch.cat(vals, dim=0)
130
+ #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}")
131
+ vals = []
132
+ for k in range(model_args.tensor_model_parallel_size):
133
+ val = optim_state["fp32_from_fp16_params"][k]
134
+ val = val.reshape(head_num, num_splits, size_per_head)
135
+ vals.append(val)
136
+ optim_state["fp32_from_fp16_params"] = torch.cat(vals, dim=0)
137
+ #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}")
138
+ elif key.find("attention.query_key_value.weight") != -1:
139
+ state_key = list(optim_state["state"].keys())[0]
140
+ num_splits = 3
141
+ hidden_dim = int(optim_state["state"][state_key][0].shape[1])
142
+ head_num = model_args.num_attention_heads // model_args.tensor_model_parallel_size
143
+ size_per_head = int(optim_state["state"][state_key][0].shape[0] / num_splits / head_num)
144
+ for opt_key, opt_val in optim_state["state"].items():
145
+ vals = []
146
+ for k in range(model_args.tensor_model_parallel_size):
147
+ val = opt_val[k]
148
+ val = val.reshape(head_num, num_splits, size_per_head, hidden_dim)
149
+ vals.append(val)
150
+ optim_state['state'][opt_key] = torch.cat(vals, dim=0)
151
+ #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}")
152
+ vals = []
153
+ for k in range(model_args.tensor_model_parallel_size):
154
+ val = optim_state["fp32_from_fp16_params"][k]
155
+ val = val.reshape(head_num, num_splits, size_per_head, hidden_dim)
156
+ vals.append(val)
157
+ optim_state["fp32_from_fp16_params"] = torch.cat(vals, dim=0)
158
+ #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}")
159
+ else:
160
+ print(f"[ERROR] cannot find key '{key}'")
161
+ exit(1)
162
+
163
+ #print(F"{saved_key}: {tmp.shape}")
164
+ if is_save_numpy:
165
+ save_numpy(optim_state, saved_key, saved_dir)
166
+ else:
167
+ saved_path = saved_dir / f"{saved_key}.pt"
168
+ torch.save(optim_state, saved_path)
169
+
170
+ def merge_checkpoint(args):
171
+ saved_dir = Path(args.saved_dir) / "gpu" / "optimizer"
172
+ saved_dir.mkdir(parents=True, exist_ok=True)
173
+
174
+ prefix = Path(args.in_dir)
175
+ ckpt_name = "model_optim_rng.pt"
176
+
177
+ # load position_embedding from rank 0
178
+ if (prefix / "mp_rank_00").is_dir():
179
+ model_00 = torch.load((prefix / "mp_rank_00" / ckpt_name).as_posix())
180
+ elif (prefix / "mp_rank_00_000").is_dir():
181
+ model_00 = torch.load((prefix / "mp_rank_00_000" / ckpt_name).as_posix())
182
+ else:
183
+ print(f"[ERROR] Cannot find checkpoint in {prefix}.")
184
+ exit(1)
185
+
186
+ model_args = model_00["args"]
187
+ with open((saved_dir / "args.txt").as_posix(), "w") as f:
188
+ for k, v in vars(model_args).items():
189
+ f.write(f"{k}:{v} \n")
190
+
191
+ del model_00
192
+
193
+ tp_size = model_args.tensor_model_parallel_size
194
+
195
+ for i in range(model_args.num_layers):
196
+ pp_id_dir = (saved_dir / f"layer_{i}").as_posix()
197
+ os.makedirs(pp_id_dir, exist_ok=True)
198
+
199
+ torch.multiprocessing.set_start_method("spawn")
200
+ torch.multiprocessing.set_sharing_strategy("file_system")
201
+ pool = multiprocessing.Pool(args.pool)
202
+ w_e_list = []
203
+ w_e_h_list = []
204
+ #for pp_id in [2]:
205
+ for pp_id in range(model_args.pipeline_model_parallel_size):
206
+ if model_args.pipeline_model_parallel_size == 1:
207
+ layer_rank_num = ""
208
+ else:
209
+ layer_rank_num = f"_{pp_id:03d}"
210
+ optim_states = {}
211
+ for tp_id in range(tp_size):
212
+ #if tp_id == 0:
213
+ print(F"Loading ckpt file from: mp_rank_{tp_id:02d}{layer_rank_num}")
214
+ m = torch.load((prefix / f"mp_rank_{tp_id:02d}{layer_rank_num}" / ckpt_name).as_posix(), map_location="cpu")
215
+ #m["model"]["language_model"]["encoder"] = {key: value for key, value in m["model"]["language_model"]["encoder"].items() if ("attention.dense.weight" in key) or ("mlp.dense_4h_to_h.weight" in key)}
216
+ #print(m["model"]["language_model"]["encoder"].keys())
217
+ target_optim_map_orig = m['optimizer_model_map']
218
+ target_optim_map = copy.deepcopy(target_optim_map_orig)
219
+ substr = "module.module."
220
+ for key, value in target_optim_map.items():
221
+ if value.startswith(substr):
222
+ target_optim_map[key] = value[len(substr):]
223
+ #del target_optim_map_orig
224
+ #for key, value in m["optimizer_model_map"].items():
225
+ for key, value in target_optim_map.items():
226
+ if value in optim_states:
227
+ for opt_key, opt_val in m["optimizer"]["optimizer"]["state"][key].items():
228
+ optim_states[value]["state"][opt_key].append(opt_val)
229
+ group_index = optim_states[value]["group_index"]
230
+ index_within_group = optim_states[value]["index_within_group"]
231
+ optim_states[value]["fp32_from_fp16_params"].append(m["optimizer"]["fp32_from_fp16_params"][group_index][index_within_group])
232
+ else:
233
+ optim_states[value] = {}
234
+ optim_states[value]["state"] = {}
235
+ for opt_key, opt_val in m["optimizer"]["optimizer"]["state"][key].items():
236
+ optim_states[value]["state"][opt_key] = []
237
+ optim_states[value]["state"][opt_key].append(opt_val)
238
+ # Find index param group
239
+ group_index = 0
240
+ index_within_group = 0
241
+ for index, group in enumerate(m["optimizer"]["optimizer"]["param_groups"]):
242
+ if key in group["params"]:
243
+ group_index = index
244
+ index_within_group = group["params"].index(key)
245
+ optim_states[value]["group_index"] = group_index
246
+ optim_states[value]["index_within_group"] = index_within_group
247
+ optim_states[value]["param_groups"] = copy.deepcopy(group)
248
+ if "params" in optim_states[value]["param_groups"]:
249
+ del optim_states[value]["param_groups"]["params"]
250
+ break
251
+ if "group_index" not in optim_states[value]:
252
+ print(F"couldn't find index for layer: {value}")
253
+ exit(1)
254
+ optim_states[value]["fp32_from_fp16_params"] = []
255
+ optim_states[value]["fp32_from_fp16_params"].append(m["optimizer"]["fp32_from_fp16_params"][group_index][index_within_group])
256
+
257
+ if pp_id == 0:
258
+ lyr_name = 'language_model.embedding.word_embeddings.weight'
259
+ optim_state = copy.deepcopy(optim_states[lyr_name])
260
+ for opt_key, opt_val in optim_state["state"].items():
261
+ optim_state['state'][opt_key] = torch.cat(opt_val, dim=0)
262
+ #print(F"lyr_name: {lyr_name} key: {opt_key}: {optim_state['state'][opt_key].shape}")
263
+ optim_state["fp32_from_fp16_params"] = torch.cat(optim_state["fp32_from_fp16_params"], dim=0)
264
+ #print(F"lyr_name: {lyr_name} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}")
265
+ del optim_state['group_index']
266
+ del optim_state['index_within_group']
267
+ if args.save_numpy:
268
+ save_numpy(optim_state, lyr_name, saved_dir)
269
+ else:
270
+ torch.save(optim_state, (saved_dir / F"{lyr_name}.pt").as_posix())
271
+ del optim_states[lyr_name]
272
+
273
+ lyr_name = 'language_model.embedding.position_embeddings.weight'
274
+ optim_state = copy.deepcopy(optim_states[lyr_name])
275
+ for opt_key, opt_val in optim_state["state"].items():
276
+ optim_state['state'][opt_key] = opt_val[0]
277
+ #print(F"lyr_name: {lyr_name} key: {opt_key}: {optim_state['state'][opt_key].shape}")
278
+ optim_state["fp32_from_fp16_params"] = optim_state["fp32_from_fp16_params"][0]
279
+ #print(F"lyr_name: {lyr_name} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}")
280
+ del optim_state['group_index']
281
+ del optim_state['index_within_group']
282
+ if args.save_numpy:
283
+ save_numpy(optim_state, lyr_name, saved_dir)
284
+ else:
285
+ torch.save(optim_state, (saved_dir / F"{lyr_name}.pt").as_posix())
286
+ del optim_states[lyr_name]
287
+
288
+ if pp_id == (model_args.pipeline_model_parallel_size - 1) and model_args.pipeline_model_parallel_size > 1:
289
+ lyr_name = 'word_embeddings.weight'
290
+ optim_state = copy.deepcopy(optim_states[lyr_name])
291
+ for opt_key, opt_val in optim_state["state"].items():
292
+ optim_state['state'][opt_key] = torch.cat(opt_val, dim=0)
293
+ #print(F"lyr_name: {lyr_name} key: {opt_key}: {optim_state['state'][opt_key].shape}")
294
+ optim_state["fp32_from_fp16_params"] = torch.cat(optim_state["fp32_from_fp16_params"], dim=0)
295
+ #print(F"lyr_name: {lyr_name} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}")
296
+ del optim_state['group_index']
297
+ del optim_state['index_within_group']
298
+ if args.save_numpy:
299
+ save_numpy(optim_state, lyr_name, saved_dir)
300
+ else:
301
+ torch.save(optim_state, (saved_dir / F"{lyr_name}.pt").as_posix())
302
+ del optim_states[lyr_name]
303
+
304
+ pool.starmap(
305
+ merge,
306
+ [
307
+ (
308
+ k,
309
+ pp_id,
310
+ saved_dir,
311
+ model_args,
312
+ optim_states,
313
+ m["checkpoint_version"],
314
+ args.save_numpy
315
+ )
316
+ for (k, _) in optim_states.items()
317
+ ],
318
+ )
319
+
320
+ pool.close()
321
+ pool.join()
322
+
323
+
324
+ if __name__ == "__main__":
325
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
326
+ parser.add_argument("-saved_dir", "-o", type=str, help="output directory for saving converted checkpoints", required=True)
327
+ parser.add_argument("-in_dir", "-i", type=str, help="input checkpoint directory path", required=True)
328
+ parser.add_argument("-save_numpy", "-npy", action='store_true', help="save output as numpy array", default=False)
329
+ parser.add_argument("-pool", "-pl", type=int, help="Process pool", default=4)
330
+ args = parser.parse_args()
331
+ print("\n=============== Argument ===============")
332
+ for key in vars(args):
333
+ print(f"{key}: {vars(args)[key]}")
334
+ print("========================================")
335
+
336
+ start_time = datetime.now()
337
+ merge_checkpoint(args)
338
+ stop_time = datetime.now()
339
+ run_time = stop_time - start_time
340
+ print(f"[INFO] Spent {run_time} (h:m:s) to convert the model")
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/megatron_optim_merged_to_ds_universal_convert.py ADDED
@@ -0,0 +1,321 @@
1
+ ###############################################################################
2
+ # Copyright (c) 2023 Habana Labs Ltd. All rights reserved.
3
+ ###############################################################################
4
+ import argparse
5
+ import multiprocessing
6
+ from datetime import datetime
7
+ from pathlib import Path
8
+ import os
9
+ import copy
10
+ import numpy as np
11
+ import torch # pytype: disable=import-error
12
+ import pickle
13
+ import glob
14
+ import re
15
+
16
+
17
+ WEIGHTS_TO_AVERAGE_PATTERNS = [
18
+ r"tied_modules.embed.word_embeddings.norm.weight",
19
+ r"tied_modules.embed.word_embeddings.norm.bias",
20
+ r"tied_modules.embed.position_embeddings.weight",
21
+ r"\d+.input_layernorm.weight",
22
+ r"\d+.input_layernorm.bias",
23
+ r"\d+.post_attention_layernorm.weight",
24
+ r"\d+.post_attention_layernorm.bias",
25
+ r"\d+.self_attention.dense.bias",
26
+ r"\d+.attention.dense.bias",
27
+ r"\d+.mlp.dense_4h_to_h.bias",
28
+ r"\d+.weight",
29
+ r"\d+.bias",
30
+ ]
31
+
32
+ WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
33
+ "dense_4h_to_h.weight",
34
+ "self_attention.dense.weight",
35
+ "attention.dense.weight",
36
+ ]
37
+ def _get_vocab_divisibility_padding_tensor(padded_vocab_tensor):
38
+ return padded_vocab_tensor[-1]
39
+
40
+ def _save_checkpoint(file_path, chkpt_sd):
41
+ ckp_dir, _ = os.path.split(file_path)
42
+ os.makedirs(ckp_dir, exist_ok=True)
43
+ torch.save(chkpt_sd, file_path)
44
+
45
+ def tensor_convert(tensor_name_mapping, tensor_index):
46
+ fp32_ckpt = {}
47
+ exp_avg_ckpt = {}
48
+ exp_avg_sq_ckpt = {}
49
+
50
+ tensor_name = tensor_name_mapping[tensor_index]
51
+ megatron_optimizer_states = torch.load(tensor_name[1])
52
+ if 'self_attention.query_key_value' in tensor_name[1]:
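+ # merged query_key_value tensors were stored per-head; flatten them back to 2D, keeping the trailing dimension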
53
+ dim = megatron_optimizer_states['fp32_from_fp16_params'].size()[len(megatron_optimizer_states['fp32_from_fp16_params'].size())-1]
54
+ fp32_ckpt['param'] = megatron_optimizer_states['fp32_from_fp16_params'].view(-1,dim)
55
+ exp_avg_ckpt['param'] = megatron_optimizer_states['state']['exp_avg'].view(-1,dim)
56
+ exp_avg_sq_ckpt['param'] = megatron_optimizer_states['state']['exp_avg_sq'].view(-1,dim)
57
+
58
+ cat_dim = 0
59
+ fp32_ckpt['cat_dim'] = cat_dim
60
+ exp_avg_ckpt['cat_dim'] = cat_dim
61
+ exp_avg_sq_ckpt['cat_dim'] = cat_dim
62
+ else:
63
+ fp32_ckpt['param'] = megatron_optimizer_states['fp32_from_fp16_params']
64
+ exp_avg_ckpt['param'] = megatron_optimizer_states['state']['exp_avg']
65
+ exp_avg_sq_ckpt['param'] = megatron_optimizer_states['state']['exp_avg_sq']
66
+
67
+ ds_tensor_name = os.path.split(tensor_name[0])[-1]
68
+ if not any(re.match(pattern, ds_tensor_name) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS):
69
+ cat_dim = 1 if any(text in ds_tensor_name for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
70
+ if '.bias' not in ds_tensor_name:
71
+ fp32_ckpt['cat_dim'] = cat_dim
72
+ exp_avg_ckpt['cat_dim'] = cat_dim
73
+ exp_avg_sq_ckpt['cat_dim'] = cat_dim
74
+
75
+ if 'word_embeddings.weight' in tensor_name[1]:
76
+ fp32_ckpt['vocab_divisibility_padding_tensor'] = \
77
+ _get_vocab_divisibility_padding_tensor(fp32_ckpt['param'])
78
+ exp_avg_ckpt['vocab_divisibility_padding_tensor'] = \
79
+ _get_vocab_divisibility_padding_tensor(exp_avg_ckpt['param'])
80
+ exp_avg_sq_ckpt['vocab_divisibility_padding_tensor'] = \
81
+ _get_vocab_divisibility_padding_tensor(exp_avg_sq_ckpt['param'])
82
+
83
+
84
+ fp32_weight_file_path = os.path.join(tensor_name[0], 'fp32.pt')
85
+ _save_checkpoint(fp32_weight_file_path, fp32_ckpt)
86
+
87
+ exp_avg_file_path = os.path.join(tensor_name[0], 'exp_avg.pt')
88
+ _save_checkpoint(exp_avg_file_path, exp_avg_ckpt)
89
+
90
+ exp_avg_sq_file_path = os.path.join(tensor_name[0], 'exp_avg_sq.pt')
91
+ _save_checkpoint(exp_avg_sq_file_path, exp_avg_sq_ckpt)
92
+
93
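+ # copy training-progress counters (lr schedule, iteration, consumed samples/tokens) from the Megatron-LM checkpoint into the DeepSpeed mp_rank state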
+ def mp_rank_files_info_adjustment(file,megatron_state_dict,same_config, ds_universal_checkpoints_path):
94
+ ds_state_dict = torch.load(file, map_location=torch.device('cpu'))
95
+ ds_state_dict['lr_scheduler']['num_steps'] = megatron_state_dict['opt_param_scheduler']['num_steps']
96
+ ds_state_dict['lr_scheduler']['warmup_steps'] = megatron_state_dict['opt_param_scheduler']['warmup_steps']
97
+ ds_state_dict['lr_scheduler']['decay_steps'] = megatron_state_dict['opt_param_scheduler']['decay_steps']
98
+ ds_state_dict['iteration'] = megatron_state_dict['iteration']
99
+ ds_state_dict['global_steps'] = megatron_state_dict['iteration']
100
+ ds_state_dict['global_samples'] = megatron_state_dict['args'].consumed_train_samples
101
+ ds_state_dict['tokens'] = megatron_state_dict['args'].consumed_train_samples* megatron_state_dict['args'].seq_length
102
+ ds_state_dict['args'].consumed_train_samples = megatron_state_dict['args'].consumed_train_samples
103
+ ds_state_dict['args'].consumed_valid_samples = megatron_state_dict['args'].consumed_valid_samples
104
+ ds_state_dict['args'].consumed_train_tokens = ds_state_dict['tokens']
105
+
106
+ # if both megatron-lm and megatron-deepspeed have the same TP, PP configuration, we copy the rng states from megatron-lm to megatron-deepspeed
107
+ if same_config == 'True':
108
+ ds_state_dict['random_rng_state'] = megatron_state_dict['rng_state'][0]['random_rng_state']
109
+ ds_state_dict['np_rng_state'] = megatron_state_dict['rng_state'][0]['np_rng_state']
110
+ ds_state_dict['torch_rng_state'] = megatron_state_dict['rng_state'][0]['torch_rng_state']
111
+ ds_state_dict['cuda_rng_state'] = megatron_state_dict['rng_state'][0]['cuda_rng_state']
112
+ ds_state_dict['rng_tracker_states'] = megatron_state_dict['rng_state'][0]['rng_tracker_states']
113
+
114
+ file = os.path.join(ds_universal_checkpoints_path,os.path.split(file)[1])
115
+ torch.save(ds_state_dict,file)
116
+
117
+
118
+ def mp_rank_files_info_adjustment_parallel_processing(ds_mp_rank_files_dir,ds_universal_checkpoints_path,megatron_lm_non_merged_input_dir, \
119
+ model_parallel_same_config,pp_index,tp_index,tp_rank):
120
+
121
+ state_dict = torch.load(os.path.join(megatron_lm_non_merged_input_dir,
122
+ 'mp_rank_{:02d}_{:03d}'.format(
123
+ tp_index,
124
+ pp_index),
125
+ 'model_optim_rng.pt'), map_location=torch.device('cpu'))
126
+
127
+ # Need to update according to how the mapping is done when tp_rank * pp_rank > 9
128
+ mp_rank_file_index = '0' + str(pp_index * tp_rank + tp_index)
129
+ mp_rank_file = os.path.join(ds_mp_rank_files_dir, 'mp_rank_' + mp_rank_file_index + '_model_states.pt')
130
+ mp_rank_files_info_adjustment(mp_rank_file, state_dict, model_parallel_same_config, ds_universal_checkpoints_path)
131
+
132
+
133
+
134
+ def ds_universal_convert(args):
135
+
136
+ torch.multiprocessing.set_start_method("spawn")
137
+ torch.multiprocessing.set_sharing_strategy("file_system")
138
+ pool = multiprocessing.Pool(args.pool)
139
+
140
+ ds_universal_checkpoints_path = args.ds_universal_dir
141
+ latest_file = os.path.join(ds_universal_checkpoints_path, 'latest_universal')
142
+ os.makedirs(ds_universal_checkpoints_path, exist_ok=True)
143
+ with open(latest_file, "w") as f:
144
+ f.write(str(args.iteration))
145
+
146
+ ds_universal_checkpoints_path = os.path.join(ds_universal_checkpoints_path, str(args.iteration))
147
+ os.makedirs(ds_universal_checkpoints_path, exist_ok=True)
148
+
149
+ if (args.update_only_mp_rank_files == False):
150
+ layers_per_model_pipeline_slice = args.num_layers // args.pp_rank
151
+ # tensor_name_mapping maps the ds tensor directory name to the megatron-lm merged optimizer tensor path
152
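+ # assumes the DeepSpeed pipeline module numbers transformer layers starting at index 3 and places the final layernorm at index num_layers + 4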
+ if args.pp_rank == 1:
153
+ tensor_name_mapping = [
154
+ [os.path.join(ds_universal_checkpoints_path, 'zero', 'tied_modules.embed.position_embeddings.weight'),os.path.join(args.megatron_lm_merged_input_dir, 'language_model.embedding.position_embeddings.weight.pt')], \
155
+ [os.path.join(ds_universal_checkpoints_path, 'zero', 'tied_modules.embed.word_embeddings.weight'), os.path.join(args.megatron_lm_merged_input_dir, 'language_model.embedding.word_embeddings.weight.pt')],
156
+ [os.path.join(ds_universal_checkpoints_path, 'zero', str(4 + args.num_layers) + '.bias'), os.path.join(args.megatron_lm_merged_input_dir, 'language_model.encoder.final_layernorm.bias.pt')],
157
+ [os.path.join(ds_universal_checkpoints_path, 'zero', str(4 + args.num_layers) + '.weight'), os.path.join(args.megatron_lm_merged_input_dir, 'language_model.encoder.final_layernorm.weight.pt')]
158
+ ]
159
+ else:
160
+ tensor_name_mapping = [
161
+ [os.path.join(ds_universal_checkpoints_path, 'zero','tied_modules.embed.position_embeddings.weight'), os.path.join(args.megatron_lm_merged_input_dir,'language_model.embedding.position_embeddings.weight.pt')], \
162
+ [os.path.join(ds_universal_checkpoints_path, 'zero','tied_modules.embed.word_embeddings.weight'), os.path.join(args.megatron_lm_merged_input_dir,'language_model.embedding.word_embeddings.weight.pt')],
163
+ [os.path.join(ds_universal_checkpoints_path, 'zero','word_embeddings.weight'),os.path.join(args.megatron_lm_merged_input_dir,'word_embeddings.weight.pt')], \
164
+ [os.path.join(ds_universal_checkpoints_path, 'zero',str(4+args.num_layers)+'.bias'), os.path.join(args.megatron_lm_merged_input_dir,'language_model.encoder.final_layernorm.bias.pt')],
165
+ [os.path.join(ds_universal_checkpoints_path, 'zero',str(4+args.num_layers)+'.weight'),os.path.join(args.megatron_lm_merged_input_dir,'language_model.encoder.final_layernorm.weight.pt')]
166
+ ]
167
+
168
+ layer_name_mapping = [
169
+ ['.attention.dense.bias', 'language_model.encoder.layers.LAYER_INDEX.self_attention.dense.bias'], \
170
+ ['.attention.dense.weight','language_model.encoder.layers.LAYER_INDEX.self_attention.dense.weight'], \
171
+ ['.attention.query_key_value.bias', 'language_model.encoder.layers.LAYER_INDEX.self_attention.query_key_value.bias'], \
172
+ ['.attention.query_key_value.weight', 'language_model.encoder.layers.LAYER_INDEX.self_attention.query_key_value.weight'], \
173
+ ['.input_layernorm.bias', 'language_model.encoder.layers.LAYER_INDEX.input_layernorm.bias'], \
174
+ ['.input_layernorm.weight', 'language_model.encoder.layers.LAYER_INDEX.input_layernorm.weight'], \
175
+ ['.mlp.dense_4h_to_h.bias', 'language_model.encoder.layers.LAYER_INDEX.mlp.dense_4h_to_h.bias'], \
176
+ ['.mlp.dense_4h_to_h.weight', 'language_model.encoder.layers.LAYER_INDEX.mlp.dense_4h_to_h.weight'], \
177
+ ['.mlp.dense_h_to_4h.bias', 'language_model.encoder.layers.LAYER_INDEX.mlp.dense_h_to_4h.bias'], \
178
+ ['.mlp.dense_h_to_4h.weight', 'language_model.encoder.layers.LAYER_INDEX.mlp.dense_h_to_4h.weight'], \
179
+ ['.post_attention_layernorm.bias', 'language_model.encoder.layers.LAYER_INDEX.post_attention_layernorm.bias'], \
180
+ ['.post_attention_layernorm.weight', 'language_model.encoder.layers.LAYER_INDEX.post_attention_layernorm.weight']
181
+ ]
182
+
183
+ for layer_index in np.arange(args.num_layers):
184
+ for layer_tensor_index in np.arange(len(layer_name_mapping)):
185
+
186
+ ds_tensor_name_map = os.path.join(ds_universal_checkpoints_path,'zero',str(3+layer_index)+layer_name_mapping[layer_tensor_index][0])
187
+ megatron_tensor_name_map = os.path.join(args.megatron_lm_merged_input_dir,'layer_'+str(layer_index),layer_name_mapping[layer_tensor_index][1].replace('LAYER_INDEX',str(layer_index))+'.pt')
188
+ tensor_name_map = [ds_tensor_name_map, megatron_tensor_name_map]
189
+ tensor_name_mapping.append(tensor_name_map)
190
+
191
+
192
+ # go over all the tensors in tensor_name_mapping and convert them from megatron optimizer format to ds_universal
193
+
194
+ #for tensors_index in np.arange(len(tensor_name_mapping)):
195
+ # tensor_convert(tensor_name_mapping,tensors_index)
196
+ # print('finished converting tensor {}'.format(tensors_index))
197
+
198
+ # convert the tensors in tensor_name_mapping from megatron optimizer format to ds_universal using multiprocessing
199
+
200
+ pool.starmap(
201
+ tensor_convert,
202
+ [
203
+ (
204
+ tensor_name_mapping,
205
+ k
206
+ )
207
+ for k in np.arange(len(tensor_name_mapping))
208
+ ],
209
+ )
210
+
211
+ pool.close()
212
+ pool.join()
213
+
214
+
215
+ # update the deepspeed mp_rank files according to the non-merged Megatron checkpoints (original Megatron checkpoint structure)
216
+
217
+ if args.model_parallel_same_config == 'True':
218
+ for pp_index in np.arange(args.pp_rank):
219
+ for tp_index in np.arange(args.tp_rank):
220
+ if args.pp_rank > 1:
221
+ file_name = os.path.join(args.megatron_lm_non_merged_input_dir,'mp_rank_{:02d}_{:03d}'.format(tp_index,pp_index),'model_optim_rng.pt')
222
+ else:
223
+ file_name = os.path.join(args.megatron_lm_non_merged_input_dir,'mp_rank_{:02d}'.format(tp_index),'model_optim_rng.pt')
224
+
225
+ state_dict = torch.load(file_name, map_location=torch.device('cpu'))
226
+
227
+ # Need to update according to how the mapping is done when tp_rank * pp_rank > 9
228
+ mp_rank_file_index = '0'+str(pp_index*args.tp_rank+tp_index)
229
+ mp_rank_file = os.path.join(args.ds_mp_rank_files_dir,'mp_rank_'+mp_rank_file_index+'_model_states.pt')
230
+ mp_rank_files_info_adjustment(mp_rank_file, state_dict, args.model_parallel_same_config,
231
+ ds_universal_checkpoints_path)
232
+
233
+
234
+
235
+ model_parallel_matrix_index = []
236
+ for pp_index in np.arange(args.pp_rank):
237
+ for tp_index in np.arange(args.tp_rank):
238
+ model_parallel_matrix_index.append([pp_index, tp_index])
239
+
240
+
241
+ pool = multiprocessing.Pool(args.pool)
242
+
243
+ pool.starmap(
244
+ mp_rank_files_info_adjustment_parallel_processing,
245
+ [
246
+ (
247
+ args.ds_mp_rank_files_dir,
248
+ ds_universal_checkpoints_path,
249
+ args.megatron_lm_non_merged_input_dir,
250
+ args.model_parallel_same_config,
251
+ pp_index,
252
+ tp_index,
253
+ args.tp_rank
254
+ )
255
+ for (pp_index, tp_index) in model_parallel_matrix_index
256
+ ],
257
+ )
258
+
259
+ pool.close()
260
+ pool.join()
261
+
262
+ else:
263
+ mp_rank_files = glob.glob(os.path.join(args.ds_mp_rank_files_dir, 'mp_rank_*.pt'))
264
+ if args.megatron_lm_non_merged_input_dir is not None:
265
+ file_name = glob.glob(os.path.join(args.megatron_lm_non_merged_input_dir,'*'))[0]+'/model_optim_rng.pt'
266
+ megatron_state_dict = torch.load(file_name, map_location=torch.device('cpu'))
267
+
268
+ else:
269
+ class My_args:
270
+ def __init__(self, consumed_train_samples=args.iteration * args.global_batch_size, seq_length=args.seq_length, consumed_valid_samples=0):
271
+ self.consumed_train_samples = consumed_train_samples
272
+ self.seq_length = seq_length
273
+ self.consumed_valid_samples = consumed_valid_samples
274
+
275
+ megatron_state_dict = { 'opt_param_scheduler': args.iteration, 'iteration': args.iteration, 'args' : None }
276
+ megatron_state_dict['opt_param_scheduler'] = {'num_steps': args.iteration*args.global_batch_size, 'warmup_steps': args.lr_warmup_samples , 'decay_steps': args.lr_decay_samples}
277
+ megatron_state_dict['args']= My_args(consumed_train_samples=args.iteration * args.global_batch_size,
278
+ seq_length=args.seq_length)
279
+
280
+ for mp_rank_file in mp_rank_files:
281
+ print(f"Adjusting {mp_rank_file=}", flush=True)
282
+ mp_rank_files_info_adjustment(mp_rank_file, megatron_state_dict, args.model_parallel_same_config, ds_universal_checkpoints_path)
283
+ # Deleting redundant mp_rank files, in case number of devices was decreased
284
+ universal_mp_rank_files = glob.glob(os.path.join(ds_universal_checkpoints_path, 'mp_rank_*.pt'))
285
+ for universal_mp_rank_file in universal_mp_rank_files:
286
+ if os.path.basename(universal_mp_rank_file) not in [os.path.basename(file_elem) for file_elem in mp_rank_files]:
287
+ print(f"Deleting old redundant mp_rank file {universal_mp_rank_file=}", flush=True)
288
+ os.remove(universal_mp_rank_file)
289
+
290
+
291
+
292
+ if __name__ == "__main__":
293
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
294
+ parser.add_argument("--ds-universal-dir", "--o", type=str, help="output directory for saving the converted ds_universal checkpoints", required=True)
295
+ parser.add_argument("--megatron-lm-merged-input-dir", "--merged-input", type=str, help="megatron-lm merged optimizer input checkpoint directory path", required=False)
296
+ parser.add_argument("--megatron-lm-non-merged-input-dir", "--non-merged-input", type=str, help="megatron-lm non merged checkpoint directory path", default = None)
297
+ parser.add_argument("--ds-mp-rank-files-dir", "--ds", type=str, help="deepspeed mp_rank_files directory path", required=True)
298
+ parser.add_argument("--tp-rank", "--tp",type=int, help="deepseed tp_rank configuration", default=8,required=True)
299
+ parser.add_argument("--pp-rank", "--pp",type=int, help="deepseed tp_rank configuration", default=8,required=True)
300
+ parser.add_argument("--num-layers", "--nl", type=int, help="GPT-3 number of layers", default=96)
301
+ parser.add_argument("--iteration", "--iter", type=int, help="#iteration ", default=None, required=True)
302
+ parser.add_argument("--global-batch-size", "--gbs", type=int, help="load ckpt global batch size", default=1536)
303
+ parser.add_argument("--seq_length", "--sl", type=int, help="Sequence length", default=2048)
304
+ parser.add_argument("--lr-warmup-samples", "--lws", type=int, help="lr warmup samples", default=407040)
305
+ parser.add_argument("--lr-decay-samples", "--lds", type=int, help="lr decay samples", default=166809600)
306
+ parser.add_argument("--model-parallel-same-config", "--same_config", help="if megatron-lm and megatron deepspeed tp, pp configuration is the same", default=True)
307
+ parser.add_argument("--pool", "-pl", type=int, help="Process pool", default=4)
308
+ parser.add_argument("--update-only-mp-rank-files", "--update", type=bool, help="if set will update only the mp_rank files w/o converting the nvidia-merged format to ds universal ", default=False, required=False)
309
+
310
+ args = parser.parse_args()
311
+ print("\n=============== Argument ===============")
312
+ for key in vars(args):
313
+ print(f"{key}: {vars(args)[key]}")
314
+ print("========================================")
315
+
316
+ print("Converting megatron merged optimizer checkpoint to deepspeed universal format checkpoint")
317
+ start_time = datetime.now()
318
+ ds_universal_convert(args)
319
+ stop_time = datetime.now()
320
+ run_time = stop_time - start_time
321
+ print(f"[INFO] Spent {run_time} (h:m:s) to convert the merged optimizer to deepspeed universal format")
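For reference, the per-file adjustment above amounts to rewriting a few bookkeeping fields in each mp_rank_*.pt state dict. The sketch below is a simplified, hypothetical stand-in for mp_rank_files_info_adjustment, shown only to illustrate which fields get written; the real function is defined earlier in this file and presumably also handles tp/pp re-mapping.

import torch

def adjust_mp_rank_file_sketch(path, megatron_state_dict):
    # Load the existing DeepSpeed mp_rank file, overwrite the bookkeeping
    # fields prepared above, and write it back in place.
    sd = torch.load(path, map_location='cpu')
    sd['iteration'] = megatron_state_dict['iteration']
    sd['opt_param_scheduler'] = megatron_state_dict['opt_param_scheduler']
    sd['args'] = megatron_state_dict['args']  # My_args instance built above
    torch.save(sd, path)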
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/verify_checkpoint_non_tp_consistency.py ADDED
@@ -0,0 +1,290 @@
1
+ import re
2
+ import tqdm
3
+ import argparse
4
+ from dataclasses import dataclass
5
+ import torch
6
+ from deepspeed.checkpoint import DeepSpeedCheckpoint
7
+
8
+
9
+ def parse_arguments():
10
+ parser = argparse.ArgumentParser()
11
+ parser.add_argument('--folder', default=None, type=str, help='DeepSpeed Checkpoint folder')
12
+ parser.add_argument('--model_type', default='GPT', type=str, help='Type of the model',
13
+ choices=['GPT', 'BLOOM', 'LLAMA'])
14
+ parser.add_argument('--sequence-parallel', action='store_true', help='Is sequence parallel enabled')
15
+ args = parser.parse_args()
16
+ print(f'args = {args}')
17
+ return args
18
+
19
+
20
+ def show_3d(ds_checkpoint):
21
+ src_3d = ds_checkpoint.zero_checkpoint.src_3d
22
+ dp, tp, pp = src_3d.dp_degree, src_3d.tp_degree, src_3d.pp_degree
23
+ print(f'3D configuration: DP={dp} TP={tp} PP={pp}')
24
+
25
+
26
+ def get_layer_patterns_for_non_sharded(model_type):
27
+ if model_type == 'GPT':
28
+ return [
29
+ 'position_embeddings.weight',
30
+ 'input_layernorm.weight',
31
+ 'input_layernorm.bias',
32
+ 'self_attention.dense.bias',
33
+ "attention.dense.bias",
34
+ 'post_attention_layernorm.weight',
35
+ 'post_attention_layernorm.bias',
36
+ 'mlp.dense_4h_to_h.bias',
37
+ 'weight',
38
+ 'bias'
39
+ ]
40
+
41
+ if model_type == 'BLOOM':
42
+ return [
43
+ 'input_layernorm.weight',
44
+ 'input_layernorm.bias',
45
+ 'self_attention.dense.bias',
46
+ "attention.dense.bias",
47
+ 'post_attention_layernorm.weight',
48
+ 'post_attention_layernorm.bias',
49
+ 'mlp.dense_4h_to_h.bias',
50
+ 'weight',
51
+ 'bias'
52
+ ]
53
+ if model_type == 'LLAMA':
54
+ return [
55
+ 'input_layernorm.weight',
56
+ 'input_layernorm.bias',
57
+ 'self_attention.dense.bias',
58
+ "attention.dense.bias",
59
+ 'post_attention_layernorm.weight',
60
+ 'post_attention_layernorm.bias',
61
+ 'mlp.dense_4h_to_h.bias',
62
+ 'final_rmsnorm.weight',
63
+ ]
64
+
65
+
66
+ def get_zero_patterns_for_non_sharded(model_type, sequence_parallel):
67
+ if model_type == 'GPT':
68
+ patterns = [
69
+ r"tied_modules.embed.word_embeddings.norm.weight",
70
+ r"tied_modules.embed.word_embeddings.norm.bias",
71
+ r"tied_modules.embed.position_embeddings.weight",
72
+ r"\d+.self_attention.dense.bias",
73
+ r"\d+.attention.dense.bias",
74
+ r"\d+.mlp.dense_4h_to_h.bias",
75
+ ]
76
+ if not sequence_parallel:
77
+ patterns = patterns + [
78
+ r"\d+.input_layernorm.weight",
79
+ r"\d+.input_layernorm.bias",
80
+ r"\d+.post_attention_layernorm.weight",
81
+ r"\d+.post_attention_layernorm.bias",
82
+ r"\d+.weight",
83
+ r"\d+.bias",
84
+ ]
85
+ return patterns
86
+ if model_type == 'BLOOM':
87
+ patterns = [
88
+ r"tied_modules.embed.word_embeddings.norm.weight",
89
+ r"tied_modules.embed.word_embeddings.norm.bias",
90
+ r"\d+.self_attention.dense.bias",
91
+ r"\d+.attention.dense.bias",
92
+ r"\d+.mlp.dense_4h_to_h.bias",
93
+ ]
94
+ if not sequence_parallel:
95
+ patterns = patterns + [
96
+ r"\d+.input_layernorm.weight",
97
+ r"\d+.input_layernorm.bias",
98
+ r"\d+.post_attention_layernorm.weight",
99
+ r"\d+.post_attention_layernorm.bias",
100
+ r"\d+.weight",
101
+ r"\d+.bias",
102
+ ]
103
+ return patterns
104
+ if model_type == 'LLAMA':
105
+ patterns = [
106
+ r"tied_modules.embed.word_embeddings.norm.weight",
107
+ r"tied_modules.embed.word_embeddings.norm.bias",
108
+ r"\d+.self_attention.dense.bias",
109
+ r"\d+.attention.dense.bias",
110
+ r"\d+.mlp.dense_4h_to_h.bias",
111
+ ]
112
+ if not sequence_parallel:
113
+ patterns = patterns + [
114
+ r"\d+.input_layernorm.weight",
115
+ r"\d+.input_layernorm.bias",
116
+ r"\d+.post_attention_layernorm.weight",
117
+ r"\d+.post_attention_layernorm.bias",
118
+ r"\d+.final_rmsnorm.weight",
119
+ ]
120
+ return patterns
121
+
122
+
123
+
124
+ @dataclass
125
+ class ParamInfo:
126
+ pp: int
127
+ tp: int
128
+ dp: int
129
+ data: torch.Tensor
130
+ numel: int
131
+
132
+
133
+ def get_zero_pp_stage_non_sharded_params(ds_checkpoint, model_type, sequence_parallel, pp_stage, dp_stage):
134
+ patterns = get_zero_patterns_for_non_sharded(model_type, sequence_parallel)
135
+ params = {}
136
+ for tp_stage in tqdm.tqdm(range(ds_checkpoint.tp_degree), desc='bf16 zero files'):
137
+ sd = ds_checkpoint.get_zero_checkpoint_state(
138
+ pp_index=pp_stage,
139
+ tp_index=tp_stage,
140
+ dp_index=dp_stage)
141
+
142
+ optim_sd = sd["optimizer_state_dict"]
143
+ param_slice_mappings = optim_sd["param_slice_mappings"]
144
+ state_groups = optim_sd["base_optimizer_state"]["state"]
145
+ fp32_groups = optim_sd["single_partition_of_fp32_groups"]
146
+
147
+ for param_group_id in range(len(state_groups)):
148
+ flat_state = dict(
149
+ exp_avg=state_groups[param_group_id]["exp_avg"],
150
+ exp_avg_sq=state_groups[param_group_id]["exp_avg_sq"],
151
+ fp32=fp32_groups[param_group_id],
152
+ )
153
+
154
+ for name, fragment_mapping in param_slice_mappings[param_group_id].items():
155
+ if not any(re.match(pattern, name) for pattern in patterns):
156
+ continue
157
+
158
+ for state_key in flat_state.keys():
159
+ tensor = flat_state[state_key].narrow(
160
+ dim=0,
161
+ start=fragment_mapping.start,
162
+ length=fragment_mapping.numel).clone()
163
+ info = ParamInfo(pp=pp_stage, tp=tp_stage, dp=dp_stage,
164
+ data=tensor, numel=fragment_mapping.numel)
165
+ full_name = name + '.__' + state_key
166
+ if full_name not in params:
167
+ params[full_name] = []
168
+ params[full_name].append(info)
169
+ return params
170
+
171
+
172
+ def verify_equal_params(params, tp):
173
+ failed = 0
174
+ report = {}
175
+ for name, info in params.items():
176
+ n = len(info)
177
+ if n != tp:
178
+ ok = False
179
+ print(f'{name}: FAILED expected n={n} == tp={tp}')
180
+ elif n == 1:
181
+ ok = True
182
+ else:
183
+ ok = all([(x.numel == info[0].numel) for x in info[1:]])
184
+ if not ok:
185
+ print(f'{name}: FAILED numel comparison [n={n}]')
186
+ else:
187
+ ok = all([x.data.eq(info[0].data).all().item() for x in info[1:]])
188
+ if not ok:
189
+ print(f'{name}: FAILED data comparison [n={n}]')
190
+ failed += (ok == False)
191
+ report[name] = (ok, n)
192
+ if ok:
193
+ print(f'{name}: OK [n={n}]')
194
+ return failed, report
195
+
196
+
197
+ def update_layer_non_sharded_params(params, model_type, filename, pp_index, tp_index):
198
+ layer_id, file_tp_index = re.search(r'layer_(\d+)-model_(\d+)', filename).groups()
199
+ layer_id = int(layer_id)
200
+ file_tp_index = int(file_tp_index)
201
+ #assert tp_index == file_tp_index, f'Inconsistent tp index tp_index={tp_index} file_tp_index={file_tp_index}'
202
+ if tp_index != file_tp_index:
203
+ print(f'WARNING: inconsistent tp index: tp_index={tp_index} file_tp_index={file_tp_index}', flush=True)
204
+
205
+ sd = torch.load(filename, map_location=torch.device('cpu'))
206
+ sequential_layers = get_layer_patterns_for_non_sharded(model_type)
207
+ for key in sd.keys():
208
+ if key in sequential_layers:
209
+ param_key = str(layer_id) + '.' + key
210
+ if param_key not in params:
211
+ params[param_key] = []
212
+ info = ParamInfo(pp=pp_index, tp=tp_index, dp=-1,
213
+ data=sd[key], numel=sd[key].numel())
214
+ params[param_key].append(info)
215
+ return params
216
+
217
+
218
+ def verify_layer_files(ds_checkpoint, model_type):
219
+ src_3d = ds_checkpoint.zero_checkpoint.src_3d
220
+ dp, tp, pp = src_3d.dp_degree, src_3d.tp_degree, src_3d.pp_degree
221
+
222
+ total_failed = 0
223
+ for pp_index in range(pp):
224
+ print(f'\nChecking pp_stage={pp_index}')
225
+ params = {}
226
+ if pp_index == 0:
227
+ for tp_index in range(tp):
228
+ for filename in ds_checkpoint.tp_to_embedding_map[tp_index]:
229
+ update_layer_non_sharded_params(params, model_type,
230
+ filename, pp_index, tp_index)
231
+ for tp_index in range(tp):
232
+ for filename_list in ds_checkpoint.transformer_file_map[(tp_index, pp_index)]:
233
+ for filename in filename_list:
234
+ update_layer_non_sharded_params(params, model_type,
235
+ filename, pp_index, tp_index)
236
+ if pp_index == (pp-1):
237
+ for tp_index in range(tp):
238
+ for filename in ds_checkpoint.tp_to_final_norm_map[tp_index]:
239
+ update_layer_non_sharded_params(params, model_type,
240
+ filename, pp_index, tp_index)
241
+ failed, report = verify_equal_params(params, tp)
242
+ total_failed += failed
243
+ return total_failed
244
+
245
+
246
+ def verify_zero_files(ds_checkpoint, model_type,sequence_parallel):
247
+ src_3d = ds_checkpoint.zero_checkpoint.src_3d
248
+ dp, tp, pp = src_3d.dp_degree, src_3d.tp_degree, src_3d.pp_degree
249
+
250
+ total_failed = 0
251
+ for i in range(pp):
252
+ for j in range(dp):
253
+ print(f'\nChecking pp_stage={i} dp_stage={j}')
254
+ params = get_zero_pp_stage_non_sharded_params(ds_checkpoint, model_type, sequence_parallel,
255
+ pp_stage=i, dp_stage=j)
256
+ failed, report = verify_equal_params(params, tp)
257
+ total_failed += failed
258
+ return total_failed
259
+
260
+ def verify_checkpoint(folder,model_type,sequence_parallel=False):
261
+ final_layer_norm_idx = -2 if model_type == 'LLAMA' else -1
262
+ ds_checkpoint = DeepSpeedCheckpoint(folder,final_layer_norm_idx=final_layer_norm_idx)
263
+ ds_checkpoint.validate_files()
264
+ show_3d(ds_checkpoint)
265
+
266
+ print('\nVerify ** layer_ ** files')
267
+ total_failed_layer = verify_layer_files(ds_checkpoint, model_type)
268
+ if total_failed_layer == 0:
269
+ print('\nCheckpoint layer files OK')
270
+ else:
271
+ print(f"\nCheckpoint layer files BAD with total_failed={total_failed_layer}")
272
+
273
+ print('\nVerify ** bf16_zero_ ** files')
274
+ total_failed_zero = verify_zero_files(ds_checkpoint, model_type,sequence_parallel)
275
+ if total_failed_zero == 0:
276
+ print('\nCheckpoint zero files OK')
277
+ else:
278
+ print(f"\nCheckpoint zero files BAD with total_failed={total_failed_zero}")
279
+
280
+ return (total_failed_layer + total_failed_zero) == 0
281
+
282
+
283
+ def main():
284
+ print(f'Verify DeepSpeed Checkpoint consistency for non-TP-sharded parameters')
285
+ args = parse_arguments()
286
+ print(args)
287
+ assert verify_checkpoint(args.folder, args.model_type, args.sequence_parallel) is True, "Checkpoint verification failed"
288
+
289
+ if __name__ == "__main__":
290
+ main()
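Besides the CLI entry point above, verify_checkpoint can be called programmatically. A minimal usage sketch, assuming the gpt3 benchmark root is on PYTHONPATH and the checkpoint path is a placeholder:

from tools.convert_checkpoint.verify_checkpoint_non_tp_consistency import verify_checkpoint

# Returns True only if both the layer_* files and the bf16 zero files are
# consistent across tensor-parallel ranks.
ok = verify_checkpoint('/checkpoints/gpt3/global_step500', model_type='GPT',
                       sequence_parallel=False)
print('checkpoint consistent:', ok)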
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/create_doc_index.py ADDED
@@ -0,0 +1,33 @@
1
+ import os
2
+ import sys
3
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
4
+ os.path.pardir)))
5
+
6
+ from megatron import print_rank_0
7
+ from megatron.indexer import IndexBuilder
8
+ from megatron.initialize import initialize_megatron
9
+
10
+
11
+ def main():
12
+ """Create a BlockData data structure by running an IndexBuilder over an ICT Dataset
13
+ - Include all args needed for initial model specification
14
+
15
+ Other key args:
16
+ --block-data-path: path to write to
17
+ --ict-load or --realm-load: path to checkpoint with which to embed
18
+ --data-path and --titles-data-path: paths for dataset
19
+ --indexer-log-interval: reporting interval
20
+ --indexer-batch-size: size specific for indexer jobs
21
+
22
+ Check README.md for example script
23
+ """
24
+
25
+ initialize_megatron(extra_args_provider=None,
26
+ args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
27
+ index_builder = IndexBuilder()
28
+ index_builder.build_and_save_index()
29
+ print_rank_0("Build and save indices: done!")
30
+
31
+ if __name__ == "__main__":
32
+ main()
33
+
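The flags listed in the docstring are consumed by initialize_megatron from the command line. A hedged sketch of how this module's main() might be driven (all paths and values are placeholders, and the usual model-specification flags are still required in addition):

import sys

# Illustrative indexer-specific flags only; not a complete command line.
sys.argv += [
    '--block-data-path', 'block_data.pkl',
    '--ict-load', '/checkpoints/ict',
    '--data-path', 'wiki-data', '--titles-data-path', 'wiki-titles',
    '--indexer-log-interval', '1000',
    '--indexer-batch-size', '128',
]
main()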
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/generate_samples_gpt.py ADDED
@@ -0,0 +1,173 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2023 Habana Labs, Ltd. an Intel Company.
3
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Sample Generate GPT"""
18
+
19
+ import deepspeed
20
+
21
+ import os
22
+ import sys
23
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
24
+ os.path.pardir)))
25
+
26
+ from megatron import get_args
27
+ from megatron import print_rank_0
28
+ from megatron import get_tokenizer
29
+ from megatron import mpu
30
+ from megatron.checkpointing import load_checkpoint
31
+ from megatron.initialize import initialize_megatron
32
+ from megatron.model import GPTModel
33
+ from megatron.training import get_model
34
+ from megatron.text_generation_utils import generate_and_write_samples_unconditional
35
+ from megatron.text_generation_utils import generate_samples_input_from_file
36
+ from megatron.text_generation_utils import generate_samples_interactive
37
+ import deepspeed
38
+ import torch
39
+
40
+ def model_provider(pre_process=True, post_process=True):
41
+ """Build the model."""
42
+
43
+ print_rank_0('building GPT model ...')
44
+ model = GPTModel(num_tokentypes=0, parallel_output=False,
45
+ pre_process=pre_process, post_process=post_process,
46
+ return_moe_loss=False) # we need to set "return_moe_loss" for the inference_mode
47
+ return model
48
+
49
+
50
+ def add_text_generate_args(parser):
51
+ """Text generation arguments."""
52
+ group = parser.add_argument_group(title='text generation')
53
+
54
+ group.add_argument("--temperature", type=float, default=1.0,
55
+ help='Sampling temperature.')
56
+ group.add_argument("--greedy", action='store_true', default=False,
57
+ help='Use greedy sampling.')
58
+ group.add_argument("--top_p", type=float, default=0.0,
59
+ help='Top p sampling.')
60
+ group.add_argument("--top_k", type=int, default=0,
61
+ help='Top k sampling.')
62
+ group.add_argument("--out-seq-length", type=int, default=1024,
63
+ help='Size of the output generated text.')
64
+ group.add_argument("--sample-input-file", type=str, default=None,
65
+ help='Get input from file instead of interactive mode, '
66
+ 'each line is an input.')
67
+ group.add_argument("--sample-output-file", type=str, default=None,
68
+ help='Output file got from --sample-input-file')
69
+ group.add_argument("--num-samples", type=int, default=0,
70
+ help='Number of samples to generate unconditionally, '
71
+ 'defaults to 0 and interactive conditional sampling')
72
+ group.add_argument("--genfile", type=str,
73
+ help='Output file when generating unconditionally')
74
+ group.add_argument("--recompute", action='store_true',
75
+ help='During generation recompute all attention '
76
+ 'instead of using previously computed keys/values.')
77
+
78
+ return parser
79
+
80
+ def print_latency(latency_set, title=""):
81
+ # 10 warmup queries
82
+ latency_set = latency_set[10:]
83
+ count = len(latency_set)
84
+ if count > 0:
85
+ latency_set.sort()
86
+ n50 = (count - 1) * 0.5 + 1
87
+ n90 = (count - 1) * 0.9 + 1
88
+ n95 = (count - 1) * 0.95 + 1
89
+ n99 = (count - 1) * 0.99 + 1
90
+ n999 = (count - 1) * 0.999 + 1
91
+
92
+ avg = sum(latency_set) / count
93
+ p50 = latency_set[int(n50) - 1]
94
+ p90 = latency_set[int(n90) - 1]
95
+ p95 = latency_set[int(n95) - 1]
96
+ p99 = latency_set[int(n99) - 1]
97
+ p999 = latency_set[int(n999) - 1]
98
+
99
+ print("====== latency stats {0} ======".format(title))
100
+ print("\tAvg Latency: {0:8.2f} ms".format(avg * 1000))
101
+ print("\tP50 Latency: {0:8.2f} ms".format(p50 * 1000))
102
+ print("\tP90 Latency: {0:8.2f} ms".format(p90 * 1000))
103
+ print("\tP95 Latency: {0:8.2f} ms".format(p95 * 1000))
104
+ print("\tP99 Latency: {0:8.2f} ms".format(p99 * 1000))
105
+ print("\tP999 Latency: {0:8.2f} ms".format(p999 * 1000))
106
+
107
+ def main():
108
+ """Main program."""
109
+ latencies = []
110
+ model_latencies = []
111
+ single_token_latency = []
112
+
113
+ initialize_megatron(extra_args_provider=add_text_generate_args,
114
+ args_defaults={'tokenizer_type': 'GPT2BPETokenizer',
115
+ 'no_load_rng': True,
116
+ 'no_load_optim': True})
117
+
118
+ args = get_args()
119
+
120
+ if args.num_layers_per_virtual_pipeline_stage is not None:
121
+ print("Interleaved pipeline schedule is not yet supported for text generation.")
122
+ exit()
123
+
124
+ # Set up model and load checkpoint.
125
+ model = get_model(model_provider)
126
+
127
+ if args.load is not None:
128
+ _ = load_checkpoint(model, None, None)
129
+
130
+ assert len(model) == 1, "Above condition should have caught this"
131
+ model = model[0]
132
+
133
+ if args.ds_inference:
134
+ model = ds_inference(model, args)
135
+ print('> DeepSpeed Inference engine initialized')
136
+
137
+ # Generate samples.
138
+ if args.num_samples == 0:
139
+ assert args.micro_batch_size == args.eval_micro_batch_size, \
140
+ "main (generate_samples_gpt) - Unsupported for split micro batch size"
141
+ args.micro_batch_size = 1
142
+ # Next line should be considered once eval_micro_batch_size is supported here
143
+ args.eval_micro_batch_size = args.micro_batch_size
144
+ if args.sample_input_file is not None:
145
+ generate_samples_input_from_file(model)
146
+ else:
147
+ generate_samples_interactive(model)
148
+ else:
149
+ generate_and_write_samples_unconditional(model, latencies, single_token_latency, model_latencies)
150
+
151
+
152
+ #if torch.cuda.current_device() == 0:
153
+ if torch.distributed.get_rank() == 0:
154
+ print_latency(latencies)
155
+ print_latency(model_latencies, "model_latencies")
156
+ print_latency(single_token_latency, "single_token_latency")
157
+
158
+
159
+ def ds_inference(model, args):
160
+ import megatron.model as mm
161
+ engine = deepspeed.init_inference(model=model,
162
+ mp_size=args.tensor_model_parallel_size,
163
+ mpu=mpu,
164
+ dtype=torch.half,
165
+ replace_with_kernel_inject=True,
166
+ moe_experts=args.num_experts,
167
+ moe_type=args.mlp_type)
168
+
169
+ return engine.module
170
+
171
+ if __name__ == "__main__":
172
+
173
+ main()
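The percentile selection inside print_latency is sorted-index based. The same computation in isolation, as a self-contained sketch with made-up sample values:

def percentile_sketch(latencies, q):
    # Sort the measurements and index at the same (count - 1) * q + 1 position
    # used by print_latency above.
    data = sorted(latencies)
    n = (len(data) - 1) * q + 1
    return data[int(n) - 1]

samples = [0.021, 0.018, 0.025, 0.020, 0.019, 0.030, 0.022]  # seconds, illustrative
for q in (0.5, 0.9, 0.99):
    print(f'p{round(q * 100)}: {percentile_sketch(samples, q) * 1000:.2f} ms')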
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/merge_mp_partitions.py ADDED
@@ -0,0 +1,352 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Merge model parallel partitions."""
17
+
18
+ import os
19
+ import re
20
+ import sys
21
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
22
+ os.path.pardir)))
23
+
24
+ import torch
25
+
26
+ from megatron import mpu
27
+ from megatron.checkpointing import load_checkpoint, save_checkpoint
28
+ from megatron.checkpointing import ensure_directory_exists
29
+ from megatron.checkpointing import get_checkpoint_name
30
+ from megatron.checkpointing import get_checkpoint_version
31
+ from megatron.checkpointing import get_checkpoint_tracker_filename
32
+ from megatron.global_vars import set_global_variables, get_args
33
+ from megatron.global_vars import rebuild_tokenizer
34
+
35
+
36
+ def split_into_partitions(tensor, num_partitions, partition_dim, stride):
37
+
38
+ per_partition_size = mpu.utils.divide(tensor.size(partition_dim),
39
+ num_partitions)
40
+ per_partition_per_stride_size = mpu.utils.divide(per_partition_size, stride)
41
+
42
+ partitions_list = torch.split(tensor,
43
+ per_partition_per_stride_size,
44
+ dim=partition_dim)
45
+
46
+ partitions = []
47
+ for i in range(num_partitions):
48
+ partition = torch.cat(partitions_list[i::num_partitions],
49
+ dim=partition_dim)
50
+ partitions.append(partition)
51
+
52
+ return partitions
53
+
54
+
55
+ def merge_partitions(merged, partitions, partition_dim, stride):
56
+
57
+ # Number and size of each partition.
58
+ num_partitions = len(partitions)
59
+ per_partition_size = None
60
+ for partition in partitions:
61
+ if per_partition_size is None:
62
+ per_partition_size = partition.size(partition_dim)
63
+ else:
64
+ assert per_partition_size == partition.size(partition_dim)
65
+
66
+ def concat_partitions(partitions_):
67
+ with torch.no_grad():
68
+ if (per_partition_size * num_partitions) == merged.size(
69
+ partition_dim):
70
+ torch.cat(partitions_, dim=partition_dim, out=merged)
71
+ else:
72
+ print(' ***WARNING*** sizes do not match. Will cut '
73
+ 'the merged partitions by {} along dimension {} '
74
+ 'to reduce the size from {} to {} ...'.format(
75
+ (per_partition_size * num_partitions) - \
76
+ merged.size(partition_dim), partition_dim,
77
+ per_partition_size * num_partitions,
78
+ merged.size(partition_dim)))
79
+ merged_ = torch.cat(partitions_, dim=partition_dim)
80
+ merged_split = torch.split(merged_, merged.size(partition_dim),
81
+ dim=partition_dim)
82
+ merged_ = merged_split[0]
83
+ assert merged_.size(partition_dim) == merged.size(partition_dim)
84
+ merged.data.copy_(merged_.data)
85
+
86
+ # If stride is 1, then do simple concatination.
87
+ if stride == 1:
88
+ concat_partitions(partitions)
89
+ return
90
+
91
+ # For none unity strides, first split based on stride and then group.
92
+ per_partition_per_stride_size = mpu.utils.divide(per_partition_size, stride)
93
+ # Chunk and build a list.
94
+ chunks = None
95
+ for i, partition in enumerate(partitions):
96
+ chunk = torch.split(partition,
97
+ per_partition_per_stride_size,
98
+ dim=partition_dim)
99
+
100
+ if chunks is None:
101
+ chunks = [0]*(num_partitions*len(chunk))
102
+ chunks[i::num_partitions] = chunk
103
+
104
+ # Concatinate.
105
+ concat_partitions(chunks)
106
+
107
+ return
108
+
109
+
110
+ def get_model(model_type):
111
+
112
+ if model_type == 'BERT':
113
+ from pretrain_bert import model_provider
114
+ elif model_type == 'GPT':
115
+ from pretrain_gpt import model_provider
116
+ elif model_type == 'RACE':
117
+ from tasks.race.finetune import model_provider
118
+ elif model_type in ['MNLI', 'QQP']:
119
+ num_classes = 2
120
+ if model_type == 'MNLI':
121
+ num_classes = 3
122
+ from megatron.model.classification import Classification
123
+ def model_provider():
124
+ return Classification(num_classes=num_classes, num_tokentypes=2)
125
+ else:
126
+ raise Exception('unrecognized model type: {}'.format(model_type))
127
+
128
+ model = model_provider()
129
+ model = model.half()
130
+
131
+ return model
132
+
133
+
134
+ def get_parallel_checkpoint_name(path):
135
+
136
+ tracker_filename = get_checkpoint_tracker_filename(path)
137
+ iteration = 0
138
+ with open(tracker_filename, 'r') as f:
139
+ metastring = f.read().strip()
140
+ iteration = int(metastring)
141
+ assert iteration > 0
142
+ checkpoint_name = get_checkpoint_name(path, iteration)
143
+
144
+ return checkpoint_name, iteration
145
+
146
+
147
+ def test_split_merge():
148
+
149
+ print('testing split and merge ...')
150
+
151
+ #[QKV.ROW-COL]
152
+ tensor = torch.FloatTensor([[1.11, 1.12, 1.13, 1.14, 1.15],
153
+ [1.21, 1.22, 1.23, 1.24, 1.25],
154
+ [1.31, 1.32, 1.33, 1.34, 1.35],
155
+ [1.41, 1.42, 1.43, 1.44, 1.45],
156
+ [2.11, 2.12, 2.13, 2.14, 2.15],
157
+ [2.21, 2.22, 2.23, 2.24, 2.25],
158
+ [2.31, 2.32, 2.33, 2.34, 2.35],
159
+ [2.41, 2.42, 2.43, 2.44, 2.45],
160
+ [3.11, 3.12, 3.13, 3.14, 3.15],
161
+ [3.21, 3.22, 3.23, 3.24, 3.25],
162
+ [3.31, 3.32, 3.33, 3.34, 3.35],
163
+ [3.41, 3.42, 3.43, 3.44, 3.45]])
164
+
165
+ num_partitions = 2
166
+ partition_dim = 0
167
+ stride = 3
168
+ partitions = split_into_partitions(tensor, num_partitions,
169
+ partition_dim, stride)
170
+
171
+ merged = torch.zeros_like(tensor)
172
+ merge_partitions(merged, partitions, partition_dim, stride)
173
+
174
+ max_error = (merged - tensor).abs().max()
175
+ print(' > max error (should be zero): {}'.format(max_error))
176
+
177
+
178
+ def get_mp_merge_args(parser):
179
+ """Provide extra arguments required for merging."""
180
+ group = parser.add_argument_group(title='mp merge')
181
+
182
+ group.add_argument('--model-type', type=str, required=True,
183
+ choices=['BERT', 'GPT', 'RACE', 'MNLI', 'QQP'],
184
+ help='Type of the model.')
185
+ group.add_argument('--target-pipeline-model-parallel-size', type=int, default=1,
186
+ help='Degree of pipeline model parallelism in output model.')
187
+
188
+ return parser
189
+
190
+
191
+ def main():
192
+
193
+ # Arguments do sanity checks on the world size, but we don't care,
194
+ # so trick it into thinking we are plenty of processes
195
+ os.environ["WORLD_SIZE"] = f'{2**31}'
196
+
197
+ # Args
198
+ set_global_variables(extra_args_provider=get_mp_merge_args,
199
+ args_defaults = {'use_cpu_initialization': True,
200
+ 'micro_batch_size': 1,
201
+ 'no_load_optim': True,
202
+ 'no_load_rng': True,
203
+ 'no_save_optim': True,
204
+ 'no_save_rng': True,
205
+ 'save_interval': 1})
206
+ args = get_args()
207
+
208
+ if args.pipeline_model_parallel_size > 1:
209
+ print("Checkpoints with pipeline model parallelism are not currently supported.")
210
+ exit()
211
+
212
+ model_type = args.model_type
213
+ orig_tensor_model_parallel_size = args.tensor_model_parallel_size
214
+ args.tensor_model_parallel_size = 1
215
+ tokenizer = rebuild_tokenizer(args)
216
+
217
+ print('\n merging model parallel partitions ...')
218
+ print(' > number of partitions: {}'.format(orig_tensor_model_parallel_size))
219
+ print(' > checkpoint path: {}'.format(args.load))
220
+ print(' > model parameters:')
221
+ print(' number of tokens ................ {} '.format(
222
+ tokenizer.vocab_size))
223
+ print(' number of layers ................ {}'.format(args.num_layers))
224
+ print(' hidden size ..................... {}'.format(args.hidden_size))
225
+ print(' number of attention heads ....... {}'.format(
226
+ args.num_attention_heads))
227
+ print(' maximum position embeddings ..... {}'.format(
228
+ args.max_position_embeddings))
229
+
230
+ # Full model.
231
+ print('> building the full model ...')
232
+ mpu.initialize.set_tensor_model_parallel_world_size(1)
233
+ mpu.initialize.set_tensor_model_parallel_rank(0)
234
+ mpu.initialize.set_pipeline_model_parallel_world_size(1)
235
+ mpu.initialize.set_pipeline_model_parallel_rank(0)
236
+ merged_model = get_model(model_type)
237
+
238
+ # Build and load partitions.
239
+ partitions = []
240
+ iteration = 0
241
+ args.tensor_model_parallel_size = orig_tensor_model_parallel_size
242
+ tokenizer = rebuild_tokenizer(args)
243
+ mpu.initialize.set_tensor_model_parallel_world_size(args.tensor_model_parallel_size)
244
+ for rank in range(args.tensor_model_parallel_size):
245
+ # Reset these since load_checkpoint asserts they are 0, but we are loading
246
+ # multiple checkpoints in the same process and they get set each time
247
+ args.consumed_train_samples = 0
248
+ args.consumed_valid_samples = 0
249
+
250
+ mpu.initialize.set_tensor_model_parallel_rank(rank)
251
+ checkpoint_name, iteration = get_parallel_checkpoint_name(args.load)
252
+ model_ = get_model(model_type)
253
+ print(f'> loading {checkpoint_name} ...')
254
+ load_checkpoint(model_, None, None)
255
+ print(f'> checkpoint version {get_checkpoint_version()}')
256
+ partitions.append(model_)
257
+
258
+ # Parameter generators so we can loop through them semiltaneouly.
259
+ merged_params_gen = merged_model.named_parameters()
260
+ partitions_params_gen = [partition.named_parameters()
261
+ for partition in partitions]
262
+ while True:
263
+ try:
264
+
265
+ # Get the params and check names.
266
+ name, merged_param = next(merged_params_gen)
267
+ print(' > working on {} ...'.format(name))
268
+ print(' merged type: {}, size: {}'.format(
269
+ merged_param.dtype, list(merged_param.size())))
270
+ partitions_param = []
271
+ for rank, partition_params_gen in enumerate(partitions_params_gen):
272
+ partition_name, partition_param = next(partition_params_gen)
273
+ assert partition_name == name
274
+ partitions_param.append(partition_param)
275
+ print(' partition {} type: {}, size: {}'.format(
276
+ rank, partition_param.dtype, list(partition_param.size())))
277
+
278
+ # For the non-parallel parameters, simply copy the rank 0 values.
279
+ if not hasattr(merged_param, 'tensor_model_parallel'):
280
+ print(' none-parallel parameter, simple copy from rank 0')
281
+ with torch.no_grad():
282
+ merged_param.data.copy_(partitions_param[0].data)
283
+ # For parallel parameters, merge the values
284
+ else:
285
+ dim = merged_param.partition_dim
286
+ stride = merged_param.partition_stride
287
+ print(f' parallel parameter merge with stride {stride} along '
288
+ f'dimention {dim}')
289
+ merge_partitions(merged_param,
290
+ partitions_param,
291
+ dim,
292
+ stride)
293
+
294
+ except StopIteration:
295
+ break
296
+
297
+ partitions = []
298
+ args.tensor_model_parallel_size = 1
299
+ args.pipeline_model_parallel_size = args.target_pipeline_model_parallel_size
300
+
301
+ assert args.num_layers % args.pipeline_model_parallel_size == 0, \
302
+ 'num_layers must be divisible by target pipeline model parallel size'
303
+ layers_per_part = args.num_layers // args.pipeline_model_parallel_size
304
+
305
+ tokenizer = rebuild_tokenizer(args)
306
+ mpu.initialize.set_tensor_model_parallel_world_size(args.tensor_model_parallel_size)
307
+ mpu.initialize.set_tensor_model_parallel_rank(0)
308
+ mpu.initialize.set_pipeline_model_parallel_world_size(args.pipeline_model_parallel_size)
309
+
310
+ # regex to parse out layer number from param name
311
+ layer_re = re.compile(r'layers\.([0-9]+)')
312
+
313
+ if args.pipeline_model_parallel_size > 1:
314
+ merged_params = {}
315
+ for name, merged_param in merged_model.named_parameters():
316
+ merged_params[name] = merged_param
317
+
318
+ for rank in range(args.pipeline_model_parallel_size):
319
+ mpu.initialize.set_pipeline_model_parallel_rank(rank)
320
+ model = get_model(model_type)
321
+ def update_layer_num(m):
322
+ # TODO! This assumes no interleaved pipeline execution
323
+ layer = int(m.group(1))
324
+ layer += rank * layers_per_part
325
+ return f'layers.{layer}'
326
+
327
+ for dst_name, partition_param in model.named_parameters():
328
+ if dst_name == "word_embeddings.weight":
329
+ # See comment in MegatronModule.initialize_word_embeddings()
330
+ src_name = "language_model.embedding.word_embeddings.weight"
331
+ else:
332
+ # Translate destination layer number (0-N for each partition)
333
+ # to source layer number (single-model layer number)
334
+ src_name = re.sub(layer_re, update_layer_num, dst_name)
335
+ print(f" > copying {src_name} to {dst_name} in rank {rank}'s model")
336
+ partition_param.data.copy_(merged_params[src_name].data)
337
+
338
+ partitions.append(model)
339
+ else:
340
+ partitions = [merged_model]
341
+
342
+ for rank, model in enumerate(partitions):
343
+ mpu.initialize.set_pipeline_model_parallel_rank(rank)
344
+ print(f"> saving rank {rank}'s model")
345
+ save_checkpoint(iteration, model, None, None)
346
+
347
+ print('done :-)')
348
+
349
+
350
+ if __name__ == '__main__':
351
+
352
+ main()
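The split/merge pair above is stride-aware: each partition receives every num_partitions-th stride chunk. A standalone round-trip sketch using plain torch (no Megatron mpu dependency), with sizes chosen so the divisions are exact:

import torch

def split_sketch(tensor, num_partitions, dim, stride):
    # Chunk into per-partition, per-stride cells, then deal them out round-robin.
    per_partition = tensor.size(dim) // num_partitions
    per_cell = per_partition // stride
    chunks = torch.split(tensor, per_cell, dim=dim)
    return [torch.cat(chunks[i::num_partitions], dim=dim) for i in range(num_partitions)]

def merge_sketch(partitions, dim, stride):
    # Re-split each partition into its stride cells and interleave them back.
    per_cell = partitions[0].size(dim) // stride
    cells = [torch.split(p, per_cell, dim=dim) for p in partitions]
    ordered = [c for group in zip(*cells) for c in group]
    return torch.cat(ordered, dim=dim)

x = torch.arange(24, dtype=torch.float32).reshape(12, 2)
parts = split_sketch(x, num_partitions=2, dim=0, stride=3)
assert torch.equal(merge_sketch(parts, dim=0, stride=3), x)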
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/add_id.py ADDED
@@ -0,0 +1,67 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import json
18
+ import os
19
+ import time
20
+
21
+ """
22
+ This code adds id to each json object in a json file. User can add prefix
23
+ to the ids.
24
+ """
25
+
26
+ if __name__ == '__main__':
27
+
28
+ print('parsing the arguments ...')
29
+
30
+ parser = argparse.ArgumentParser()
31
+ parser.add_argument('--input-file', type=str, default=None, help='Input'\
32
+ ' json file where id needs to be added')
33
+ parser.add_argument('--output-file', type=str, default=None, help=\
34
+ 'Output file name with id')
35
+ parser.add_argument('--id-prefix', type=str, default=None, help=\
36
+ 'Id prefix')
37
+ parser.add_argument('--log-interval', type=int, default=100,
38
+ help='Log interval')
39
+ args = parser.parse_args()
40
+
41
+ print('Adding ids to dataset ...')
42
+
43
+ f_input = open(args.input_file, 'r', encoding='utf-8')
44
+ f_output = open(args.output_file, 'wb')
45
+
46
+ unique_ids = 1
47
+ start_time = time.time()
48
+ for row in f_input:
49
+ each_row = json.loads(row)
50
+ adlr_id_string = args.id_prefix + '-{:010d}'.format(int(unique_ids))
51
+ each_row['adlr_id'] = adlr_id_string
52
+ myjson = json.dumps(each_row, ensure_ascii=False)
53
+
54
+ f_output.write(myjson.encode('utf-8'))
55
+ f_output.write('\n'.encode('utf-8'))
56
+
57
+ if unique_ids % args.log_interval == 0:
58
+ print(' processed {:9d} documents in {:.2f} seconds ...'.format( \
59
+ unique_ids, time.time() - start_time), flush=True)
60
+
61
+ unique_ids += 1
62
+
63
+ # Close the file.
64
+ f_input.close()
65
+ f_output.close()
66
+
67
+ print('done :-)', flush=True)
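The transformation applied to each line is simply the addition of one key. A minimal sketch of a single record (prefix and text are placeholders):

import json

row = {'text': 'some document text', 'url': 'https://example.com'}
row['adlr_id'] = 'openwebtext' + '-{:010d}'.format(1)
print(json.dumps(row, ensure_ascii=False))
# -> {"text": "some document text", "url": "https://example.com", "adlr_id": "openwebtext-0000000001"}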
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/cleanup_dataset.py ADDED
@@ -0,0 +1,115 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ import ftfy
18
+ import json
19
+ from langdetect import detect
20
+ import numpy as np
21
+ import time
22
+ import os
23
+ import sys
24
+
25
+ from tokenizer import Tokenizer
26
+
27
+ MIN_DOCUMENT_LENGHT = 128
28
+
29
+
30
+ def print_progress(prefix, start_time, num_docs, num_fixed_text,
31
+ num_non_english_docs, chars_non_english_docs,
32
+ num_small_docs, chars_small_docs):
33
+
34
+ string = prefix + ' | '
35
+ string += 'elapsed time: {:.2f} | '.format(time.time() - start_time)
36
+ string += 'documents: {} | '.format(num_docs)
37
+ string += 'fixed text: {} | '.format(num_fixed_text)
38
+ string += 'non-english: {} | '.format(num_non_english_docs)
39
+ string += 'non-english chars: {} | '.format(chars_non_english_docs)
40
+ string += 'small docs: {} | '.format(num_small_docs)
41
+ string += 'small docs chars: {}'.format(chars_small_docs)
42
+ print(string, flush=True)
43
+
44
+
45
+ def filter_corpus(filename, out_filename, print_interval=10000):
46
+
47
+ print(' > filtering {}'.format(filename))
48
+
49
+ tokenizer = Tokenizer(cache_dir='./cache')
50
+
51
+ num_docs = 0
52
+ num_written_docs = 0
53
+ num_small_docs = 0
54
+ num_fixed_text = 0
55
+ num_non_english_docs = 0
56
+ chars_non_english_docs = 0
57
+ chars_small_docs = 0
58
+ start_time = time.time()
59
+ with open(out_filename, 'wb') as f:
60
+ with open(filename, 'r') as fin:
61
+ for line in fin:
62
+ try:
63
+ num_docs += 1
64
+ myjson = json.loads(line)
65
+ # Fix text
66
+ text = ftfy.fix_text(myjson['text'])
67
+ if text != myjson['text']:
68
+ num_fixed_text += 1
69
+ myjson['text'] = text
70
+ # Detect language.
71
+ if detect(text) != 'en':
72
+ print('[non-english text]', myjson)
73
+ num_non_english_docs += 1
74
+ chars_non_english_docs += len(text)
75
+ continue
76
+ # On average each token is 5 characters so 8 is an
77
+ # upper bound.
78
+ if len(text) < (8 * MIN_DOCUMENT_LENGHT):
79
+ tokens = tokenizer.tokenize_document(text)
80
+ if len(tokens) < MIN_DOCUMENT_LENGHT:
81
+ print('[small document, skipping]:', myjson)
82
+ num_small_docs += 1
83
+ chars_small_docs += len(text)
84
+ continue
85
+ myjson = json.dumps(myjson, ensure_ascii=False)
86
+ f.write(myjson.encode('utf-8'))
87
+ f.write('\n'.encode('utf-8'))
88
+ num_written_docs += 1
89
+ if num_docs % print_interval == 0:
90
+ print_progress('[PROGRESS]', start_time, num_docs,
91
+ num_fixed_text, num_non_english_docs,
92
+ chars_non_english_docs,
93
+ num_small_docs, chars_small_docs)
94
+ except Exception as e:
95
+ print(' skipping ', line, e)
96
+
97
+ print_progress('[FINAL]', start_time, num_docs,
98
+ num_fixed_text, num_non_english_docs,
99
+ chars_non_english_docs,
100
+ num_small_docs, chars_small_docs)
101
+
102
+
103
+ if __name__ == '__main__':
104
+
105
+ print('building gpt2 dataset ...')
106
+
107
+ input_filename = sys.argv[1]
108
+ output_filename = sys.argv[2]
109
+
110
+ print('will be reading {}'.format(input_filename))
111
+ print('and will write the results to {}'.format(output_filename))
112
+
113
+ filter_corpus(input_filename, output_filename)
114
+
115
+
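The per-document pipeline above (ftfy text repair, language detection, token-length filter) can be illustrated in isolation. A hedged sketch assuming ftfy and langdetect are installed; the tokenize argument stands in for the script's own Tokenizer:

import ftfy
from langdetect import detect

MIN_TOKENS = 128  # mirrors MIN_DOCUMENT_LENGHT above

def keep_document_sketch(text, tokenize):
    text = ftfy.fix_text(text)
    if detect(text) != 'en':
        return False  # drop non-English documents
    # Cheap character pre-check (~8 chars per token upper bound), then exact count.
    if len(text) < 8 * MIN_TOKENS and len(tokenize(text)) < MIN_TOKENS:
        return False  # drop documents shorter than the token threshold
    return True

print(keep_document_sketch('This is a short English sentence.', str.split))  # False: too short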
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/filter_ngrams.py ADDED
@@ -0,0 +1,492 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Deduplicate downstream task data from the training dataset using 13-grams.
18
+ Split documents shorter than 200 characters are filtered out, and any
19
+ document that produces more than 10 splits is filtered as well.
20
+ """
21
+
22
+ import argparse
23
+ from functools import partial
24
+ import json
25
+ import multiprocessing
26
+ import nltk
27
+ import pickle
28
+ import re
29
+ import string
30
+ import sys
31
+ import time
32
+
33
+ def get_words(text):
34
+ # get all the lowercase words from text
35
+ words, positions = [], []
36
+ for match in re.finditer(r'\w+', text.lower()):
37
+ words.append(match.group(0))
38
+ positions.append(match.start())
39
+ return words, positions
40
+
41
+ # splits the text
42
+ def split_text(text, start_position, remove_char_each_side, seq):
43
+ # first part of the text
44
+ punctuations = ".!?"
45
+ pos = start_position - remove_char_each_side
46
+ text_first = ""
47
+ while pos > 0 and not text[pos] in punctuations:
48
+ pos -= 1
49
+ if pos > 0:
50
+ text_first = text[0:pos+1]
51
+
52
+ # add length of seq and remove_char_each_side
53
+ pos = start_position + len(seq) + remove_char_each_side
54
+
55
+ # last part of the text
56
+ text_second = ""
57
+ while pos < len(text) and not text[pos] in punctuations:
58
+ pos += 1
59
+ if pos + 1 < len(text):
60
+ text_second = text[pos+1:len(text)]
61
+
62
+ return text_first, text_second
63
+
64
+ def check_and_clean_text(args, words, ngrams, text, start_position, \
65
+ text_buf_ngram_free, text_buf, local_ngram):
66
+
67
+ seq = " ".join(words)
68
+ if seq in ngrams:
69
+ print(" [matched]: {}".format(seq), flush=True)
70
+
71
+ if args.get_ngram_freq_only:
72
+ # increase freq of this seq and then only consider the later part
73
+ # of the text for further processing
74
+ if seq in local_ngram:
75
+ local_ngram[seq] += 1
76
+ else:
77
+ local_ngram[seq] = 1
78
+ #print(" [increased]: {} {}".format(seq, ngrams[seq]), flush=True)
79
+ if (start_position + len(seq) + 1) < len(text):
80
+ text_buf.append(text[start_position + len(seq) + 1:len(text)])
81
+ return False
82
+
83
+ # split the text
84
+ text_first, text_second = split_text(text, start_position, \
85
+ args.remove_char_each_side, seq)
86
+
87
+ # first part of ngrams free
88
+ if len(text_first) > args.filter_text_char_len:
89
+ text_buf_ngram_free.append(text_first)
90
+
91
+ # add second part for further processing
92
+ if len(text_second) > args.filter_text_char_len:
93
+ text_buf.append(text_second)
94
+
95
+ return False # not ngram free
96
+
97
+ # ngram free
98
+ return True
99
+
100
+
101
+ def free_ngram(line, args, key, ngrams, ngrams_freq_sorted):
102
+ # remove all the ngrams
103
+
104
+ try:
105
+ myjson = json.loads(line)
106
+ text_buf = [myjson[key]]
107
+ except Exception as e:
108
+ print("Error: {}".format(e), flush=True)
109
+ text_buf = []
110
+
111
+ text_buf_ngram_free = []
112
+ local_ngram = {}
113
+ while len(text_buf) > 0:
114
+
115
+ # get the first one from the buffer
116
+ text = text_buf.pop(0)
117
+ words, positions = get_words(text)
118
+
119
+ ngram_free = True
120
+ # find each max n-grams and check dictionary
121
+ for i in range(len(words) - args.max_ngram_size + 1):
122
+ check_ngram_free = check_and_clean_text(args, words[i:\
123
+ i+args.max_ngram_size], ngrams, text, positions[i], \
124
+ text_buf_ngram_free, text_buf, local_ngram)
125
+
126
+ # the seq is ngram free? if yes, break
127
+ if not check_ngram_free:
128
+ ngram_free = False
129
+ break
130
+
131
+ # if max ngrams doesn't match, check if any other lower n-grams
132
+ # within the max ngram matches
133
+ for ngram_len, _ in ngrams_freq_sorted:
134
+ check_ngram_free = check_and_clean_text(args, words[i:\
135
+ i+ngram_len], ngrams, text, positions[i], \
136
+ text_buf_ngram_free, text_buf, local_ngram)
137
+
138
+ # same check as above
139
+ if not check_ngram_free:
140
+ ngram_free = False
141
+ break
142
+
143
+ # check break from lower than max ngram loop above
144
+ if not ngram_free:
145
+ break
146
+
147
+ # for the last max n-gram, check all the lower ngrams in it
148
+ if ngram_free and len(words) - args.max_ngram_size > 0:
149
+ # get the last words of the last max ngram
150
+ last_seq_words = words[(len(words)-args.max_ngram_size):len(words)]
151
+ last_seq_start_position = len(words) - args.max_ngram_size
152
+
153
+ # check all n-grams lower than the max
154
+ for pos, (ngram_len, _) in enumerate(ngrams_freq_sorted):
155
+
156
+ # ignore the max ngram as has been considered already
157
+ if ngram_len == args.max_ngram_size:
158
+ continue
159
+
160
+ # find each ngram of ngram_len in max n-grams and check
161
+ for i in range(len(last_seq_words) - ngram_len + 1):
162
+ check_ngram_free = check_and_clean_text(args, \
163
+ last_seq_words[i:i+ngram_len], ngrams, text,\
164
+ positions[last_seq_start_position+i], \
165
+ text_buf_ngram_free, text_buf, local_ngram)
166
+
167
+ if not check_ngram_free:
168
+ ngram_free = False
169
+ break
170
+
171
+ if not ngram_free:
172
+ break
173
+
174
+ # texts are ngram free
175
+ if ngram_free and not args.get_ngram_freq_only:
176
+ text_buf_ngram_free.append(text)
177
+
178
+ # check if the text has only been trimmed
179
+ trimmed = 0
180
+ if not args.get_ngram_freq_only and len(text_buf_ngram_free) == 1 and \
181
+ len(text_buf_ngram_free[0]) < len(myjson[key]):
182
+ trimmed = 1
183
+
184
+ return text_buf_ngram_free, trimmed, myjson, local_ngram
185
+
186
+ # insert word sequence into dictionary
187
+ def insert_dict(words, ngrams, pos):
188
+ seq = " ".join(words)
189
+ if seq not in ngrams:
190
+ ngrams[seq] = 0
191
+ #ngrams[seq] = pos
192
+
193
+ # insert each ngram from text into the ngrams dictionary
194
+ def compute_ngrams_insert_dict(args, text, ngrams):
195
+ words, positions = get_words(text)
196
+ if len(words) < args.min_ngram_size:
197
+ return
198
+
199
+ if len(words) < args.max_ngram_size:
200
+ insert_dict(words, ngrams, positions[0])
201
+
202
+ for i in range(len(words) - args.max_ngram_size+1):
203
+ insert_dict(words[i:i+args.max_ngram_size], ngrams, positions[i])
204
+
205
+
206
+ # Build ngrams for the lambada dataset
207
+ def process_task_lambda(args, task_file, ngrams):
208
+ print(' reading from {} and computing ngrams'.format(task_file))
209
+ with open(task_file, 'r') as f:
210
+ for line in f:
211
+ try:
212
+ myjson = json.loads(line)
213
+ text = myjson['text']
214
+ compute_ngrams_insert_dict(args, text, ngrams)
215
+ except Exception as e:
216
+ print('Error:', e)
217
+ print(" Entities in ngrams {}".format(len(ngrams)), flush=True)
218
+
219
+
220
+ # Build ngrams for the dataset of the given task
221
+ def process_task(args, task_name, ngrams):
222
+
223
+ print(' reading from {} and computing ngrams'.format('import datasets'))
224
+ print(" Current entities in ngrams {}".format(len(ngrams)), flush=True)
225
+ # using validation/test data from datasets
226
+ from datasets import load_dataset
227
+
228
+ entities_in_ngrams = len(ngrams)
229
+
230
+ # load the dataset
231
+ if task_name == 'squad':
232
+ dataset = load_dataset('squad_v2', split='validation')
233
+ elif task_name == 'natural_questions':
234
+ dataset = load_dataset('natural_questions', split='validation')
235
+ elif task_name == 'triviaqa':
236
+ dataset = load_dataset('trivia_qa', 'unfiltered', split='test')
237
+ elif task_name == 'webqa':
238
+ dataset = load_dataset('web_questions', split='test')
239
+ elif task_name == 'race':
240
+ dataset = load_dataset('race', 'all', split='test')
241
+ elif task_name == 'drop':
242
+ dataset = load_dataset('drop', split='validation')
243
+ elif task_name == 'coqa':
244
+ dataset = load_dataset('coqa', split='validation')
245
+ elif task_name == 'piqa':
246
+ dataset = load_dataset('piqa', split='test')
247
+ else:
248
+ print("Invalid task name: {}".format(task_name), flush=True)
249
+ return
250
+
251
+ # read the dataset and add to ngrams
252
+ for line in dataset:
253
+ try:
254
+ if task_name in ['squad', 'triviaqa', 'webqa', 'race', 'drop']:
255
+ text = line['question']
256
+ compute_ngrams_insert_dict(args, text, ngrams)
257
+ elif task_name == 'natural_questions':
258
+ text = line['question']['text']
259
+ compute_ngrams_insert_dict(args, text, ngrams)
260
+ elif task_name == 'coqa':
261
+ all_questions = line['questions']
262
+ for question in all_questions:
263
+ compute_ngrams_insert_dict(args, question, ngrams)
264
+ elif task_name == 'piqa':
265
+ text = line['goal']
266
+ compute_ngrams_insert_dict(args, text, ngrams)
267
+ except Exception as e:
268
+ print('Error:', e)
269
+
270
+ print(" After task {} entities in ngrams {}, added {}".format(task_name, \
271
+ len(ngrams), len(ngrams) - entities_in_ngrams), flush=True)
272
+
273
+ def compute_tasks_ngrams(args, ngrams):
274
+ start_time = time.time()
275
+ for _, task_name in enumerate(args.tasks):
276
+ print('Task: {}'.format(task_name), flush=True)
277
+ if task_name == 'lambada':
278
+ assert args.lambada_path is not None
279
+ process_task_lambda(args, args.lambada_path, ngrams)
280
+ else:
281
+ process_task(args, task_name, ngrams)
282
+ print(" Taken time to compute ngrams {:.2f}".format(time.time() - \
283
+ start_time), flush=True)
284
+
285
+ def compute_ngram_freq_sorted(args, ngrams):
286
+ ngrams_freq = {}
287
+ for ngram_key in ngrams.keys():
288
+ length = len(ngram_key.split())
289
+ ngrams_freq[length] = ngrams_freq[length] + 1 if length in \
290
+ ngrams_freq else 1
291
+
292
+ ngrams_freq_sorted = sorted(ngrams_freq.items(), key=lambda item: item[0])
293
+ print(" Ngram frequencies: {}".format(ngrams_freq_sorted), flush=True)
294
+ print(" Entities in ngrams {} min_ngram_size {} max_ngram_size {}".format(\
295
+ len(ngrams), ngrams_freq_sorted[0][0], ngrams_freq_sorted[len(\
296
+ ngrams_freq_sorted) -1 ][0]), flush=True)
297
+ return ngrams_freq_sorted
298
+
299
+ def get_ngrams_below_threshold(args, ngrams, ngrams_below_threshold, \
300
+ dedup_file, dedup_key, ngrams_freq_sorted):
301
+
302
+ start_time = time.time()
303
+ # get the ngrams frequency
304
+ args.get_ngram_freq_only = True
305
+
306
+ # Open the large file to process in parallel
307
+ num_workers = args.num_threads
308
+ pool = multiprocessing.Pool(num_workers)
309
+ fin = open(dedup_file, 'r', encoding='utf-8')
310
+ free_ngram_abt_partial=partial(free_ngram, args=args, key=dedup_key, \
311
+ ngrams=ngrams, ngrams_freq_sorted=ngrams_freq_sorted)
312
+ free_ngrams_abt = pool.imap(free_ngram_abt_partial, fin, 500)
313
+
314
+ counter = 0
315
+ for _, _, _, local_ngram in free_ngrams_abt:
316
+ counter += 1
317
+ if counter % 1000 == 0:
318
+ print(' [compute_stat]> processed {} documents in {:.2f} seconds ...'.
319
+ format(counter, time.time() - start_time), flush=True)
320
+ for local_key in local_ngram:
321
+ if local_key in ngrams:
322
+ ngrams[local_key] += 1
323
+ local_ngram = {}
324
+
325
+ print(' Time taken to compute statistics {:.2f} seconds'.format(time.time() - \
326
+ start_time), flush=True)
327
+ pool.close()
328
+ pool.join()
329
+
330
+ start_time = time.time()
331
+ counter_threshold = 0
332
+ # Get ngrams below threshold
333
+ for local_key, local_val in ngrams.items():
334
+ if ngrams[local_key] < args.key_threshold:
335
+ print(" [threshold] {} {}".format(local_key, local_val), flush=True)
336
+ counter_threshold += 1
337
+ ngrams_below_threshold[local_key] = 1
338
+
339
+ print(' Ngrams below threshold {}'.format(counter_threshold), flush=True)
340
+ fin.close()
341
+
342
+ def clean_ngrams_below_threshold(args, ngrams_below_threshold, dedup_file, \
343
+ dedup_key):
344
+
345
+ start_time = time.time()
346
+ # Now actually filter the dataset
347
+ args.get_ngram_freq_only = False
348
+ #id_prefix = '-'.join(args.tasks[::2])
349
+ id_prefix = '-'.join(args.tasks[::1])
350
+
351
+ # get the range of the size of the ngrams
352
+ ngrams_freq_sorted = compute_ngram_freq_sorted(args, ngrams_below_threshold)
353
+
354
+ # Open the large file to process in parallel
355
+ counter = splitted = ignored = split_mt_thld = trimmed_count = 0
356
+ num_workers = args.num_threads
357
+ pool = multiprocessing.Pool(num_workers)
358
+ fin = open(dedup_file, 'r', encoding='utf-8')
359
+ free_ngram_clean_partial=partial(free_ngram, args=args, key=dedup_key, \
360
+ ngrams=ngrams_below_threshold, ngrams_freq_sorted=ngrams_freq_sorted)
361
+ free_ngrams_clean = pool.imap(free_ngram_clean_partial, fin, 500)
362
+
363
+ out_f = open(args.output, 'wb')
364
+
365
+ for text_buf_ngram_free, trimmed, myjson, _ in free_ngrams_clean:
366
+ counter += 1
367
+ try:
368
+
369
+ trimmed_count += trimmed
370
+
371
+ if len(text_buf_ngram_free) > 1:
372
+ splitted += 1
373
+ if len(text_buf_ngram_free) == 0:
374
+ ignored += 1
375
+ # more than 10 splits ignored
376
+ if len(text_buf_ngram_free) > args.splits_count:
377
+ text_buf_ngram_free = []
378
+ split_mt_thld += 1
379
+
380
+ if args.output is not None:
381
+ if "split_id" in myjson:
382
+ use_prefix = myjson["split_id"] + "-"
383
+ else:
384
+ use_prefix = ""
385
+
386
+ for i in range(len(text_buf_ngram_free)):
387
+ split_id_string = id_prefix + '-{:010d}'.format(int(\
388
+ counter)) + '-{:04d}'.format(int(i))
389
+ myjson[dedup_key] = text_buf_ngram_free[i]
390
+ myjson["split_id"] = use_prefix + split_id_string
391
+ outjson = json.dumps(myjson, ensure_ascii=False)
392
+ #outjson = json.dumps({"text":text_buf_ngram_free[i],
393
+ # id_prefix+"_split_id":split_id_string},
394
+ # ensure_ascii=False)
395
+ out_f.write(outjson.encode('utf-8'))
396
+ out_f.write('\n'.encode('utf-8'))
397
+
398
+ if counter % 1000 == 0:
399
+ print(' [final]> processed {} documents in {:.2f} seconds ...'.
400
+ format(counter, time.time() - start_time), flush=True)
401
+ except Exception as e:
402
+ print('Error:', e)
403
+
404
+ print(' [final]> processed {} documents in {:.2f} seconds ...'.
405
+ format(counter, time.time() - start_time), flush=True)
406
+
407
+ print(' Total docs {} split {} ignored {} splits > threshold {} trimmed'\
408
+ ' {}'.format(counter, splitted, ignored, split_mt_thld, trimmed_count)\
409
+ , flush=True)
410
+
411
+ pool.close()
412
+ pool.join()
413
+
414
+ out_f.close()
415
+ fin.close()
416
+
417
+ if __name__ == '__main__':
418
+
419
+ # we use 13-grams, any text less than 200 characters got removed
420
+ # any text splitted more than 10 got removed as well
421
+
422
+ print('parsing the arguments ...')
423
+
424
+ parser = argparse.ArgumentParser()
425
+ parser.add_argument('--tasks', nargs = '*', required=True, default=None, \
426
+ help = 'Tasks to use for deduplication: currently '
427
+ ' support [lambada, squad, natural_questions,'
428
+ ' triviaqa, webqa, race, drop, coqa, and piqa]')
429
+ parser.add_argument('--lambada-path', type=str, default=None,
430
+ help='Only Lambada task needs the path')
431
+ parser.add_argument('--dedup-dataset', nargs = '*', default=None,
432
+ help='Dataset to deduplicate with the key to use'
433
+ ' e.g. cc.json text')
434
+ parser.add_argument('--output', type=str, default=None,
435
+ help='Output file name to save dedup dataset')
436
+ parser.add_argument('--num-threads', type=int, default=40,
437
+ help='Number of threads to use')
438
+ # Default dedup values
439
+ parser.add_argument('--max-ngram-size', type=int, default=13,
440
+ help='Maximum size of ngram to use.')
441
+ parser.add_argument('--min-ngram-size', type=int, default=8,
442
+ help='Minimum size of ngram to use.')
443
+ parser.add_argument('--filter-text-char-len', type=int, default=200,
444
+ help='Remove any text below this length.')
445
+ parser.add_argument('--key-threshold', type=int, default=10,
446
+ help='Occurrence threshold: ngrams seen fewer times than this are treated as below threshold')
447
+ parser.add_argument('--save-dictionary', type=str, default=None,
448
+ help='Save the dictionary')
449
+ parser.add_argument('--load-dictionary', type=str, default=None,
450
+ help='Load the dictionary')
451
+ parser.add_argument('--splits-count', type=int, default=10,
452
+ help='Remove any documents more than this many splits')
453
+ parser.add_argument('--remove-char-each-side', type=int, default=200,
454
+ help='Number of characters to remove on each side of a matched ngram.')
455
+
456
+ args = parser.parse_args()
457
+
458
+ assert len(args.dedup_dataset) == 2
459
+ dedup_file = args.dedup_dataset[0]
460
+ dedup_key = args.dedup_dataset[1]
461
+
462
+ # Setup multi-processing
463
+ num_workers = args.num_threads
464
+ if args.load_dictionary is None:
465
+
466
+ # Build ngrams
467
+ ngrams = {}
468
+ compute_tasks_ngrams(args, ngrams)
469
+
470
+ # get the range of the size of the ngrams
471
+ ngrams_freq_sorted = compute_ngram_freq_sorted(args, ngrams)
472
+
473
+ # get ngram freq from large file in parallel
474
+ # get ngrams below threshold
475
+ ngrams_below_threshold = {}
476
+ get_ngrams_below_threshold(args, ngrams, ngrams_below_threshold, \
477
+ dedup_file, dedup_key, ngrams_freq_sorted)
478
+
479
+ # save the dictionary if needed
480
+ if args.save_dictionary is not None:
481
+ with open(args.save_dictionary, 'wb') as save_dict_handle:
482
+ pickle.dump(ngrams_below_threshold, save_dict_handle)
483
+ else:
484
+ with open(args.load_dictionary, 'rb') as load_dict_handle:
485
+ ngrams_below_threshold = pickle.load(load_dict_handle)
486
+
487
+ # filter the large file
488
+ if args.output is not None:
489
+ clean_ngrams_below_threshold(args, ngrams_below_threshold, \
490
+ dedup_file, dedup_key)
491
+
492
+ print('done :-)')
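The pipeline above builds the task n-grams, marks every n-gram whose count stays under --key-threshold, and then rewrites each document that survives filtering with a fresh "split_id". A minimal sketch of how those identifiers and the output JSONL records are formed, assuming hypothetical task names, counter value, and surviving text fragments (none of these values come from an actual run):

    import json

    tasks = ["lambada", "squad"]              # illustrative --tasks value
    counter = 42                              # running document counter
    fragments = ["first surviving chunk", "second surviving chunk"]
    myjson = {"text": "original document"}    # dedup_key assumed to be "text"

    id_prefix = "-".join(tasks)
    use_prefix = ""                           # or "<existing split_id>-" if the doc was split before

    for i, fragment in enumerate(fragments):
        split_id = id_prefix + "-{:010d}".format(counter) + "-{:04d}".format(i)
        record = dict(myjson, text=fragment, split_id=use_prefix + split_id)
        print(json.dumps(record, ensure_ascii=False))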
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/merge_jsons.py ADDED
@@ -0,0 +1,55 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ import glob
18
+ import sys
19
+ import json
20
+ import argparse
21
+
22
+ if __name__ == '__main__':
23
+
24
+ parser = argparse.ArgumentParser()
25
+ parser.add_argument("--json_path", type=str, default=".",
26
+ help="path where all the json files are located")
27
+
28
+ parser.add_argument("--output_file", type=str, default="merged_output.json",
29
+ help="filename where the merged json should go")
30
+
31
+ args = parser.parse_args()
32
+
33
+ json_path = args.json_path
34
+ out_file = args.output_file
35
+
36
+ json_files = glob.glob(json_path + '/*.json')
37
+
38
+ counter = 0
39
+
40
+ with open(out_file, 'w') as outfile:
41
+ for fname in json_files:
42
+ counter += 1
43
+
44
+ if counter % 1024 == 0:
45
+ print("Merging at ", counter, flush=True)
46
+
47
+ with open(fname, 'r') as infile:
48
+ for row in infile:
49
+ each_row = json.loads(row)
50
+ outfile.write(row)
51
+
52
+
53
+ print("Merged file", out_file, flush=True)
54
+
55
+
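merge_jsons.py simply concatenates every *.json file found under --json_path into a single JSONL file; each row is run through json.loads (apparently as a sanity check) before being written out unchanged. A hedged usage sketch, with placeholder paths:

    import subprocess

    # "./shards" and "merged_output.json" are example paths, not taken from the repo.
    subprocess.run(
        ["python", "merge_jsons.py",
         "--json_path", "./shards",
         "--output_file", "merged_output.json"],
        check=True)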
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .resnet import *
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/utils.py ADDED
@@ -0,0 +1,4 @@
1
+ try:
2
+ from torch.hub import load_state_dict_from_url
3
+ except ImportError:
4
+ from torch.utils.model_zoo import load_url as load_state_dict_from_url
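This shim keeps load_state_dict_from_url importable across PyTorch versions (torch.hub on newer releases, torch.utils.model_zoo on older ones). A minimal usage sketch; the checkpoint URL is a placeholder and the import path assumes the package is importable as "model":

    import torchvision
    from model.utils import load_state_dict_from_url  # the shim defined above

    net = torchvision.models.resnet50()
    # Placeholder URL: point this at the checkpoint you actually want to load.
    state_dict = load_state_dict_from_url("https://example.com/resnet50.pth", progress=True)
    net.load_state_dict(state_dict)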
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/batch_256.cfg ADDED
@@ -0,0 +1,39 @@
1
+ #!/bin/bash
2
+
3
+ #general param
4
+ export RESNET_SIZE=50
5
+ export IMAGENET_DIR=/root/datasets/imagenet/tf_records
6
+ export TRAIN_STEPS=999
7
+ export DISPLAY_STEPS=90000
8
+ export STEPS_PER_LOOP=90000
9
+ export USE_LARS_OPTIMIZER=1
10
+ export CPU_BIND_TYPE=cpu
11
+ export EPOCHS_BETWEEN_EVALS=4
12
+ export USE_MLPERF=1
13
+ export NO_EVAL=0
14
+ export TF_BF16_CONVERSION=1
15
+ export USE_HOROVOD=1
16
+ export DATASET_CACHE=true
17
+ export SYNTHETIC_DATA=false
18
+ export MODELING=false
19
+ export NUM_TRAIN_FILES=1024
20
+ export NUM_EVAL_FILES=256
21
+ export HOROVOD_FUSION_THRESHOLD=0
22
+ export NUM_WORKERS_PER_HLS=8
23
+ export HLS_TYPE=HLS2
24
+
25
+ #hp param
26
+ export NUM_WORKERS=8
27
+ export BATCH_SIZE=256
28
+ export TRAIN_EPOCHS=35
29
+ export LARS_DECAY_EPOCHS=36
30
+ export EVAL_OFFSET_EPOCHS=3
31
+ export WARMUP_EPOCHS=3
32
+ export BASE_LEARNING_RATE=9
33
+ export WEIGHT_DECAY=0.00005
34
+ export LR_MOMENTUM=0.9
35
+ export LABEL_SMOOTH=0.1
36
+ export STOP_THRESHOLD=0.759
37
+
38
+ unset MPI_TCP_INCLUDE
39
+ unset TRAIN_AND_EVAL
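batch_256.cfg only exports environment variables; the launcher and training scripts read them back from the process environment. A hedged illustration of how a script might pick up a few of these values (the variable names match the config, but this parsing code is illustrative rather than the benchmark's own):

    import os

    batch_size = int(os.environ.get("BATCH_SIZE", "256"))
    train_epochs = int(os.environ.get("TRAIN_EPOCHS", "35"))
    base_lr = float(os.environ.get("BASE_LEARNING_RATE", "9"))
    use_lars = os.environ.get("USE_LARS_OPTIMIZER", "0") == "1"

    print(batch_size, train_epochs, base_lr, use_lars)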
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/launch_keras_resnet_hvd.sh ADDED
@@ -0,0 +1,5 @@
1
+ #!/bin/bash
2
+
3
+ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
4
+ cd $SCRIPT_DIR/..
5
+ ../scripts/launch_keras_resnet_hvd.sh "$@"
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/list_affinity_topology_bare_metal.sh ADDED
@@ -0,0 +1,149 @@
1
+ #!/bin/bash
2
+
3
+ #Description
4
+ #This script outputs a file for each moduleID.
5
+ #These files contain the Hthread_sequence to which the process is bound (this is a restriction and not a reservation).
6
+ #We do this by getting the mapping of (ModuleID, pcie_bus_id) from hl-smi.
7
+ #Then we map the 2-tuple to a NUMA node by reading the file
8
+ #/sys/bus/pci/devices/<pcie_bus_id>/numa_node
9
+ #Now we have a 3-tuple (ModuleID, pcie_bus_id, numa_node).
10
+ #Lastly we get the Hthread_sequence that corresponds to that numa_node from lscpu, so we end up with
11
+ #(ModuleID, pcie_bus_id, numa_node, Hthread_sequence).
12
+ #The Hthread_sequence is then used to bind the process to the specific threads on the NUMA node closest to the PCIe bus.
13
+
14
+ affinity_print()
15
+ {
16
+ echo "Affinity: "$1
17
+ }
18
+
19
+ hl_smi_check()
20
+ {
21
+ if [[ ! -x `which hl-smi` ]];
22
+ then
23
+ affinity_print "hl-smi could not be found, exiting"
24
+ exit 1
25
+ fi
26
+ }
27
+
28
+ check_env()
29
+ {
30
+ if [[ -z "$NUMA_MAPPING_DIR" ]];
31
+ then
32
+ affinity_print "Missing env variable \"NUMA_MAPPING_DIR\", exiting!"
33
+ exit 1
34
+ fi
35
+ }
36
+
37
+ create_temp_files()
38
+ {
39
+ # create a temp directory, mktemp is used to create a temp directory with a unique name
40
+ temp_dir=$(mktemp -d)
41
+
42
+ # create temp files for holding outputs
43
+ file_hl_smi=$temp_dir/hl_smi.txt
44
+ file_module_id=$temp_dir/module_id.txt
45
+ file_pcie_bus_id=$temp_dir/pcie_bus_id.txt
46
+ file_pcie_numa=$temp_dir/pcie_numa.txt
47
+ file_hl_smi=$temp_dir/hl_smi.txt
48
+ file_configuration_table=$temp_dir/configuration_table.txt
49
+ file_final_output=$NUMA_MAPPING_DIR/.habana_module_topo
50
+ }
51
+
52
+ create_configuration_table()
53
+ {
54
+ # save the entire hl-smi output to file
55
+ hl-smi -L > $file_hl_smi
56
+
57
+ #check that the driver is up
58
+ if [ $? -eq 1 ]; then
59
+ affinity_print "Issue while trying to run hl-smi, aborting..."
60
+ exit 1
61
+ fi
62
+
63
+ # get the module IDs (unique identifier for each gaudi)
64
+ grep "Module ID" $file_hl_smi > $file_module_id
65
+
66
+ # get the bus IDs
67
+ grep "Bus Id" $file_hl_smi > $file_pcie_bus_id
68
+
69
+ # Get the numa for each PCIE bus
70
+ for i in `cat $file_pcie_bus_id|awk '{print $4}'`; do
71
+ numa_node=`cat /sys/bus/pci/devices/$i/numa_node`
72
+ if [ $numa_node -ge 0 ]; then
73
+ echo $numa_node >> $file_pcie_numa
74
+ else
75
+ for i in `hl-smi -L|grep "Bus Id"|awk '{print $4}'`; do affinity_print "PCIE:"$i", NUMA:"`cat /sys/bus/pci/devices/$i/numa_node`; done
76
+ affinity_print "Numa mapping isn't set properly, you are most likley running on an unsupported VM, aborting..."
77
+ exit 1
78
+ fi
79
+ done
80
+
81
+ #append output files horizontally
82
+ paste $file_module_id $file_pcie_bus_id $file_pcie_numa | awk ' {print $4,$8,$9}' | sort -k1 > $file_configuration_table
83
+ }
84
+
85
+
86
+ create_thread_list()
87
+ {
88
+ no_of_numa_nodes=`lscpu|grep "NUMA node(s):"|awk '{print $3}'`
89
+ no_of_gaudis=`cat $file_configuration_table|wc -l`
90
+ no_of_used_numa=`cat $file_pcie_numa | uniq | wc -l`
91
+
92
+
93
+ for module_id in $(seq 0 $(($no_of_gaudis-1))); do
94
+ #grab one pcieid at a time (busID)
95
+ pcie_bus_id=`cat $file_configuration_table | awk '{print $2}' | sed -n $(($module_id+1))p`
96
+
97
+ #get the corresponding NUMA node (pcie_numa)
98
+ numa_node=`cat /sys/bus/pci/devices/$pcie_bus_id/numa_node`
99
+
100
+ #special barcelona configuration where two sockets are configured to be 4 virtual numa nodes
101
+ if [[ $no_of_used_numa -eq 2 && $no_of_numa_nodes -eq 4 ]]; then
102
+ #get current node (moduleID)
103
+ curr_node=`cat $file_configuration_table | awk '{print ","$3,$1}'| grep ",$numa_node" | awk '{print $2}'|head -1`
104
+ if [ $module_id -eq $curr_node ]; then
105
+ numa_node=$(($numa_node-1))
106
+ fi
107
+ fi
108
+
109
+ #get the list of threads
110
+ if [ $numa_node -ge 0 ]; then
111
+ vector=`lscpu --parse | grep ",$numa_node,,"|awk -F"," '{print $1}'`
112
+ vector=`echo $vector | tr ' ' ,`
113
+ echo $vector > $NUMA_MAPPING_DIR/.habana_moduleID$module_id
114
+ echo $vector >> $temp_dir/.module
115
+ fi
116
+ done
117
+ }
118
+
119
+
120
+ add_thread_list_to_config_table()
121
+ {
122
+ #put it all together
123
+ echo "ModID BusID NUMA CPUs: " > $file_final_output
124
+ echo "===== ===== ===== ===== " >> $file_final_output
125
+ paste $file_configuration_table $temp_dir/.module >> $file_final_output
126
+ }
127
+
128
+ clean_up()
129
+ {
130
+ #remove the temp dir
131
+ if [ ! -z "$temp_dir" ]; then
132
+ rm -fr $temp_dir
133
+ fi
134
+ }
135
+
136
+ main()
137
+ {
138
+ check_env
139
+ hl_smi_check
140
+ create_temp_files
141
+ create_configuration_table
142
+ create_thread_list
143
+ add_thread_list_to_config_table
144
+ clean_up
145
+ affinity_print "Script finished successfully"
146
+ exit 0
147
+ }
148
+
149
+ main
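The script leaves one file per Gaudi module, $NUMA_MAPPING_DIR/.habana_moduleID<N>, holding a comma-separated list of host threads on the NUMA node closest to that device, plus a combined .habana_module_topo table. A hedged sketch of how a per-module worker could consume such a file to restrict its own CPU affinity (os.sched_setaffinity is Linux-only; the directory and module id are example values):

    import os

    numa_mapping_dir = os.environ.get("NUMA_MAPPING_DIR", "/tmp/affinity")  # example default
    module_id = 0                                                           # example module

    with open(os.path.join(numa_mapping_dir, ".habana_moduleID%d" % module_id)) as f:
        cpus = {int(c) for c in f.read().strip().split(",") if c}

    # Restrict (not reserve) this process to the threads closest to the device.
    os.sched_setaffinity(0, cpus)
    print("pinned to", sorted(cpus))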
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/debug.py ADDED
@@ -0,0 +1,107 @@
1
+ # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ ###############################################################################
16
+ # Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
17
+ ###############################################################################
18
+
19
+ from absl import flags
20
+ from absl import logging
21
+ from tensorflow.core.protobuf import debug_event_pb2
22
+ from tensorflow.python.debug.lib import debug_events_writer
23
+ from tensorflow.python.framework import op_callbacks
24
+ from tensorflow.python.ops import gen_debug_ops
25
+ import tensorflow as tf
26
+ import re
27
+ import json
28
+
29
+ flags.DEFINE_string(name='dump_config', default=None, help='Defines config for tensor dumping')
30
+
31
+
32
+ class _DumpCallback(object):
33
+ def __init__(self, dump_root, tensor_debug_mode, circular_buffer_size, op_regex):
34
+ self._dump_root = dump_root
35
+ self._tensor_debug_mode = debug_event_pb2.TensorDebugMode.Value(tensor_debug_mode)
36
+ self._circular_buffer_size = circular_buffer_size
37
+ self._op_regex = re.compile(op_regex) if isinstance(op_regex, str) else op_regex
38
+ self._tfdbg_run_id = ''
39
+ self._dump_op_counter = 0
40
+
41
+ debug_writer_args = {
42
+ "dump_root" : self._dump_root,
43
+ "circular_buffer_size": self._circular_buffer_size
44
+ }
45
+
46
+ if tf.__version__.startswith("2.4"):
47
+ debug_writer_args["tfdbg_run_id"] = self._tfdbg_run_id
48
+
49
+ self._writer = debug_events_writer.DebugEventsWriter(**debug_writer_args)
50
+
51
+ def callback(self, op_type, inputs, attrs, outputs, op_name=None, graph=None):
52
+ if op_name is not None and self._op_regex.match(op_name):
53
+ graph_name = "missing-graph-name"
54
+ if graph is not None and hasattr(graph, "name"):
55
+ graph_name=graph.name
56
+
57
+ logging.info("Adding dump op for '%s' of type '%s' from graph '%s'" %(op_name, op_type, graph_name))
58
+
59
+ new_outputs = []
60
+
61
+ for output_slot, output in enumerate(outputs):
62
+ debug_identity_op_kwargs = {
63
+ "tfdbg_context_id": graph_name,
64
+ "op_name": op_name,
65
+ "output_slot": output_slot,
66
+ "tensor_debug_mode": self._tensor_debug_mode,
67
+ "debug_urls": ["file://%s" % self._dump_root],
68
+ "name": "dump_%d" % self._dump_op_counter
69
+ }
70
+
71
+ if tf.__version__.startswith("2.4"):
72
+ debug_identity_op_kwargs["circular_buffer_size"] = self._circular_buffer_size
73
+ debug_identity_op_kwargs["tfdbg_run_id"] = self._tfdbg_run_id
74
+
75
+ self._dump_op_counter = self._dump_op_counter + 1
76
+ new_outputs.append(gen_debug_ops.debug_identity_v2(output, **debug_identity_op_kwargs))
77
+
78
+ return new_outputs
79
+ else:
80
+ return None
81
+
82
+ def __enter__(self, *args, **kwargs):
83
+ op_callbacks.add_op_callback(self.callback)
84
+ logging.info("Enabled tensor dumping")
85
+
86
+ def __exit__(self, *args, **kwargs):
87
+ op_callbacks.remove_op_callback(self.callback)
88
+ logging.info("Disabled tensor dumping")
89
+
90
+ def __del__(self):
91
+ self._writer.Close()
92
+
93
+ class _Dummy(object):
94
+ def __enter__(self, *args, **kwargs):
95
+ pass
96
+ def __exit__(self, *args, **kwargs):
97
+ pass
98
+
99
+ def dump_callback(config_file=None):
100
+ if config_file is not None:
101
+ kwargs = json.load(open(config_file, 'r'))
102
+ return _DumpCallback(**kwargs)
103
+ try:
104
+ kwargs = json.load(open(flags.FLAGS.dump_config, 'r'))
105
+ return _DumpCallback(**kwargs)
106
+ except:
107
+ return _Dummy()
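dump_callback loads a JSON file and passes its keys straight into _DumpCallback, so a config must provide dump_root, tensor_debug_mode, circular_buffer_size, and op_regex. A hedged example of writing such a config and enabling dumping around a training step (paths and the regex are illustrative; FULL_TENSOR is one of the TensorDebugMode enum names):

    import json

    config = {
        "dump_root": "/tmp/tensor_dumps",   # where the DebugEventsWriter files go
        "tensor_debug_mode": "FULL_TENSOR",
        "circular_buffer_size": 1000,
        "op_regex": ".*bn.*",               # dump ops whose name matches this regex
    }
    with open("dump_config.json", "w") as f:
        json.dump(config, f)

    # Later, in the training script (run_one_training_step is hypothetical):
    # with dump_callback("dump_config.json"):
    #     run_one_training_step()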
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/modeling/__init__.py ADDED
File without changes
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/modeling/performance.py ADDED
@@ -0,0 +1,56 @@
1
+ # Lint as: python3
2
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # ==============================================================================
16
+ """Functions and classes related to training performance."""
17
+
18
+ import tensorflow as tf
19
+
20
+
21
+ def configure_optimizer(optimizer,
22
+ use_float16=False,
23
+ use_graph_rewrite=False,
24
+ loss_scale="dynamic"):
25
+ """Configures optimizer object with performance options."""
26
+ if use_float16:
27
+ # Wraps optimizer with a LossScaleOptimizer. This is done automatically
28
+ # in compile() with the "mixed_float16" policy, but since we do not call
29
+ # compile(), we must wrap the optimizer manually.
30
+ optimizer = (
31
+ tf.keras.mixed_precision.LossScaleOptimizer(
32
+ optimizer, loss_scale=loss_scale))
33
+ if use_graph_rewrite:
34
+ # Note: the model dtype must be 'float32', which will ensure
35
+ # tf.keras.mixed_precision and
36
+ # tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite do not double
37
+ # up.
38
+ optimizer = tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
39
+ optimizer)
40
+ return optimizer
41
+
42
+
43
+ def set_mixed_precision_policy(dtype, loss_scale=None):
44
+ """Sets mix precision policy."""
45
+ if dtype == tf.float16:
46
+ policy = tf.keras.mixed_precision.Policy(
47
+ 'mixed_float16', loss_scale=loss_scale)
48
+ tf.keras.mixed_precision.set_global_policy(policy)
49
+ elif dtype == tf.bfloat16:
50
+ policy = tf.keras.mixed_precision.Policy(
51
+ 'mixed_bfloat16')
52
+ tf.keras.mixed_precision.set_global_policy(policy)
53
+ elif dtype == tf.float32:
54
+ tf.keras.mixed_precision.set_global_policy('float32')
55
+ else:
56
+ raise ValueError("Unexpected dtype: %s" % dtype)
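On Gaudi this benchmark typically runs with bfloat16 (see TF_BF16_CONVERSION=1 in the configs), in which case set_mixed_precision_policy installs the 'mixed_bfloat16' Keras policy and configure_optimizer returns the optimizer unchanged, since bf16 needs no loss scaling. A minimal usage sketch; the import path is an assumption:

    import tensorflow as tf
    from TensorFlow.common.modeling import performance  # assumed import path

    performance.set_mixed_precision_policy(tf.bfloat16)
    optimizer = performance.configure_optimizer(
        tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9),
        use_float16=False)  # no LossScaleOptimizer wrapping needed for bf16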
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/tb_utils.py ADDED
@@ -0,0 +1,357 @@
1
+ import os
2
+ import time
3
+ import tensorflow as tf
4
+ from copy import deepcopy
5
+ from tensorboard.plugins.hparams import api as hp
6
+ from tensorflow.python.eager import context
7
+ from tensorflow.keras import backend as K
8
+ from tensorflow.python.ops import summary_ops_v2
9
+ from tensorflow.python.summary import summary as tf_summary
10
+ from tensorflow.python.training.summary_io import SummaryWriterCache
11
+ from tensorflow.compat.v1.keras.callbacks import TensorBoard, Callback
12
+
13
+
14
+ def _remove_prefix(s, prefix):
15
+ if s.startswith(prefix):
16
+ s = s[len(prefix):]
17
+ return s
18
+
19
+
20
+ def _parse_precision():
21
+ flag = os.environ.get('TF_BF16_CONVERSION', '0')
22
+ flag = flag.lower()
23
+ try:
24
+ value = int(flag)
25
+ except:
26
+ value = -1
27
+
28
+ if flag == 'false' or value == 0:
29
+ return 'fp32'
30
+ elif flag == 'true' or value == 1:
31
+ return 'bf16'
32
+ return flag
33
+
34
+
35
+ def _set_precision_if_missing(hparams: dict):
36
+ if 'precision' not in hparams:
37
+ hparams['precision'] = _parse_precision()
38
+ return hparams
39
+
40
+
41
+ def _copy_and_clean_hparams(hparams: dict):
42
+ hparams_ = dict()
43
+ for name, value in hparams.items():
44
+ if isinstance(value, (str, bool, int, float)):
45
+ hparams_[name] = value
46
+ continue
47
+
48
+ try:
49
+ hparams_[name] = str(value)
50
+ tf.compat.v1.logging.info(
51
+ f'Type of parameter "{name}" is not one of (bool, int, float, str). '
52
+ 'It will be saved as a string.')
53
+ except:
54
+ tf.compat.v1.logging.info(
55
+ f'Conversion of parameter "{name}" to string failed. '
56
+ 'Parameter will not be saved.')
57
+
58
+ return hparams_
59
+
60
+
61
+ def write_hparams_v1(writer, hparams: dict):
62
+ hparams = _copy_and_clean_hparams(hparams)
63
+ hparams = _set_precision_if_missing(hparams)
64
+
65
+ with tf.compat.v1.Graph().as_default():
66
+ if isinstance(writer, str):
67
+ writer = SummaryWriterCache.get(writer)
68
+ summary = hp.hparams_pb(hparams).SerializeToString()
69
+ writer.add_summary(summary)
70
+
71
+
72
+ def write_hparams_v2(writer, hparams: dict):
73
+ hparams = _copy_and_clean_hparams(hparams)
74
+ hparams = _set_precision_if_missing(hparams)
75
+
76
+ with writer.as_default():
77
+ hp.hparams(hparams)
78
+
79
+
80
+ class ExamplesPerSecondEstimatorHook(tf.compat.v1.train.StepCounterHook):
81
+ """Calculate and report global_step/sec and examples/sec during runtime."""
82
+ # Copy-pasted from tensorflow_estimator/python/estimator/tpu/tpu_estimator.py
83
+
84
+ def __init__(self,
85
+ batch_size=None,
86
+ every_n_steps=1,
87
+ every_n_secs=None,
88
+ output_dir=None,
89
+ summary_writer=None,
90
+ extra_metrics=None,
91
+ log_global_step=False,
92
+ verbose=False):
93
+ super().__init__(
94
+ every_n_steps=every_n_steps,
95
+ every_n_secs=every_n_secs,
96
+ output_dir=output_dir,
97
+ summary_writer=summary_writer)
98
+ self._metrics = extra_metrics or {}
99
+ self._verbose = verbose
100
+ if log_global_step:
101
+ # Because Estimator already logs global_step/sec by default
102
+ # when log_step_count_steps is not None, saving it here
103
+ # would duplicate events in TensorBoard.
104
+ # Use log_global_step=True only when RunConfig.log_step_count_steps=None.
105
+ self._metrics['global_step/sec'] = 1
106
+ if batch_size is not None:
107
+ self._metrics['examples/sec'] = batch_size
108
+
109
+ def _add_summary(self, tag, value, step):
110
+ Summary = tf.compat.v1.Summary
111
+ global_step_summary = Summary(value=[
112
+ Summary.Value(tag=tag, simple_value=value)
113
+ ])
114
+ self._summary_writer.add_summary(global_step_summary, step)
115
+ if self._verbose:
116
+ tf.compat.v1.logging.info(f'{tag}: {value}')
117
+
118
+ def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
119
+ global_step_per_sec = elapsed_steps / elapsed_time
120
+ if self._summary_writer is not None:
121
+ for name, factor in self._metrics.items():
122
+ value = factor * global_step_per_sec
123
+ self._add_summary(name, value, global_step)
124
+
125
+ def after_create_session(self, session, coord):
126
+ self._timer.reset()
127
+
128
+
129
+ class ExamplesPerSecondKerasHookV1(Callback):
130
+ def __init__(self,
131
+ every_n_steps=1,
132
+ every_n_secs=None,
133
+ output_dir=None,
134
+ summary_writer=None,
135
+ batch_size=None):
136
+ self.writer = summary_writer or SummaryWriterCache.get(output_dir)
137
+ self._timer = tf.compat.v1.train.SecondOrStepTimer(
138
+ every_n_secs, every_n_steps)
139
+ self._total_examples = 0
140
+ self._should_trigger = True
141
+ self._batch_size = batch_size
142
+
143
+ def on_train_begin(self, logs=None):
144
+ self._timer.reset()
145
+
146
+ def on_train_batch_begin(self, batch, logs=None):
147
+ self._should_trigger = self._timer.should_trigger_for_step(
148
+ logs.get('batch', batch))
149
+
150
+ def on_train_batch_end(self, batch, logs=None):
151
+ step = logs.get('batch', batch)
152
+ self._total_examples += logs.get('size', 0)
153
+ if self._should_trigger:
154
+ elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
155
+ step)
156
+ if elapsed_time is not None:
157
+ total_examples = self._total_examples
158
+ if self._batch_size is not None:
159
+ total_examples = self._batch_size * elapsed_steps
160
+ self._log_and_record(
161
+ elapsed_steps, elapsed_time, step, total_examples)
162
+ self._total_examples = 0
163
+
164
+ def _log_and_record(self, elapsed_steps, elapsed_time,
165
+ global_step, total_examples=None):
166
+ Summary = tf.compat.v1.Summary
167
+ global_step_per_sec = elapsed_steps / elapsed_time
168
+ if self.writer is not None:
169
+ global_step_summary = Summary(value=[
170
+ Summary.Value(
171
+ tag='global_step/sec', simple_value=global_step_per_sec)
172
+ ])
173
+ self.writer.add_summary(global_step_summary, global_step)
174
+ if total_examples is not None:
175
+ examples_per_sec = total_examples / elapsed_time
176
+ example_summary = Summary(value=[
177
+ Summary.Value(tag='examples/sec',
178
+ simple_value=examples_per_sec)
179
+ ])
180
+ self.writer.add_summary(example_summary, global_step)
181
+
182
+
183
+ class ExamplesPerSecondKerasHookV2(ExamplesPerSecondKerasHookV1):
184
+ def __init__(self,
185
+ every_n_steps=1,
186
+ every_n_secs=None,
187
+ output_dir=None,
188
+ summary_writer=None,
189
+ batch_size=None):
190
+ writer = summary_writer or summary_ops_v2.create_file_writer_v2(output_dir)
191
+ super().__init__(every_n_steps, every_n_secs, output_dir, writer, batch_size)
192
+
193
+ def _log_and_record(self, elapsed_steps, elapsed_time,
194
+ global_step, total_examples=None):
195
+ global_step_per_sec = elapsed_steps / elapsed_time
196
+ if self.writer is not None:
197
+ with self.writer.as_default(), summary_ops_v2.always_record_summaries():
198
+ summary_ops_v2.scalar('global_step/sec', global_step_per_sec,
199
+ step=global_step)
200
+ if total_examples is not None:
201
+ examples_per_sec = total_examples / elapsed_time
202
+ summary_ops_v2.scalar('examples/sec', examples_per_sec,
203
+ step=global_step)
204
+
205
+
206
+ ExamplesPerSecondKerasHook = ExamplesPerSecondKerasHookV1
207
+
208
+
209
+ class TBSummary(object):
210
+ """
211
+ Creates a proxy for FileWriter for TensorBoard.
212
+
213
+ :param log_dir: - path where experiment is running (usually the same as
214
+ model_dir in Estimator)
215
+ """
216
+
217
+ def __init__(self, log_dir: str):
218
+ super().__init__()
219
+ self._log_dir = log_dir
220
+ self._session = None
221
+
222
+ def __enter__(self):
223
+ self._session = tf.compat.v1.Session()
224
+ return self
225
+
226
+ def __exit__(self, exc_type, exc_val, exc_tb):
227
+ if self._session:
228
+ self._session.close()
229
+ self._session = None
230
+
231
+ def add_scalar(self, tag, value, global_step=None):
232
+ with self._session:
233
+ writer = SummaryWriterCache.get(self._log_dir)
234
+ summary = tf.compat.v1.Summary(
235
+ value=[tf.compat.v1.Summary.Value(tag=tag, simple_value=value)])
236
+ event = tf.compat.v1.Event(summary=summary)
237
+ event.wall_time = time.time()
238
+ event.step = global_step
239
+ writer.add_event(event)
240
+
241
+
242
+ class TensorBoardWithHParamsV1(TensorBoard):
243
+ """
244
+ Adds TensorBoard visualization to training process.
245
+
246
+ Writes training tfevent file into default log directory, but
247
+ stores evaluation in log_dir/eval subdirectory.
248
+ """
249
+
250
+ def __init__(self, hparams, *args, **kwargs):
251
+ super().__init__(*args, **kwargs)
252
+ self.hparams = hparams
253
+ self._train_summary = None
254
+ self._eval_summary = None
255
+
256
+ def _switch_writer(self, mode):
257
+ self.writer = self._train_summary if mode == 'train' else self._eval_summary
258
+
259
+ def _init_writer(self, model):
260
+ """Sets file writer."""
261
+ if context.executing_eagerly():
262
+ raise NotImplementedError('hook does not support eager execution')
263
+
264
+ self._train_summary = SummaryWriterCache.get(self.log_dir)
265
+ self._eval_summary = SummaryWriterCache.get(
266
+ os.path.join(self.log_dir, 'eval'))
267
+ self._switch_writer('train')
268
+
269
+ write_hparams_v1(self.writer, self.hparams)
270
+
271
+ def _write_custom_summaries(self, step, logs=None):
272
+ """
273
+ This method works on the assumption that metrics containing `val`
274
+ in name are related to validation (that's the default in Keras).
275
+ """
276
+
277
+ logs = logs or {}
278
+ train_logs = {}
279
+ eval_logs = {}
280
+
281
+ for name, value in logs.items():
282
+ if 'val' in name:
283
+ if name.startswith('batch_val_'):
284
+ name = 'batch_' + _remove_prefix(name, 'batch_val_')
285
+ elif name.startswith('epoch_val_'):
286
+ name = _remove_prefix(name, 'epoch_val_')
287
+ eval_logs[name] = value
288
+ else:
289
+ if name.startswith('batch_'):
290
+ name = _remove_prefix(name, 'batch_')
291
+ train_logs[name] = value
292
+
293
+ self._switch_writer('eval')
294
+ super()._write_custom_summaries(step, eval_logs)
295
+ self._switch_writer('train')
296
+ super()._write_custom_summaries(step, train_logs)
297
+
298
+
299
+ class TensorBoardWithHParamsV2(TensorBoard):
300
+ """
301
+ Adds TensorBoard visualization to training process.
302
+
303
+ Writes training tfevent file into default log directory, but
304
+ stores evaluation in log_dir/eval subdirectory.
305
+ """
306
+
307
+ def __init__(self, hparams, *args, **kwargs):
308
+ super().__init__(*args, **kwargs)
309
+ self.hparams = hparams
310
+
311
+ def set_model(self, model):
312
+ """Sets Keras model and writes graph if specified."""
313
+ self.model = model
314
+ self._log_write_dir = self._get_log_write_dir()
315
+
316
+ self._train_dir = self._log_write_dir
317
+ self._train_step = self.model._train_counter # pylint: disable=protected-access
318
+
319
+ self._val_dir = os.path.join(self._log_write_dir, 'eval')
320
+ self._val_step = self.model._test_counter # pylint: disable=protected-access
321
+
322
+ self._writers = {} # Resets writers.
323
+
324
+ self._should_write_train_graph = False
325
+ if self.write_graph:
326
+ self._write_keras_model_summary()
327
+ self._should_write_train_graph = True
328
+ if self.embeddings_freq:
329
+ self._configure_embeddings()
330
+
331
+ write_hparams_v2(self._train_writer, self.hparams)
332
+
333
+ def _log_epoch_metrics(self, epoch, logs):
334
+ """Writes epoch metrics out as scalar summaries.
335
+
336
+ Arguments:
337
+ epoch: Int. The global step to use for TensorBoard.
338
+ logs: Dict. Keys are scalar summary names, values are scalars.
339
+ """
340
+ if not logs:
341
+ return
342
+
343
+ train_logs = {k: v for k,
344
+ v in logs.items() if not k.startswith('val_')}
345
+ val_logs = {k: v for k, v in logs.items() if k.startswith('val_')}
346
+ train_logs = self._collect_learning_rate(train_logs)
347
+
348
+ with summary_ops_v2.always_record_summaries():
349
+ if train_logs:
350
+ with self._train_writer.as_default():
351
+ for name, value in train_logs.items():
352
+ summary_ops_v2.scalar(name, value, step=epoch)
353
+ if val_logs:
354
+ with self._val_writer.as_default():
355
+ for name, value in val_logs.items():
356
+ name = name[4:] # Remove 'val_' prefix.
357
+ summary_ops_v2.scalar(name, value, step=epoch)
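TensorBoardWithHParamsV2 behaves like the stock Keras TensorBoard callback but also records the run's hyperparameters through the hparams plugin, and ExamplesPerSecondKerasHookV2 adds global_step/sec and examples/sec scalars. A hedged TF2 usage sketch with placeholder hyperparameters and log directory; the import path is an assumption:

    import tensorflow as tf
    from TensorFlow.common import tb_utils  # assumed import path

    hparams = {"batch_size": 256, "train_epochs": 35, "optimizer": "LARS"}  # placeholders

    callbacks = [
        tb_utils.TensorBoardWithHParamsV2(hparams, log_dir="/tmp/tb_logs"),
        tb_utils.ExamplesPerSecondKerasHookV2(every_n_steps=10,
                                              output_dir="/tmp/tb_logs",
                                              batch_size=256),
    ]
    # model.fit(train_ds, epochs=35, callbacks=callbacks)  # model/dataset omitted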
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/__init__.py ADDED
@@ -0,0 +1,14 @@
1
+ # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================