Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- docker/intel_code/llama13b/Model-References/PyTorch/examples/computer_vision/data/MNIST/raw/t10k-images-idx3-ubyte.gz +3 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/computer_vision/data/MNIST/raw/train-labels-idx1-ubyte.gz +3 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/CODEOWNERS +1 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/LICENSE +265 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/MANIFEST.in +2 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/README.md +110 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/SECURITY.md +41 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/examples/pretrain_gpt_distributed.sh +48 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/examples/pretrain_t5_distributed_with_mp.sh +48 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/pretrain_bert.py +147 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/pretrain_gpt.py +294 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/pretrain_ict.py +167 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/pretrain_t5.py +134 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/pretrain_vit.py +91 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/requirements.txt +5 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/setup.py +91 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/LICENSE +204 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/bert_config.json +13 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/checkpoints/.keep +0 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/BooksDownloader.py +26 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/BookscorpusTextFormatting.py +32 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/Downloader.py +86 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/GLUEDownloader.py +46 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/GooglePretrainedWeightDownloader.py +158 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/NVIDIAPretrainedWeightDownloader.py +27 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/SquadDownloader.py +54 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/TextSharding.py +327 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/WikiDownloader.py +55 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/WikicorpusTextFormatting.py +46 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/__init__.py +12 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/bertPrep.py +363 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/create_datasets_from_start.sh +51 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/squad/squad_download.sh +73 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/file_utils.py +263 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/gpu_migration_logs/gpu_migration_5494.log +84 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/inference.py +349 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/patches/minimal_changes.diff +28 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/patches/performance_improvements.diff +108 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/patches/use_packed_dataset.diff +430 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/processors/__init__.py +0 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/processors/glue.py +325 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/requirements.txt +16 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/results/.keep +0 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/results/checkpoints/lddl_log/node-0.txt +0 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/results/checkpoints/lddl_log/node-0_local-0.txt +0 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/results/dllogger.json +0 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/run.sub +76 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/run_pretraining.py +818 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/run_squad.py +1200 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/run_swag.py +570 -0
docker/intel_code/llama13b/Model-References/PyTorch/examples/computer_vision/data/MNIST/raw/t10k-images-idx3-ubyte.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8d422c7b0a1c1c79245a5bcf07fe86e33eeafee792b84584aec276f5a2dbc4e6
|
3 |
+
size 1648877
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/computer_vision/data/MNIST/raw/train-labels-idx1-ubyte.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3552534a0a558bbed6aed32b30c495cca23d567ec52cac8be1a0730e8010255c
|
3 |
+
size 28881
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/CODEOWNERS
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
* @jeffra @samyam @tjruwase @ShadenSmith @conglongli @awan-10 @cli99 @eltonzheng @minjiaz @RezaYazdaniAminabadi @duli2012 @mrwyattii @yaozhewei @arashb @xiaoxiawu-microsoft @guanhuawang
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/LICENSE
ADDED
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
The following applies to all files unless otherwise noted:
|
2 |
+
|
3 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
4 |
+
#
|
5 |
+
# Redistribution and use in source and binary forms, with or without
|
6 |
+
# modification, are permitted provided that the following conditions
|
7 |
+
# are met:
|
8 |
+
# * Redistributions of source code must retain the above copyright
|
9 |
+
# notice, this list of conditions and the following disclaimer.
|
10 |
+
# * Redistributions in binary form must reproduce the above copyright
|
11 |
+
# notice, this list of conditions and the following disclaimer in the
|
12 |
+
# documentation and/or other materials provided with the distribution.
|
13 |
+
# * Neither the name of NVIDIA CORPORATION nor the names of its
|
14 |
+
# contributors may be used to endorse or promote products derived
|
15 |
+
# from this software without specific prior written permission.
|
16 |
+
#
|
17 |
+
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
|
18 |
+
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
19 |
+
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
20 |
+
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
21 |
+
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
22 |
+
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
23 |
+
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
24 |
+
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
|
25 |
+
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
26 |
+
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
27 |
+
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
28 |
+
|
29 |
+
--
|
30 |
+
|
31 |
+
This repository also contains code from Hugging Face Inc., Google Research,
|
32 |
+
Facebook (from their Fairseq project), and Philip Popien. Files from these
|
33 |
+
organizations have notices at the top of each file. Below are licenses
|
34 |
+
used in those files, as indicated.
|
35 |
+
|
36 |
+
|
37 |
+
------------- LICENSE FOR huggingface and Google Research code --------------
|
38 |
+
|
39 |
+
|
40 |
+
Apache License
|
41 |
+
Version 2.0, January 2004
|
42 |
+
http://www.apache.org/licenses/
|
43 |
+
|
44 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
45 |
+
|
46 |
+
1. Definitions.
|
47 |
+
|
48 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
49 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
50 |
+
|
51 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
52 |
+
the copyright owner that is granting the License.
|
53 |
+
|
54 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
55 |
+
other entities that control, are controlled by, or are under common
|
56 |
+
control with that entity. For the purposes of this definition,
|
57 |
+
"control" means (i) the power, direct or indirect, to cause the
|
58 |
+
direction or management of such entity, whether by contract or
|
59 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
60 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
61 |
+
|
62 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
63 |
+
exercising permissions granted by this License.
|
64 |
+
|
65 |
+
"Source" form shall mean the preferred form for making modifications,
|
66 |
+
including but not limited to software source code, documentation
|
67 |
+
source, and configuration files.
|
68 |
+
|
69 |
+
"Object" form shall mean any form resulting from mechanical
|
70 |
+
transformation or translation of a Source form, including but
|
71 |
+
not limited to compiled object code, generated documentation,
|
72 |
+
and conversions to other media types.
|
73 |
+
|
74 |
+
"Work" shall mean the work of authorship, whether in Source or
|
75 |
+
Object form, made available under the License, as indicated by a
|
76 |
+
copyright notice that is included in or attached to the work
|
77 |
+
(an example is provided in the Appendix below).
|
78 |
+
|
79 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
80 |
+
form, that is based on (or derived from) the Work and for which the
|
81 |
+
editorial revisions, annotations, elaborations, or other modifications
|
82 |
+
represent, as a whole, an original work of authorship. For the purposes
|
83 |
+
of this License, Derivative Works shall not include works that remain
|
84 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
85 |
+
the Work and Derivative Works thereof.
|
86 |
+
|
87 |
+
"Contribution" shall mean any work of authorship, including
|
88 |
+
the original version of the Work and any modifications or additions
|
89 |
+
to that Work or Derivative Works thereof, that is intentionally
|
90 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
91 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
92 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
93 |
+
means any form of electronic, verbal, or written communication sent
|
94 |
+
to the Licensor or its representatives, including but not limited to
|
95 |
+
communication on electronic mailing lists, source code control systems,
|
96 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
97 |
+
Licensor for the purpose of discussing and improving the Work, but
|
98 |
+
excluding communication that is conspicuously marked or otherwise
|
99 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
100 |
+
|
101 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
102 |
+
on behalf of whom a Contribution has been received by Licensor and
|
103 |
+
subsequently incorporated within the Work.
|
104 |
+
|
105 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
106 |
+
this License, each Contributor hereby grants to You a perpetual,
|
107 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
108 |
+
copyright license to reproduce, prepare Derivative Works of,
|
109 |
+
publicly display, publicly perform, sublicense, and distribute the
|
110 |
+
Work and such Derivative Works in Source or Object form.
|
111 |
+
|
112 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
113 |
+
this License, each Contributor hereby grants to You a perpetual,
|
114 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
115 |
+
(except as stated in this section) patent license to make, have made,
|
116 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
117 |
+
where such license applies only to those patent claims licensable
|
118 |
+
by such Contributor that are necessarily infringed by their
|
119 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
120 |
+
with the Work to which such Contribution(s) was submitted. If You
|
121 |
+
institute patent litigation against any entity (including a
|
122 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
123 |
+
or a Contribution incorporated within the Work constitutes direct
|
124 |
+
or contributory patent infringement, then any patent licenses
|
125 |
+
granted to You under this License for that Work shall terminate
|
126 |
+
as of the date such litigation is filed.
|
127 |
+
|
128 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
129 |
+
Work or Derivative Works thereof in any medium, with or without
|
130 |
+
modifications, and in Source or Object form, provided that You
|
131 |
+
meet the following conditions:
|
132 |
+
|
133 |
+
(a) You must give any other recipients of the Work or
|
134 |
+
Derivative Works a copy of this License; and
|
135 |
+
|
136 |
+
(b) You must cause any modified files to carry prominent notices
|
137 |
+
stating that You changed the files; and
|
138 |
+
|
139 |
+
(c) You must retain, in the Source form of any Derivative Works
|
140 |
+
that You distribute, all copyright, patent, trademark, and
|
141 |
+
attribution notices from the Source form of the Work,
|
142 |
+
excluding those notices that do not pertain to any part of
|
143 |
+
the Derivative Works; and
|
144 |
+
|
145 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
146 |
+
distribution, then any Derivative Works that You distribute must
|
147 |
+
include a readable copy of the attribution notices contained
|
148 |
+
within such NOTICE file, excluding those notices that do not
|
149 |
+
pertain to any part of the Derivative Works, in at least one
|
150 |
+
of the following places: within a NOTICE text file distributed
|
151 |
+
as part of the Derivative Works; within the Source form or
|
152 |
+
documentation, if provided along with the Derivative Works; or,
|
153 |
+
within a display generated by the Derivative Works, if and
|
154 |
+
wherever such third-party notices normally appear. The contents
|
155 |
+
of the NOTICE file are for informational purposes only and
|
156 |
+
do not modify the License. You may add Your own attribution
|
157 |
+
notices within Derivative Works that You distribute, alongside
|
158 |
+
or as an addendum to the NOTICE text from the Work, provided
|
159 |
+
that such additional attribution notices cannot be construed
|
160 |
+
as modifying the License.
|
161 |
+
|
162 |
+
You may add Your own copyright statement to Your modifications and
|
163 |
+
may provide additional or different license terms and conditions
|
164 |
+
for use, reproduction, or distribution of Your modifications, or
|
165 |
+
for any such Derivative Works as a whole, provided Your use,
|
166 |
+
reproduction, and distribution of the Work otherwise complies with
|
167 |
+
the conditions stated in this License.
|
168 |
+
|
169 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
170 |
+
any Contribution intentionally submitted for inclusion in the Work
|
171 |
+
by You to the Licensor shall be under the terms and conditions of
|
172 |
+
this License, without any additional terms or conditions.
|
173 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
174 |
+
the terms of any separate license agreement you may have executed
|
175 |
+
with Licensor regarding such Contributions.
|
176 |
+
|
177 |
+
6. Trademarks. This License does not grant permission to use the trade
|
178 |
+
names, trademarks, service marks, or product names of the Licensor,
|
179 |
+
except as required for reasonable and customary use in describing the
|
180 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
181 |
+
|
182 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
183 |
+
agreed to in writing, Licensor provides the Work (and each
|
184 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
185 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
186 |
+
implied, including, without limitation, any warranties or conditions
|
187 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
188 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
189 |
+
appropriateness of using or redistributing the Work and assume any
|
190 |
+
risks associated with Your exercise of permissions under this License.
|
191 |
+
|
192 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
193 |
+
whether in tort (including negligence), contract, or otherwise,
|
194 |
+
unless required by applicable law (such as deliberate and grossly
|
195 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
196 |
+
liable to You for damages, including any direct, indirect, special,
|
197 |
+
incidental, or consequential damages of any character arising as a
|
198 |
+
result of this License or out of the use or inability to use the
|
199 |
+
Work (including but not limited to damages for loss of goodwill,
|
200 |
+
work stoppage, computer failure or malfunction, or any and all
|
201 |
+
other commercial damages or losses), even if such Contributor
|
202 |
+
has been advised of the possibility of such damages.
|
203 |
+
|
204 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
205 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
206 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
207 |
+
or other liability obligations and/or rights consistent with this
|
208 |
+
License. However, in accepting such obligations, You may act only
|
209 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
210 |
+
of any other Contributor, and only if You agree to indemnify,
|
211 |
+
defend, and hold each Contributor harmless for any liability
|
212 |
+
incurred by, or claims asserted against, such Contributor by reason
|
213 |
+
of your accepting any such warranty or additional liability.
|
214 |
+
|
215 |
+
END OF TERMS AND CONDITIONS
|
216 |
+
|
217 |
+
APPENDIX: How to apply the Apache License to your work.
|
218 |
+
|
219 |
+
To apply the Apache License to your work, attach the following
|
220 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
221 |
+
replaced with your own identifying information. (Don't include
|
222 |
+
the brackets!) The text should be enclosed in the appropriate
|
223 |
+
comment syntax for the file format. We also recommend that a
|
224 |
+
file or class name and description of purpose be included on the
|
225 |
+
same "printed page" as the copyright notice for easier
|
226 |
+
identification within third-party archives.
|
227 |
+
|
228 |
+
Copyright [yyyy] [name of copyright owner]
|
229 |
+
|
230 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
231 |
+
you may not use this file except in compliance with the License.
|
232 |
+
You may obtain a copy of the License at
|
233 |
+
|
234 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
235 |
+
|
236 |
+
Unless required by applicable law or agreed to in writing, software
|
237 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
238 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
239 |
+
See the License for the specific language governing permissions and
|
240 |
+
limitations under the License.
|
241 |
+
|
242 |
+
------------- LICENSE FOR Facebook Fairseq code --------------
|
243 |
+
|
244 |
+
MIT License
|
245 |
+
|
246 |
+
Copyright (c) Facebook, Inc. and its affiliates.
|
247 |
+
|
248 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
249 |
+
of this software and associated documentation files (the "Software"), to deal
|
250 |
+
in the Software without restriction, including without limitation the rights
|
251 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
252 |
+
copies of the Software, and to permit persons to whom the Software is
|
253 |
+
furnished to do so, subject to the following conditions:
|
254 |
+
|
255 |
+
The above copyright notice and this permission notice shall be included in all
|
256 |
+
copies or substantial portions of the Software.
|
257 |
+
|
258 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
259 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
260 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
261 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
262 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
263 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
264 |
+
SOFTWARE.
|
265 |
+
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/MANIFEST.in
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
include megatron/data/Makefile
|
2 |
+
include megatron/data/helpers.cpp
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/README.md
ADDED
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Megatron-DeepSpeed BLOOM for GPU Migration Toolkit
|
2 |
+
This directory provides scripts for training large transformer language models such as Bloom at scale and is tested and maintained by Habana.
|
3 |
+
|
4 |
+
The model has been enabled using an experimental feature called GPU Migration Toolkit. For more details, refer to [GPU Migration Toolkit documentation](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Model_Porting/GPU_Migration_Toolkit/GPU_Migration_Toolkit.html). NOTE: You can review the [BLOOM model](https://github.com/HabanaAI/Model-References/tree/master/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed) enabled with a more traditional approach.
|
5 |
+
|
6 |
+
For more information on training and inference of deep learning models using Gaudi, refer to [developer.habana.ai](https://developer.habana.ai/resources/). To obtain model performance data, refer to the [Habana Model Performance Data page](https://developer.habana.ai/resources/habana-models-performance/#performance).
|
7 |
+
|
8 |
+
## Table of Contents
|
9 |
+
* [Model References](https://github.com/HabanaAI/Model-References/blob/master/README.md)
|
10 |
+
* [Model Overview](#model-overview)
|
11 |
+
* [Setup](#setup)
|
12 |
+
* [Training and Examples](#training-and-examples)
|
13 |
+
* [Enabling the Model from scratch](#enabling-the-model-from-scratch)
|
14 |
+
* [GPU Migration Toolkit Logs](#gpu-migration-toolkit-logs)
|
15 |
+
|
16 |
+
## Model Overview
|
17 |
+
This implementation is based on https://github.com/microsoft/Megatron-DeepSpeed at 0c58dbb. Megatron ([1](https://arxiv.org/pdf/1909.08053.pdf) and [2](https://arxiv.org/pdf/2104.04473.pdf)) is a large, powerful transformer developed by the Applied Deep Learning Research team at NVIDIA. The codebase is capable of efficiently training very large (hundreds of billions of parameters) language models with both model and data parallelism.
|
18 |
+
|
19 |
+
Enabling model functionality is made easy by GPU Migration Toolkit. While some performance optimizations are usually still required, GPU Migration Toolkit handles several steps required.
|
20 |
+
|
21 |
+
The following is a list of the different advantages of using GPU Migration Toolkit:
|
22 |
+
- Re-checking the device type in multiple places is not required.
|
23 |
+
- Passing additional ‘device’ parameters to scripts and objects is not required.
|
24 |
+
|
25 |
+
For further details, refer to [Enabling the Model from Scratch](#enabling-the-model-from-scratch).
|
26 |
+
|
27 |
+
### How to use
|
28 |
+
Users acknowledge and understand that the models referenced by Habana are mere examples of models that can be run on Gaudi. Users bear sole liability and responsibility to follow and comply with any third-party licenses pertaining to such models, and Habana Labs disclaims, and will not bear, any warranty or liability with respect to users' use of or compliance with such third-party licenses.
|
29 |
+
|
30 |
+
## Setup
|
31 |
+
Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html) to set up the environment including the $PYTHON environment variable.
|
32 |
+
To achieve the best performance, please follow the methods outlined in the Optimizing Training Platform guide. The guides will walk you through the process of setting up your system to run the model on Gaudi2.
|
33 |
+
|
34 |
+
### Clone Habana Model-References
|
35 |
+
In the docker container, clone this repository and switch to the branch that matches your SynapseAI version.
|
36 |
+
You can run the [`hl-smi`](https://docs.habana.ai/en/latest/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options) utility to determine the SynapseAI version.
|
37 |
+
|
38 |
+
```bash
|
39 |
+
git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References
|
40 |
+
```
|
41 |
+
|
42 |
+
For convenience, export a MODEL_REFERENCES_PATH & PYTHONPATH environment variable:
|
43 |
+
```bash
|
44 |
+
export MODEL_REFERENCES_ROOT=/path/to/Model-References
|
45 |
+
```
|
46 |
+
|
47 |
+
### Install Model Requirements
|
48 |
+
- In the docker container, go to the model directory:
|
49 |
+
```bash
|
50 |
+
cd Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed
|
51 |
+
```
|
52 |
+
|
53 |
+
- Install the required packages using pip:
|
54 |
+
```bash
|
55 |
+
pip install -r requirements.txt
|
56 |
+
```
|
57 |
+
### Install Habana DeepSpeed-fork
|
58 |
+
Please follow the instructions provided in the [DeepSpeed Installation Guide](https://docs.habana.ai/en/latest/PyTorch/DeepSpeed/Getting_Started_with_DeepSpeed/Getting_Started_with_DeepSpeed.html) to install deepspeed-fork.
|
59 |
+
|
60 |
+
### Install Apex
|
61 |
+
Please follow the instructions provided [here](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Model_Porting/GPU_Migration_Toolkit/GPU_Migration_Toolkit.html#limitations) to install Apex.
|
62 |
+
|
63 |
+
### Dataset Preparation
|
64 |
+
Follow the instructions in https://github.com/bigscience-workshop/bigscience/tree/master/data/oscar to download oscar-en full dataset. Note that the dataset takes around 550G of disk space.
|
65 |
+
|
66 |
+
## Training and Examples
|
67 |
+
Bloom13B model training is based on https://github.com/bigscience-workshop/bigscience/blob/master/train/tr1-13B-base/tr1-13B-round1.slurm.
|
68 |
+
|
69 |
+
### Multi-Card Training Examples
|
70 |
+
- Update data root directory with a path of your choice:
|
71 |
+
```bash
|
72 |
+
export HL_DATA_DIR_ROOT=/data/bigscience/oscar-en
|
73 |
+
```
|
74 |
+
- Run BLOOM on 8 HPUs with BF16 precision. Make sure to change the IP addresses in hostsfile according to your setup.
|
75 |
+
```bash
|
76 |
+
HL_HOSTSFILE=scripts/hostsfile HL_NUM_NODES=1 HL_PP=2 HL_TP=4 HL_DP=1 scripts/run_bloom13b.sh
|
77 |
+
```
|
78 |
+
## Enabling the Model from scratch
|
79 |
+
Habana provides scripts ready-to-use on Gaudi. Listed below are the steps to enable the model from a reference source.
|
80 |
+
This section outlines the overall procedure for enabling any given model with GPU Migration Toolkit feature. However, model-specific modifications will be required to enable the functionality and improve performance.
|
81 |
+
|
82 |
+
1. Clone the original GitHub repository and reset it to the commit this example is based on.
|
83 |
+
```bash
|
84 |
+
git clone https://github.com/microsoft/Megatron-DeepSpeed.git && cd Megatron-DeepSpeed && git checkout 0c58dbb
|
85 |
+
```
|
86 |
+
|
87 |
+
2. Navigate to Megatron-Deepspeed subfolder and install requirements:
|
88 |
+
```bash
|
89 |
+
pip install -r requirements.txt
|
90 |
+
```
|
91 |
+
|
92 |
+
3. Apply a set of patches. You can stop at any patch to see which steps have been performed to reach a particular level of functionality and performance.
|
93 |
+
The first patch adds the bare minimum to run the model on HPU. For purely functional changes (without performance optimization), run the following command:
|
94 |
+
```bash
|
95 |
+
git apply Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/patches/functional_changes.diff
|
96 |
+
```
|
97 |
+
First patch adds:
|
98 |
+
- GPU Migration Toolkit package import in main script (pretrain_gpt.py).
|
99 |
+
- Since HPU does not support CUDA kernels, there is no requirement to compile the kernels associated with CUDA (`megatron/initialize.py`).
|
100 |
+
- Remove call to ds_report() which uses 3rd party calls to nvcc (`pretrain_gpt.py`).
|
101 |
+
- HPU does not support fused_layer_norm_cuda (as explained above), therefore LayerNorm from Apex is used instead (it is eventually overwritten to torch.nn.LayerNorm by the GPU Migration Toolkit) (`megatron/model/__init__.py`).
|
102 |
+
- HPU supports BF16 data type (For this particular topology, mixed precision support directly comes from [Habana's DeepSpeed](https://docs.habana.ai/en/latest/PyTorch/DeepSpeed/DeepSpeed_User_Guide/DeepSpeed_User_Guide.html)). BF16 offers FP32-like dynamic range and loss scaling is not required in [BF16 mixed precision training](https://arxiv.org/pdf/1905.12322.pdf). Hence, cur_scale attribute is not available for BF16 Optimizer (`megatron/training.py`).
|
103 |
+
- A script for running the Bloom model. Based on https://github.com/bigscience-workshop/bigscience/blob/master/train/tr1-13B-base/tr1-13B-round1.slurm (`scripts/run_bloom13b.sh`).
|
104 |
+
|
105 |
+
4. To improve the performance, apply the patch (which sets skip_bias_add argument to False for mpu.ColumnParallelLinear & mpu.RowParallelLinear)
|
106 |
+
```bash
|
107 |
+
git apply Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/patches/performance_patch_1.diff
|
108 |
+
```
|
109 |
+
## GPU Migration Toolkit Logs
|
110 |
+
You can review GPU Migration Toolkit logs under `gpu_migration_logs/gpu_migration_424488.log`. For further information, refer to [GPU Migration Toolkit documentation](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Model_Porting/GPU_Migration_Toolkit/GPU_Migration_Toolkit.html#enabling-logging-feature).
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/SECURITY.md
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
|
2 |
+
|
3 |
+
## Security
|
4 |
+
|
5 |
+
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
|
6 |
+
|
7 |
+
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
|
8 |
+
|
9 |
+
## Reporting Security Issues
|
10 |
+
|
11 |
+
**Please do not report security vulnerabilities through public GitHub issues.**
|
12 |
+
|
13 |
+
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
|
14 |
+
|
15 |
+
If you prefer to submit without logging in, send email to [[email protected]](mailto:[email protected]). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
|
16 |
+
|
17 |
+
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
|
18 |
+
|
19 |
+
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
|
20 |
+
|
21 |
+
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
|
22 |
+
* Full paths of source file(s) related to the manifestation of the issue
|
23 |
+
* The location of the affected source code (tag/branch/commit or direct URL)
|
24 |
+
* Any special configuration required to reproduce the issue
|
25 |
+
* Step-by-step instructions to reproduce the issue
|
26 |
+
* Proof-of-concept or exploit code (if possible)
|
27 |
+
* Impact of the issue, including how an attacker might exploit the issue
|
28 |
+
|
29 |
+
This information will help us triage your report more quickly.
|
30 |
+
|
31 |
+
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
|
32 |
+
|
33 |
+
## Preferred Languages
|
34 |
+
|
35 |
+
We prefer all communications to be in English.
|
36 |
+
|
37 |
+
## Policy
|
38 |
+
|
39 |
+
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
|
40 |
+
|
41 |
+
<!-- END MICROSOFT SECURITY.MD BLOCK -->
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/examples/pretrain_gpt_distributed.sh
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#! /bin/bash

# Runs the "345M" parameter model.
# Launches single-node, 8-process distributed GPT pretraining via
# torch.distributed.launch; edit the variables below for multi-node runs.

GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6000
NNODES=1
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))

# Placeholders: point at the preprocessed dataset prefix and checkpoint dir
# before running.
DATA_PATH=<Specify path and file prefix>_text_document
CHECKPOINT_PATH=<Specify path>

# Arguments consumed by torch.distributed.launch (intentionally unquoted at
# the call site so they split into separate words).
DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"

python -m torch.distributed.launch $DISTRIBUTED_ARGS \
       pretrain_gpt.py \
       --num-layers 24 \
       --hidden-size 1024 \
       --num-attention-heads 16 \
       --micro-batch-size 8 \
       --global-batch-size 64 \
       --seq-length 1024 \
       --max-position-embeddings 1024 \
       --train-iters 500000 \
       --lr-decay-iters 320000 \
       --save $CHECKPOINT_PATH \
       --load $CHECKPOINT_PATH \
       --data-path $DATA_PATH \
       --vocab-file gpt2-vocab.json \
       --merge-file gpt2-merges.txt \
       --data-impl mmap \
       --split 949,50,1 \
       --distributed-backend nccl \
       --lr 0.00015 \
       --lr-decay-style cosine \
       --min-lr 1.0e-5 \
       --weight-decay 1e-2 \
       --clip-grad 1.0 \
       --lr-warmup-fraction .01 \
       --checkpoint-activations \
       --log-interval 100 \
       --save-interval 10000 \
       --eval-interval 1000 \
       --eval-iters 10 \
       --fp16
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/examples/pretrain_t5_distributed_with_mp.sh
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash

# Launches single-node, 8-process T5 pretraining with 2-way tensor model
# parallelism via torch.distributed.launch; edit the variables below for
# multi-node runs.

GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6000
NNODES=1
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))

# Placeholders: point at the preprocessed dataset prefix and checkpoint dir
# before running.
DATA_PATH=<Specify path and file prefix>
CHECKPOINT_PATH=<Specify path>

# Arguments consumed by torch.distributed.launch (intentionally unquoted at
# the call site so they split into separate words).
DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"

python -m torch.distributed.launch $DISTRIBUTED_ARGS \
       pretrain_t5.py \
       --tensor-model-parallel-size 2 \
       --num-layers 12 \
       --hidden-size 768 \
       --num-attention-heads 12 \
       --kv-channels 64 \
       --ffn-hidden-size 3072 \
       --encoder-seq-length 512 \
       --decoder-seq-length 128 \
       --micro-batch-size 16 \
       --global-batch-size 2048 \
       --seq-length 512 \
       --max-position-embeddings 512 \
       --train-iters 1000000 \
       --lr-decay-iters 1000000 \
       --save $CHECKPOINT_PATH \
       --load $CHECKPOINT_PATH \
       --data-path $DATA_PATH \
       --vocab-file t5-vocab.txt \
       --data-impl mmap \
       --split 949,50,1 \
       --lr 0.0001 \
       --min-lr 0.00001 \
       --lr-decay-style linear \
       --lr-warmup-fraction .01 \
       --weight-decay 1e-2 \
       --clip-grad 1.0 \
       --log-interval 100 \
       --save-interval 10000 \
       --eval-interval 1000 \
       --eval-iters 10 \
       --fp16
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/pretrain_bert.py
ADDED
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
"""Pretrain BERT"""
|
17 |
+
|
18 |
+
from functools import partial
|
19 |
+
|
20 |
+
import torch
|
21 |
+
import torch.nn.functional as F
|
22 |
+
|
23 |
+
from megatron import get_args
|
24 |
+
from megatron import print_rank_0
|
25 |
+
from megatron import get_timers
|
26 |
+
from megatron import mpu
|
27 |
+
from megatron.data.dataset_utils import build_train_valid_test_datasets
|
28 |
+
from megatron.model import BertModel
|
29 |
+
from megatron.training import pretrain
|
30 |
+
from megatron.utils import average_losses_across_data_parallel_group
|
31 |
+
|
32 |
+
|
33 |
+
def model_provider(pre_process=True, post_process=True):
    """Construct and return a BertModel.

    Args:
        pre_process: include the embedding (input) stage of the model.
        post_process: include the output-head stage of the model.
    """
    print_rank_0('building BERT model ...')

    args = get_args()
    # Two token types (sentence A / sentence B) are only needed when the
    # binary (sentence-order) head is enabled.
    token_type_count = 2 if args.bert_binary_head else 0
    return BertModel(
        num_tokentypes=token_type_count,
        add_binary_head=args.bert_binary_head,
        parallel_output=True,
        pre_process=pre_process,
        post_process=post_process)
|
48 |
+
|
49 |
+
|
50 |
+
def get_batch(data_iterator):
    """Fetch one BERT batch and broadcast it across the model-parallel group.

    Returns (tokens, types, sentence_order, loss_mask, lm_labels,
    padding_mask) unpacked from the broadcast dict.
    """
    # Fields expected in each sample; all are broadcast as int64.
    keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask']
    datatype = torch.int64

    # Only ranks that own a data iterator actually read from it; the others
    # receive the batch through mpu.broadcast_data.
    data = next(data_iterator) if data_iterator is not None else None
    data_b = mpu.broadcast_data(keys, data, datatype)

    # Unpack into the dtypes the model expects.
    tokens = data_b['text'].long()
    types = data_b['types'].long()
    sentence_order = data_b['is_random'].long()
    loss_mask = data_b['loss_mask'].float()
    lm_labels = data_b['labels'].long()
    padding_mask = data_b['padding_mask'].long()

    return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
|
73 |
+
|
74 |
+
|
75 |
+
def loss_func(loss_mask, sentence_order, output_tensor):
    """Compute the masked-LM loss, plus the SOP loss when its logits exist.

    Returns (total_loss, dict of data-parallel-averaged losses for logging).
    """
    lm_loss_, sop_logits = output_tensor

    lm_loss_ = lm_loss_.float()
    loss_mask = loss_mask.float()
    # Mean of per-token LM losses over unmasked positions only.
    lm_loss = torch.sum(
        lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()

    if sop_logits is None:
        # No sentence-order head: report the LM loss alone.
        averaged = average_losses_across_data_parallel_group([lm_loss])
        return lm_loss, {'lm loss': averaged[0]}

    sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(),
                               sentence_order.view(-1),
                               ignore_index=-1).float()
    total = lm_loss + sop_loss
    averaged = average_losses_across_data_parallel_group([lm_loss, sop_loss])
    return total, {'lm loss': averaged[0],
                   'sop loss': averaged[1]}
|
99 |
+
|
100 |
+
|
101 |
+
def forward_step(data_iterator, model):
    """Run one BERT forward pass; return (output tensor, loss closure)."""
    args = get_args()
    timers = get_timers()

    # Time just the batch fetch/broadcast.
    timers('batch-generator').start()
    (tokens, types, sentence_order,
     loss_mask, lm_labels, padding_mask) = get_batch(data_iterator)
    timers('batch-generator').stop()

    # Token-type embeddings are only meaningful with the binary head.
    if not args.bert_binary_head:
        types = None

    # Forward pass through the model.
    output_tensor = model(tokens, padding_mask,
                          tokentype_ids=types, lm_labels=lm_labels)

    # Loss is deferred so the trainer can schedule it after the forward.
    return output_tensor, partial(loss_func, loss_mask, sentence_order)
|
120 |
+
|
121 |
+
|
122 |
+
def train_valid_test_datasets_provider(train_val_test_num_samples):
    """Create the BERT train/validation/test datasets from CLI args.

    Args:
        train_val_test_num_samples: per-split sample counts.
    """
    args = get_args()

    print_rank_0('> building train, validation, and test datasets '
                 'for BERT ...')
    datasets = build_train_valid_test_datasets(
        data_prefix=args.data_path,
        data_impl=args.data_impl,
        splits_string=args.split,
        train_valid_test_num_samples=train_val_test_num_samples,
        max_seq_length=args.seq_length,
        masked_lm_prob=args.mask_prob,
        short_seq_prob=args.short_seq_prob,
        seed=args.seed,
        skip_warmup=(not args.mmap_warmup),
        binary_head=args.bert_binary_head)
    print_rank_0("> finished creating BERT datasets ...")

    # build_train_valid_test_datasets already returns the
    # (train, valid, test) triple.
    return datasets
|
142 |
+
|
143 |
+
|
144 |
+
if __name__ == "__main__":

    # Entry point: run the generic Megatron pretraining loop with the
    # BERT-specific dataset/model/forward providers defined above.
    pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
             args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/pretrain_gpt.py
ADDED
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
"""Pretrain GPT"""
|
17 |
+
import habana_frameworks.torch.gpu_migration
|
18 |
+
|
19 |
+
import torch
|
20 |
+
from functools import partial
|
21 |
+
from megatron import get_args
|
22 |
+
from megatron import print_rank_0
|
23 |
+
from megatron import get_timers
|
24 |
+
from megatron import get_tokenizer
|
25 |
+
from megatron import mpu
|
26 |
+
from megatron.data.gpt_dataset import build_train_valid_test_datasets
|
27 |
+
from megatron.model import GPTModel, GPTModelPipe
|
28 |
+
from megatron.training import pretrain
|
29 |
+
from megatron.utils import get_ltor_masks_and_position_ids
|
30 |
+
from megatron.utils import average_losses_across_data_parallel_group
|
31 |
+
|
32 |
+
import deepspeed
|
33 |
+
from deepspeed.runtime.utils import see_memory_usage
|
34 |
+
import os
|
35 |
+
import subprocess
|
36 |
+
|
37 |
+
from torch import nn
|
38 |
+
import torch.nn.functional as F
|
39 |
+
|
40 |
+
def model_provider(pre_process=True, post_process=True):
    """Construct and return a GPT model.

    Builds the pipeline-parallel GPTModelPipe when DeepSpeed pipeline
    parallelism is enabled, otherwise the plain GPTModel.

    Args:
        pre_process: include the embedding (input) stage (non-pipe model).
        post_process: include the output-head stage (non-pipe model).
    """
    print_rank_0('building GPT model ...')
    see_memory_usage(f"Before Building Model", force=True)

    args = get_args()
    # ZeRO stage-3 partitions parameters at construction time; for other
    # stages the context is disabled (enabled=False) and is a no-op.
    with deepspeed.zero.Init(data_parallel_group=mpu.get_data_parallel_group(),
                             remote_device=None if args.remote_device == 'none' else args.remote_device,
                             config_dict_or_path=args.deepspeed_config,
                             enabled=args.zero_stage == 3,
                             mpu=mpu):
        if args.deepspeed and not args.no_pipeline_parallel:
            model = GPTModelPipe(num_tokentypes=0, parallel_output=True)
            # This is a hack to give us a reference to get_batch_pipe from
            # within training.py. We need to call model.set_batch_fn after
            # deepspeed.initialize.
            model._megatron_batch_fn = get_batch_pipe

            # Precompute the causal attention mask and store it in args.
            # This avoids having to pipeline it as an activation during
            # training: the mask is constant, so it can be reused.
            attention_mask = torch.tril(torch.ones(
                (1, args.seq_length, args.seq_length),
                device=torch.cuda.current_device())).view(
                    1, 1, args.seq_length, args.seq_length)

            # Convert the lower-triangular mask to binary form (True where
            # attention is NOT allowed).
            attention_mask = (attention_mask < 0.5)
            if args.fp16:
                attention_mask = attention_mask.half()
            elif args.bf16:
                attention_mask = attention_mask.bfloat16()

            # Attention mask must be bool.
            args.attn_mask = attention_mask.to(torch.bool)
        else:
            model = GPTModel(num_tokentypes=0,
                             parallel_output=True,
                             pre_process=pre_process,
                             post_process=post_process)
    see_memory_usage(f"After Building Model", force=True)
    return model
|
87 |
+
|
88 |
+
|
89 |
+
def get_batch(data_iterator):
    """Pull one GPT batch, broadcast it, and derive the training inputs.

    Returns (tokens, labels, loss_mask, attention_mask, position_ids).
    """
    args = get_args()
    tokenizer = get_tokenizer()

    # Only the raw token stream is broadcast; everything else is derived.
    keys = ['text']
    datatype = torch.int64

    # Only ranks that own a data iterator read from it; the rest receive
    # the batch through mpu.broadcast_data.
    data = next(data_iterator) if data_iterator is not None else None
    data_b = mpu.broadcast_data(keys, data, datatype)

    # Shift by one so token t+1 is predicted from tokens <= t.
    tokens_ = data_b['text'].long()
    labels = tokens_[:, 1:].contiguous()
    tokens = tokens_[:, :-1].contiguous()

    # Build the causal attention mask, loss mask and position ids.
    attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
        tokens,
        tokenizer.eod,
        args.reset_position_ids,
        args.reset_attention_mask,
        args.eod_mask_loss)

    return tokens, labels, loss_mask, attention_mask, position_ids
|
119 |
+
|
120 |
+
|
121 |
+
def get_batch_pipe(data):
    """Modification of `get_batch` to work on `next(data_iterator)` instead
    of `data_iterator` (used as the DeepSpeed pipeline-engine batch fn)."""
    args = get_args()
    tokenizer = get_tokenizer()

    # Only the raw token stream is broadcast; everything else is derived.
    keys = ['text']
    datatype = torch.int64

    # Broadcast the already-fetched sample across the model-parallel group.
    data_b = mpu.broadcast_data(keys, data, datatype)

    # Shift by one so token t+1 is predicted from tokens <= t.
    tokens_ = data_b['text'].long()
    labels = tokens_[:, 1:].contiguous()
    tokens = tokens_[:, :-1].contiguous()

    # Build the causal attention mask, loss mask and position ids.
    attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
        tokens,
        tokenizer.eod,
        args.reset_position_ids,
        args.reset_attention_mask,
        args.eod_mask_loss)

    if args.curriculum_learning and args.curriculum_seqlen < tokens.size()[1]:
        # Sequence-length curriculum learning: truncate every
        # [batch, seqlen] tensor to the current curriculum length.
        tokens = tokens[:, :args.curriculum_seqlen].contiguous()
        position_ids = position_ids[:, :args.curriculum_seqlen].contiguous()
        if labels is not None:
            labels = labels[:, :args.curriculum_seqlen].contiguous()
        loss_mask = loss_mask[:, :args.curriculum_seqlen].contiguous()

    # Pipeline engine expects (inputs, targets) tuples.
    return (tokens, position_ids, attention_mask), (labels, loss_mask)
|
155 |
+
|
156 |
+
|
157 |
+
def loss_func(loss_mask, moe_loss, mos_loss, output_tensor):
    """Compute the total GPT loss from per-token LM losses.

    Args:
        loss_mask: [batch, seq] mask selecting tokens that count toward the
            LM loss.
        moe_loss: mixture-of-experts auxiliary loss (already coefficient-scaled).
        mos_loss: distillation (MoS/KD) loss, 0 when distillation is off.
        output_tensor: per-token LM losses produced by the model.

    Returns:
        (total_loss, dict of losses for logging); 'lm loss' is averaged
        across the data-parallel group.
    """
    args = get_args()
    losses = output_tensor.float()
    loss_mask = loss_mask.view(-1).float()
    # Mean LM loss over unmasked tokens only.
    loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()

    # Reduce loss for logging.
    averaged_loss = average_losses_across_data_parallel_group([loss])
    if args.mos or args.kd:
        loss = loss + moe_loss + mos_loss
        if args.mos:
            return loss, {'total loss': loss, 'lm loss': averaged_loss[0],
                          'moe loss': moe_loss, 'mos loss': mos_loss}
        # args.kd necessarily holds here because the outer guard requires
        # mos or kd, so this branch is the kd case. (The original code had
        # an unreachable debug print after these returns; it was removed.)
        return loss, {'total loss': loss, 'lm loss': averaged_loss[0],
                      'moe loss': moe_loss, 'kd loss': mos_loss}

    if max(args.num_experts) <= 1:
        # Dense model: no MoE auxiliary term.
        return loss, {'lm loss': averaged_loss[0]}
    loss = loss + moe_loss
    return loss, {'lm loss': averaged_loss[0], 'moe loss': moe_loss}
|
179 |
+
|
180 |
+
def calculate_mos_loss(args, stu_output, teacher_model, tokens, position_ids, attention_mask):
    """Compute the knowledge-distillation (MoS) loss between the student's
    logits and the teacher model's logits.

    Returns 0 when no teacher model is supplied.
    """
    mos_loss = 0
    alpha = args.kd_alpha_ce
    beta = args.kd_beta_ce
    kd_temp = args.kd_temp

    if teacher_model:
        with torch.no_grad():
            if args.curriculum_learning and args.curriculum_seqlen < args.seq_length:
                assert args.curriculum_seqlen is not None
                seqlen = args.curriculum_seqlen
                # Truncate inputs to the curriculum length; no need to
                # truncate labels as they are not used for teacher logits.
                tokens = tokens[:, :seqlen].contiguous()
                position_ids = position_ids[:, :seqlen].contiguous()
                attention_mask = attention_mask[:, :, :seqlen, :seqlen].contiguous()
            tea_output, *tea_other_losses = teacher_model(tokens, position_ids, attention_mask)
            assert stu_output.size() == tea_output.size(), (
                'teacher and student output should match in size. '
                'Student: {}, Teacher: {}, CL seq length {}'.format(
                    stu_output.size(), tea_output.size(), args.curriculum_seqlen))

            # Temperature-scaled KL divergence. The target of KLDivLoss is
            # expected to be probabilities (if log_softmax were used instead,
            # KLDivLoss would need log_target=True).
            student_logits = F.log_softmax(stu_output / kd_temp, dim=2)
            tea_logits = F.softmax(tea_output / kd_temp, dim=2)

            mos_loss = kd_temp * kd_temp * nn.KLDivLoss(reduction='batchmean')(student_logits, tea_logits)

            mos_loss = mos_loss.div(args.seq_length) * beta
    return mos_loss
|
205 |
+
|
206 |
+
def forward_step(data_iterator, model, teacher_model=None):
    """Run one GPT forward pass; return (output tensor, loss closure).

    When distillation (mos/kd) is active, the model is called without
    labels so the raw logits are available for the teacher comparison.
    """
    args = get_args()
    timers = get_timers()

    # Time just the batch fetch/broadcast.
    timers('batch-generator').start()
    tokens, labels, loss_mask, attention_mask, position_ids = get_batch(
        data_iterator)
    timers('batch-generator').stop()

    if args.mos or args.kd:
        # The forward func can return either the loss or the logits,
        # depending on whether labels are passed in; here we need logits.
        stu_output, *other_losses = model(tokens, position_ids, attention_mask)
        if args.curriculum_learning and args.curriculum_seqlen < args.seq_length:
            assert args.curriculum_seqlen is not None
            labels = labels[:, :args.curriculum_seqlen].contiguous()
        output_tensor = mpu.vocab_parallel_cross_entropy(stu_output.contiguous().float(), labels)
    else:
        output_tensor, *other_losses = model(tokens, position_ids, attention_mask,
                                             labels=labels)
    if args.curriculum_learning and args.curriculum_seqlen < args.seq_length:
        # Keep the loss mask aligned with the truncated sequence.
        loss_mask = loss_mask[:, :args.curriculum_seqlen].contiguous()

    # Collect whatever MoE auxiliary losses the model returned.
    moe_losses = [aux for aux in other_losses if aux is not None]
    moe_loss = sum(moe_losses) * args.moe_loss_coeff

    mos_loss = 0
    if args.mos or args.kd:
        assert model.training
        mos_loss = calculate_mos_loss(args, stu_output, teacher_model,
                                      tokens, position_ids, attention_mask)

    # output_tensor stores the standard loss; loss_func computes the total.
    return output_tensor, partial(loss_func, loss_mask, moe_loss, mos_loss)
|
243 |
+
|
244 |
+
|
245 |
+
def train_valid_test_datasets_provider(train_val_test_num_samples):
    """Create the GPT train/validation/test datasets from CLI args.

    Args:
        train_val_test_num_samples: per-split sample counts.
    """
    args = get_args()

    print_rank_0('> building train, validation, and test datasets '
                 'for GPT ...')
    datasets = build_train_valid_test_datasets(
        data_prefix=args.data_path,
        data_impl=args.data_impl,
        splits_string=args.split,
        train_valid_test_num_samples=train_val_test_num_samples,
        seq_length=args.seq_length,
        seed=args.seed,
        skip_warmup=(not args.mmap_warmup))
    print_rank_0("> finished creating GPT datasets ...")

    # build_train_valid_test_datasets already returns the
    # (train, valid, test) triple.
    return datasets
|
262 |
+
|
263 |
+
|
264 |
+
def command_exists(cmd):
    """Return True when the shell can resolve *cmd* (binary, builtin or alias)."""
    # `type` is a shell builtin, so it must run through a shell; stdout is
    # captured only to keep the check silent.
    completed = subprocess.run(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
    return completed.returncode == 0
|
267 |
+
|
268 |
+
|
269 |
+
def git_ds_info():
    """Print the current git hash/branch of the Megatron checkout for
    reproducibility logging."""
    from deepspeed.env_report import main as ds_report
    # ds_report() is deliberately not invoked: it shells out to nvcc,
    # which is not available on HPU (see the repo README).

    # Write out version/git info.
    git_hash_cmd = "git rev-parse --short HEAD"
    git_branch_cmd = "git rev-parse --abbrev-ref HEAD"
    git_hash = "unknown"
    git_branch = "unknown"
    if command_exists('git'):
        try:
            git_hash = subprocess.check_output(git_hash_cmd, shell=True).decode('utf-8').strip()
            git_branch = subprocess.check_output(git_branch_cmd, shell=True).decode('utf-8').strip()
        except subprocess.CalledProcessError:
            # Not inside a git work tree (or git failed): report unknown.
            git_hash = "unknown"
            git_branch = "unknown"
    print(f'**** Git info for Megatron: git_hash={git_hash} git_branch={git_branch} ****')
|
289 |
+
|
290 |
+
|
291 |
+
if __name__ == "__main__":
    # Entry point: log git info for reproducibility, then run the generic
    # Megatron pretraining loop with the GPT-specific providers above.
    git_ds_info()
    pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
             args_defaults={'tokenizer_type': 'GPT2BPETokenizer'})
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/pretrain_ict.py
ADDED
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
"""Pretrain BERT for Inverse Cloze Task"""
|
17 |
+
import math
|
18 |
+
|
19 |
+
import torch
|
20 |
+
import torch.distributed as dist
|
21 |
+
import torch.nn.functional as F
|
22 |
+
|
23 |
+
from megatron import get_args
|
24 |
+
from megatron import print_rank_0
|
25 |
+
from megatron import get_timers
|
26 |
+
from megatron import mpu
|
27 |
+
from megatron.data.biencoder_dataset_utils import get_ict_batch
|
28 |
+
from megatron.data.dataset_utils import build_train_valid_test_datasets
|
29 |
+
from megatron.model.biencoder_model import biencoder_model_provider
|
30 |
+
from megatron.training import pretrain
|
31 |
+
from megatron.utils import average_losses_across_data_parallel_group
|
32 |
+
|
33 |
+
|
34 |
+
def pretrain_ict_model_provider():
    """Build the biencoder (query + context towers) for ICT pretraining."""
    args = get_args()
    # Both towers are built; whether they share weights is controlled by
    # the CLI flag.
    return biencoder_model_provider(
        only_context_model=False,
        only_query_model=False,
        biencoder_shared_query_context_model=args.biencoder_shared_query_context_model)
|
42 |
+
|
43 |
+
def get_group_world_size_rank():
    """Return (group, rank, world_size) for the data-parallel group."""
    group = mpu.get_data_parallel_group()
    this_rank = torch.distributed.get_rank(group=group)
    group_size = torch.distributed.get_world_size(group=group)
    return group, this_rank, group_size
|
50 |
+
|
51 |
+
|
52 |
+
class AllgatherFromDataParallelRegion(torch.autograd.Function):
    """Autograd-aware all-gather across the data-parallel group.

    Forward gathers each rank's 2-D tensor along dim 0; backward hands each
    rank back its own slice of the incoming gradient.
    """

    @staticmethod
    def forward(ctx, input_):
        # Only 2-D [local_batch, hidden] inputs are supported.
        assert input_.dim() == 2
        group, rank, world_size = get_group_world_size_rank()

        tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
        # Our own slot holds the original tensor rather than the gathered
        # copy — NOTE(review): presumably to keep autograd connectivity to
        # input_ on this rank; confirm before changing.
        tensor_list[rank] = input_
        torch.distributed.all_gather(tensor_list, input_, group=group)

        # Concatenate along dim 0: [world_size * local_batch, hidden].
        output = torch.cat(tensor_list, dim=0).contiguous()

        return output


    @staticmethod
    def backward(ctx, grad_output):
        group, rank, world_size = get_group_world_size_rank()

        # The gathered dimension must split evenly back across ranks.
        assert grad_output.shape[0] % world_size == 0
        dim_size = grad_output.shape[0] // world_size
        output_list = torch.split(grad_output, dim_size, dim=0)

        # get chunk from this rank
        output = output_list[rank].contiguous()
        return output
|
79 |
+
|
80 |
+
def forward_step(data_iterator, model, input_tensor):
|
81 |
+
"""Forward step."""
|
82 |
+
args = get_args()
|
83 |
+
timers = get_timers()
|
84 |
+
|
85 |
+
# Get the batch.
|
86 |
+
timers('batch-generator').start()
|
87 |
+
query_tokens, query_mask, \
|
88 |
+
context_tokens, context_mask, context_indices = get_ict_batch(data_iterator)
|
89 |
+
timers('batch-generator').stop()
|
90 |
+
|
91 |
+
# Query and Context Types
|
92 |
+
query_types = torch.cuda.LongTensor(*query_tokens.shape).fill_(0)
|
93 |
+
context_types = torch.cuda.LongTensor(*context_tokens.shape).fill_(0)
|
94 |
+
|
95 |
+
# Forward model.
|
96 |
+
query_logits, context_logits = model(query_tokens, query_mask,
|
97 |
+
query_types, context_tokens,
|
98 |
+
context_mask, context_types)
|
99 |
+
|
100 |
+
micro_batch_size = query_logits.shape[0]
|
101 |
+
# recall we assert that tensor_model_parallel_size == 1
|
102 |
+
assert mpu.get_tensor_model_parallel_world_size() == 1, \
|
103 |
+
"Model parallel size > 1 not supported for ICT"
|
104 |
+
|
105 |
+
global_batch_size = dist.get_world_size() * micro_batch_size
|
106 |
+
all_query_logits = AllgatherFromDataParallelRegion.apply(query_logits)
|
107 |
+
all_context_logits = AllgatherFromDataParallelRegion.apply(context_logits)
|
108 |
+
|
109 |
+
# scores are inner products between query and context embeddings
|
110 |
+
retrieval_scores = torch.matmul(all_query_logits,
|
111 |
+
torch.transpose(all_context_logits, 0, 1))
|
112 |
+
# scaling the retriever scores
|
113 |
+
if args.retriever_score_scaling:
|
114 |
+
retrieval_scores = retrieval_scores / math.sqrt(args.hidden_size)
|
115 |
+
|
116 |
+
softmax_scores = F.log_softmax(retrieval_scores, dim=1)
|
117 |
+
sorted_vals, sorted_indices = torch.topk(softmax_scores,
|
118 |
+
k=softmax_scores.shape[1], sorted=True)
|
119 |
+
|
120 |
+
def topk_accuracy(k):
|
121 |
+
return torch.cuda.FloatTensor([sum([int(i in sorted_indices[i, :k]) \
|
122 |
+
for i in range(global_batch_size)]) / global_batch_size])
|
123 |
+
|
124 |
+
topk_accs = [topk_accuracy(int(k)) for k in args.retriever_report_topk_accuracies]
|
125 |
+
|
126 |
+
labels = torch.arange(global_batch_size).long().cuda()
|
127 |
+
loss = F.nll_loss(softmax_scores, labels, reduction='mean')
|
128 |
+
reduced_losses = average_losses_across_data_parallel_group([loss, *topk_accs])
|
129 |
+
|
130 |
+
# Scale the retrieval loss
|
131 |
+
loss = loss * mpu.get_data_parallel_world_size()
|
132 |
+
|
133 |
+
# create stats_dict with retrieval loss and all specified top-k accuracies
|
134 |
+
topk_acc_dict = {'top{}_acc'.format(k): v * 100 for k, v in \
|
135 |
+
zip(args.retriever_report_topk_accuracies, reduced_losses[1:])}
|
136 |
+
stats_dict = dict(loss=reduced_losses[0], **topk_acc_dict)
|
137 |
+
return loss, stats_dict
|
138 |
+
|
139 |
+
|
140 |
+
def train_valid_test_datasets_provider(train_val_test_num_samples):
|
141 |
+
"""Build train, valid and test datasets."""
|
142 |
+
args = get_args()
|
143 |
+
print_rank_0('> building train, validation, and test datasets '
|
144 |
+
'for BERT ICT...')
|
145 |
+
|
146 |
+
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
|
147 |
+
data_prefix=args.data_path,
|
148 |
+
data_impl=args.data_impl,
|
149 |
+
splits_string=args.split,
|
150 |
+
train_valid_test_num_samples=train_val_test_num_samples,
|
151 |
+
max_seq_length=args.seq_length,
|
152 |
+
masked_lm_prob=args.mask_prob,
|
153 |
+
short_seq_prob=args.short_seq_prob,
|
154 |
+
seed=args.seed,
|
155 |
+
skip_warmup=(not args.mmap_warmup),
|
156 |
+
binary_head=False,
|
157 |
+
dataset_type='ict')
|
158 |
+
print_rank_0("> finished creating BERT ICT datasets ...")
|
159 |
+
|
160 |
+
return train_ds, valid_ds, test_ds
|
161 |
+
|
162 |
+
|
163 |
+
if __name__ == "__main__":
|
164 |
+
pretrain(train_valid_test_datasets_provider,
|
165 |
+
pretrain_ict_model_provider,
|
166 |
+
forward_step,
|
167 |
+
args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/pretrain_t5.py
ADDED
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
"""Pretrain T5"""
|
17 |
+
|
18 |
+
from functools import partial
|
19 |
+
|
20 |
+
import torch
|
21 |
+
|
22 |
+
from megatron import (
|
23 |
+
get_args,
|
24 |
+
get_timers,
|
25 |
+
mpu,
|
26 |
+
print_rank_0
|
27 |
+
)
|
28 |
+
from megatron.data.dataset_utils import build_train_valid_test_datasets
|
29 |
+
from megatron.model import T5Model
|
30 |
+
from megatron.training import pretrain
|
31 |
+
from megatron.utils import average_losses_across_data_parallel_group
|
32 |
+
|
33 |
+
|
34 |
+
def model_provider(pre_process=True, post_process=True):
|
35 |
+
"""Build the model."""
|
36 |
+
assert pre_process and post_process, "T5 doesn't yet support pipelining"
|
37 |
+
|
38 |
+
print_rank_0('building T5 model ...')
|
39 |
+
model = T5Model(num_tokentypes=0,
|
40 |
+
parallel_output=True)
|
41 |
+
return model
|
42 |
+
|
43 |
+
|
44 |
+
def get_batch(data_iterator):
|
45 |
+
"""Build the batch."""
|
46 |
+
|
47 |
+
keys = ['text_enc', 'text_dec', 'labels', 'loss_mask',
|
48 |
+
'enc_mask', 'dec_mask', 'enc_dec_mask']
|
49 |
+
datatype = torch.int64
|
50 |
+
|
51 |
+
# Broadcast data.
|
52 |
+
if data_iterator is not None:
|
53 |
+
data = next(data_iterator)
|
54 |
+
else:
|
55 |
+
data = None
|
56 |
+
data_b = mpu.broadcast_data(keys, data, datatype)
|
57 |
+
|
58 |
+
# Unpack.
|
59 |
+
tokens_enc = data_b['text_enc'].long()
|
60 |
+
tokens_dec = data_b['text_dec'].long()
|
61 |
+
labels = data_b['labels'].long()
|
62 |
+
loss_mask = data_b['loss_mask'].float()
|
63 |
+
|
64 |
+
enc_mask = (data_b['enc_mask'] < 0.5)
|
65 |
+
dec_mask = (data_b['dec_mask'] < 0.5)
|
66 |
+
enc_dec_mask = (data_b['enc_dec_mask'] < 0.5)
|
67 |
+
|
68 |
+
return tokens_enc, tokens_dec, loss_mask, labels, \
|
69 |
+
enc_mask, dec_mask, enc_dec_mask
|
70 |
+
|
71 |
+
|
72 |
+
def loss_func(loss_mask, output_tensor):
|
73 |
+
lm_loss_, _ = output_tensor
|
74 |
+
|
75 |
+
lm_loss_ = lm_loss_.float()
|
76 |
+
lm_loss = torch.sum(
|
77 |
+
lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
|
78 |
+
|
79 |
+
loss = lm_loss
|
80 |
+
averaged_losses = average_losses_across_data_parallel_group([lm_loss])
|
81 |
+
|
82 |
+
return loss, {'lm loss': averaged_losses[0]}
|
83 |
+
|
84 |
+
|
85 |
+
def forward_step(data_iterator, model):
|
86 |
+
"""Forward step."""
|
87 |
+
args = get_args()
|
88 |
+
timers = get_timers()
|
89 |
+
|
90 |
+
# Get the batch.
|
91 |
+
timers('batch generator').start()
|
92 |
+
tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask \
|
93 |
+
= get_batch(data_iterator)
|
94 |
+
timers('batch generator').stop()
|
95 |
+
|
96 |
+
# Forward model lm_labels
|
97 |
+
output_tensor = model(tokens_enc,
|
98 |
+
tokens_dec,
|
99 |
+
enc_mask,
|
100 |
+
dec_mask,
|
101 |
+
enc_dec_mask,
|
102 |
+
tokentype_ids=None,
|
103 |
+
lm_labels=lm_labels)
|
104 |
+
|
105 |
+
return output_tensor, partial(loss_func, loss_mask)
|
106 |
+
|
107 |
+
|
108 |
+
def train_valid_test_datasets_provider(train_val_test_num_samples):
|
109 |
+
"""Build train, valid, and test datasets."""
|
110 |
+
args = get_args()
|
111 |
+
|
112 |
+
print_rank_0('> building train, validation, and test datasets '
|
113 |
+
'for T5 ...')
|
114 |
+
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
|
115 |
+
data_prefix=args.data_path,
|
116 |
+
data_impl=args.data_impl,
|
117 |
+
splits_string=args.split,
|
118 |
+
train_valid_test_num_samples=train_val_test_num_samples,
|
119 |
+
max_seq_length=args.encoder_seq_length,
|
120 |
+
max_seq_length_dec=args.decoder_seq_length,
|
121 |
+
masked_lm_prob=args.mask_prob,
|
122 |
+
short_seq_prob=args.short_seq_prob,
|
123 |
+
seed=args.seed,
|
124 |
+
skip_warmup=(not args.mmap_warmup),
|
125 |
+
dataset_type='t5')
|
126 |
+
print_rank_0("> finished creating T5 datasets ...")
|
127 |
+
|
128 |
+
return train_ds, valid_ds, test_ds
|
129 |
+
|
130 |
+
|
131 |
+
if __name__ == "__main__":
|
132 |
+
|
133 |
+
pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
|
134 |
+
args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/pretrain_vit.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
"""Pretrain VIT"""
|
17 |
+
|
18 |
+
import torch
|
19 |
+
import torch.nn.functional as F
|
20 |
+
from megatron import get_args, get_timers, mpu, print_rank_0
|
21 |
+
from megatron.data.vit_dataset import build_train_valid_datasets
|
22 |
+
from megatron.model.vit_model import VitModel
|
23 |
+
from megatron.training import pretrain
|
24 |
+
from megatron.utils import average_losses_across_data_parallel_group
|
25 |
+
|
26 |
+
def model_provider():
|
27 |
+
"""Build the model."""
|
28 |
+
|
29 |
+
print_rank_0("building VIT model ...")
|
30 |
+
args = get_args()
|
31 |
+
|
32 |
+
model = VitModel(num_classes=args.num_classes)
|
33 |
+
return model
|
34 |
+
|
35 |
+
def get_batch(data_iterator):
|
36 |
+
"""Build the batch."""
|
37 |
+
data = next(data_iterator)
|
38 |
+
|
39 |
+
# only data parallelism; no need for broadcast
|
40 |
+
images = data[0].cuda()
|
41 |
+
labels = data[1].cuda()
|
42 |
+
|
43 |
+
return images, labels
|
44 |
+
|
45 |
+
def forward_step(data_iterator, model, input_tensor):
|
46 |
+
"""Forward step."""
|
47 |
+
timers = get_timers()
|
48 |
+
assert input_tensor is None
|
49 |
+
|
50 |
+
# Get the batch.
|
51 |
+
timers("batch-generator").start()
|
52 |
+
(
|
53 |
+
images,
|
54 |
+
labels,
|
55 |
+
) = get_batch(data_iterator)
|
56 |
+
timers("batch-generator").stop()
|
57 |
+
|
58 |
+
# Forward model. lm_labels
|
59 |
+
logits = model(images).contiguous().float()
|
60 |
+
loss = F.cross_entropy(logits, labels)
|
61 |
+
|
62 |
+
outputs = torch.argmax(logits, -1)
|
63 |
+
correct = (outputs == labels).float()
|
64 |
+
accuracy = torch.mean(correct)
|
65 |
+
|
66 |
+
averaged_loss = average_losses_across_data_parallel_group([loss, accuracy])
|
67 |
+
|
68 |
+
return loss, {"loss": averaged_loss[0], "accuracy": averaged_loss[1]}
|
69 |
+
|
70 |
+
|
71 |
+
def train_valid_test_datasets_provider(train_val_test_num_samples):
|
72 |
+
"""Build train, valid, and test datasets."""
|
73 |
+
args = get_args()
|
74 |
+
|
75 |
+
print_rank_0(
|
76 |
+
"> building train, validation, and test datasets " "for VIT ..."
|
77 |
+
)
|
78 |
+
train_ds, valid_ds = build_train_valid_datasets(data_path=args.data_path)
|
79 |
+
print_rank_0("> finished creating VIT datasets ...")
|
80 |
+
|
81 |
+
return train_ds, valid_ds, None
|
82 |
+
|
83 |
+
|
84 |
+
if __name__ == "__main__":
|
85 |
+
|
86 |
+
pretrain(
|
87 |
+
train_valid_test_datasets_provider,
|
88 |
+
model_provider,
|
89 |
+
forward_step,
|
90 |
+
args_defaults={'dataloader_type': 'cyclic'}
|
91 |
+
)
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
pybind11
|
2 |
+
torch
|
3 |
+
six
|
4 |
+
regex
|
5 |
+
numpy
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/DeepSpeedExamples/Megatron-DeepSpeed/setup.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
"""Setup for pip package."""
|
17 |
+
|
18 |
+
import os
|
19 |
+
import sys
|
20 |
+
import setuptools
|
21 |
+
|
22 |
+
if sys.version_info < (3,):
|
23 |
+
raise Exception("Python 2 is not supported by Megatron.")
|
24 |
+
|
25 |
+
from megatron.package_info import (
|
26 |
+
__description__,
|
27 |
+
__contact_names__,
|
28 |
+
__url__,
|
29 |
+
__download_url__,
|
30 |
+
__keywords__,
|
31 |
+
__license__,
|
32 |
+
__package_name__,
|
33 |
+
__version__,
|
34 |
+
)
|
35 |
+
|
36 |
+
with open("README.md", "r") as fh:
|
37 |
+
long_description = fh.read()
|
38 |
+
|
39 |
+
###############################################################################
|
40 |
+
# Dependency Loading #
|
41 |
+
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
|
42 |
+
|
43 |
+
|
44 |
+
def req_file(filename):
|
45 |
+
with open(filename) as f:
|
46 |
+
content = f.readlines()
|
47 |
+
return [x.strip() for x in content]
|
48 |
+
|
49 |
+
|
50 |
+
install_requires = req_file("requirements.txt")
|
51 |
+
|
52 |
+
setuptools.setup(
|
53 |
+
name=__package_name__,
|
54 |
+
# Versions should comply with PEP440. For a discussion on single-sourcing
|
55 |
+
# the version across setup.py and the project code, see
|
56 |
+
# https://packaging.python.org/en/latest/single_source_version.html
|
57 |
+
version=__version__,
|
58 |
+
description=__description__,
|
59 |
+
long_description=long_description,
|
60 |
+
long_description_content_type="text/markdown",
|
61 |
+
# The project's main homepage.
|
62 |
+
url=__url__,
|
63 |
+
author=__contact_names__,
|
64 |
+
maintainer=__contact_names__,
|
65 |
+
# The licence under which the project is released
|
66 |
+
license=__license__,
|
67 |
+
classifiers=[
|
68 |
+
'Intended Audience :: Developers',
|
69 |
+
'Intended Audience :: Science/Research',
|
70 |
+
'Intended Audience :: Information Technology',
|
71 |
+
# Indicate what your project relates to
|
72 |
+
'Topic :: Scientific/Engineering :: Artificial Intelligence',
|
73 |
+
'Topic :: Software Development :: Libraries :: Python Modules',
|
74 |
+
# Supported python versions
|
75 |
+
'Programming Language :: Python :: 3.6',
|
76 |
+
'Programming Language :: Python :: 3.7',
|
77 |
+
'Programming Language :: Python :: 3.8',
|
78 |
+
# Additional Setting
|
79 |
+
'Environment :: Console',
|
80 |
+
'Natural Language :: English',
|
81 |
+
'Operating System :: OS Independent',
|
82 |
+
],
|
83 |
+
python_requires='>=3.6',
|
84 |
+
packages=setuptools.find_packages(),
|
85 |
+
install_requires=install_requires,
|
86 |
+
# Add in any packaged data.
|
87 |
+
include_package_data=True,
|
88 |
+
zip_safe=False,
|
89 |
+
# PyPI package information.
|
90 |
+
keywords=__keywords__
|
91 |
+
)
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/LICENSE
ADDED
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright (C) 2023 Habana Labs Ltd. an Intel Company
|
2 |
+
|
3 |
+
Apache License
|
4 |
+
Version 2.0, January 2004
|
5 |
+
http://www.apache.org/licenses/
|
6 |
+
|
7 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
8 |
+
|
9 |
+
1. Definitions.
|
10 |
+
|
11 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
12 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
13 |
+
|
14 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
15 |
+
the copyright owner that is granting the License.
|
16 |
+
|
17 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
18 |
+
other entities that control, are controlled by, or are under common
|
19 |
+
control with that entity. For the purposes of this definition,
|
20 |
+
"control" means (i) the power, direct or indirect, to cause the
|
21 |
+
direction or management of such entity, whether by contract or
|
22 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
23 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
24 |
+
|
25 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
26 |
+
exercising permissions granted by this License.
|
27 |
+
|
28 |
+
"Source" form shall mean the preferred form for making modifications,
|
29 |
+
including but not limited to software source code, documentation
|
30 |
+
source, and configuration files.
|
31 |
+
|
32 |
+
"Object" form shall mean any form resulting from mechanical
|
33 |
+
transformation or translation of a Source form, including but
|
34 |
+
not limited to compiled object code, generated documentation,
|
35 |
+
and conversions to other media types.
|
36 |
+
|
37 |
+
"Work" shall mean the work of authorship, whether in Source or
|
38 |
+
Object form, made available under the License, as indicated by a
|
39 |
+
copyright notice that is included in or attached to the work
|
40 |
+
(an example is provided in the Appendix below).
|
41 |
+
|
42 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
43 |
+
form, that is based on (or derived from) the Work and for which the
|
44 |
+
editorial revisions, annotations, elaborations, or other modifications
|
45 |
+
represent, as a whole, an original work of authorship. For the purposes
|
46 |
+
of this License, Derivative Works shall not include works that remain
|
47 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
48 |
+
the Work and Derivative Works thereof.
|
49 |
+
|
50 |
+
"Contribution" shall mean any work of authorship, including
|
51 |
+
the original version of the Work and any modifications or additions
|
52 |
+
to that Work or Derivative Works thereof, that is intentionally
|
53 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
54 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
55 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
56 |
+
means any form of electronic, verbal, or written communication sent
|
57 |
+
to the Licensor or its representatives, including but not limited to
|
58 |
+
communication on electronic mailing lists, source code control systems,
|
59 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
60 |
+
Licensor for the purpose of discussing and improving the Work, but
|
61 |
+
excluding communication that is conspicuously marked or otherwise
|
62 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
63 |
+
|
64 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
65 |
+
on behalf of whom a Contribution has been received by Licensor and
|
66 |
+
subsequently incorporated within the Work.
|
67 |
+
|
68 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
69 |
+
this License, each Contributor hereby grants to You a perpetual,
|
70 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
71 |
+
copyright license to reproduce, prepare Derivative Works of,
|
72 |
+
publicly display, publicly perform, sublicense, and distribute the
|
73 |
+
Work and such Derivative Works in Source or Object form.
|
74 |
+
|
75 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
76 |
+
this License, each Contributor hereby grants to You a perpetual,
|
77 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
78 |
+
(except as stated in this section) patent license to make, have made,
|
79 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
80 |
+
where such license applies only to those patent claims licensable
|
81 |
+
by such Contributor that are necessarily infringed by their
|
82 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
83 |
+
with the Work to which such Contribution(s) was submitted. If You
|
84 |
+
institute patent litigation against any entity (including a
|
85 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
86 |
+
or a Contribution incorporated within the Work constitutes direct
|
87 |
+
or contributory patent infringement, then any patent licenses
|
88 |
+
granted to You under this License for that Work shall terminate
|
89 |
+
as of the date such litigation is filed.
|
90 |
+
|
91 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
92 |
+
Work or Derivative Works thereof in any medium, with or without
|
93 |
+
modifications, and in Source or Object form, provided that You
|
94 |
+
meet the following conditions:
|
95 |
+
|
96 |
+
(a) You must give any other recipients of the Work or
|
97 |
+
Derivative Works a copy of this License; and
|
98 |
+
|
99 |
+
(b) You must cause any modified files to carry prominent notices
|
100 |
+
stating that You changed the files; and
|
101 |
+
|
102 |
+
(c) You must retain, in the Source form of any Derivative Works
|
103 |
+
that You distribute, all copyright, patent, trademark, and
|
104 |
+
attribution notices from the Source form of the Work,
|
105 |
+
excluding those notices that do not pertain to any part of
|
106 |
+
the Derivative Works; and
|
107 |
+
|
108 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
109 |
+
distribution, then any Derivative Works that You distribute must
|
110 |
+
include a readable copy of the attribution notices contained
|
111 |
+
within such NOTICE file, excluding those notices that do not
|
112 |
+
pertain to any part of the Derivative Works, in at least one
|
113 |
+
of the following places: within a NOTICE text file distributed
|
114 |
+
as part of the Derivative Works; within the Source form or
|
115 |
+
documentation, if provided along with the Derivative Works; or,
|
116 |
+
within a display generated by the Derivative Works, if and
|
117 |
+
wherever such third-party notices normally appear. The contents
|
118 |
+
of the NOTICE file are for informational purposes only and
|
119 |
+
do not modify the License. You may add Your own attribution
|
120 |
+
notices within Derivative Works that You distribute, alongside
|
121 |
+
or as an addendum to the NOTICE text from the Work, provided
|
122 |
+
that such additional attribution notices cannot be construed
|
123 |
+
as modifying the License.
|
124 |
+
|
125 |
+
You may add Your own copyright statement to Your modifications and
|
126 |
+
may provide additional or different license terms and conditions
|
127 |
+
for use, reproduction, or distribution of Your modifications, or
|
128 |
+
for any such Derivative Works as a whole, provided Your use,
|
129 |
+
reproduction, and distribution of the Work otherwise complies with
|
130 |
+
the conditions stated in this License.
|
131 |
+
|
132 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
133 |
+
any Contribution intentionally submitted for inclusion in the Work
|
134 |
+
by You to the Licensor shall be under the terms and conditions of
|
135 |
+
this License, without any additional terms or conditions.
|
136 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
137 |
+
the terms of any separate license agreement you may have executed
|
138 |
+
with Licensor regarding such Contributions.
|
139 |
+
|
140 |
+
6. Trademarks. This License does not grant permission to use the trade
|
141 |
+
names, trademarks, service marks, or product names of the Licensor,
|
142 |
+
except as required for reasonable and customary use in describing the
|
143 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
144 |
+
|
145 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
146 |
+
agreed to in writing, Licensor provides the Work (and each
|
147 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
148 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
149 |
+
implied, including, without limitation, any warranties or conditions
|
150 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
151 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
152 |
+
appropriateness of using or redistributing the Work and assume any
|
153 |
+
risks associated with Your exercise of permissions under this License.
|
154 |
+
|
155 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
156 |
+
whether in tort (including negligence), contract, or otherwise,
|
157 |
+
unless required by applicable law (such as deliberate and grossly
|
158 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
159 |
+
liable to You for damages, including any direct, indirect, special,
|
160 |
+
incidental, or consequential damages of any character arising as a
|
161 |
+
result of this License or out of the use or inability to use the
|
162 |
+
Work (including but not limited to damages for loss of goodwill,
|
163 |
+
work stoppage, computer failure or malfunction, or any and all
|
164 |
+
other commercial damages or losses), even if such Contributor
|
165 |
+
has been advised of the possibility of such damages.
|
166 |
+
|
167 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
168 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
169 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
170 |
+
or other liability obligations and/or rights consistent with this
|
171 |
+
License. However, in accepting such obligations, You may act only
|
172 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
173 |
+
of any other Contributor, and only if You agree to indemnify,
|
174 |
+
defend, and hold each Contributor harmless for any liability
|
175 |
+
incurred by, or claims asserted against, such Contributor by reason
|
176 |
+
of your accepting any such warranty or additional liability.
|
177 |
+
|
178 |
+
END OF TERMS AND CONDITIONS
|
179 |
+
|
180 |
+
Copyright 2019 NVIDIA CORPORATION. All rights reserved.
|
181 |
+
APPENDIX: How to apply the Apache License to your work.
|
182 |
+
|
183 |
+
To apply the Apache License to your work, attach the following
|
184 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
185 |
+
replaced with your own identifying information. (Don't include
|
186 |
+
the brackets!) The text should be enclosed in the appropriate
|
187 |
+
comment syntax for the file format. We also recommend that a
|
188 |
+
file or class name and description of purpose be included on the
|
189 |
+
same "printed page" as the copyright notice for easier
|
190 |
+
identification within third-party archives.
|
191 |
+
|
192 |
+
Copyright [yyyy] [name of copyright owner]
|
193 |
+
|
194 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
195 |
+
you may not use this file except in compliance with the License.
|
196 |
+
You may obtain a copy of the License at
|
197 |
+
|
198 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
199 |
+
|
200 |
+
Unless required by applicable law or agreed to in writing, software
|
201 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
202 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
203 |
+
See the License for the specific language governing permissions and
|
204 |
+
limitations under the License.
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/bert_config.json
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"attention_probs_dropout_prob": 0.1,
|
3 |
+
"hidden_act": "gelu",
|
4 |
+
"hidden_dropout_prob": 0.1,
|
5 |
+
"hidden_size": 1024,
|
6 |
+
"initializer_range": 0.02,
|
7 |
+
"intermediate_size": 4096,
|
8 |
+
"max_position_embeddings": 512,
|
9 |
+
"num_attention_heads": 16,
|
10 |
+
"num_hidden_layers": 24,
|
11 |
+
"type_vocab_size": 2,
|
12 |
+
"vocab_size": 30522
|
13 |
+
}
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/checkpoints/.keep
ADDED
File without changes
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/BooksDownloader.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
import subprocess
|
15 |
+
|
16 |
+
class BooksDownloader:
    """Downloads the BookCorpus dataset via the bundled bookcorpus helper scripts."""

    def __init__(self, save_path):
        """
        Args:
            save_path: Directory under which a 'bookscorpus' subfolder will be
                populated by download().
        """
        self.save_path = save_path

    def download(self):
        """Run the bookcorpus download helper as a subprocess.

        Raises:
            subprocess.CalledProcessError: if the helper exits non-zero
                (because of check=True).
        """
        # NOTE(review): shell=True with an interpolated, unquoted save_path --
        # a path containing spaces or shell metacharacters would break the
        # command. Acceptable here since the path is operator-supplied, but a
        # list argv with shell=False would be safer.
        command = ('python3 /workspace/bookcorpus/download_files.py'
                   ' --list /workspace/bookcorpus/url_list.jsonl'
                   ' --out ' + self.save_path + '/bookscorpus'
                   ' --trash-bad-count')
        subprocess.run(command, shell=True, check=True)
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/BookscorpusTextFormatting.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
import glob
|
15 |
+
import os
|
16 |
+
|
17 |
+
class BookscorpusTextFormatting:
|
18 |
+
def __init__(self, books_path, output_filename, recursive = False):
|
19 |
+
self.books_path = books_path
|
20 |
+
self.recursive = recursive
|
21 |
+
self.output_filename = output_filename
|
22 |
+
|
23 |
+
|
24 |
+
# This puts one book per line
|
25 |
+
def merge(self):
|
26 |
+
with open(self.output_filename, mode='w', newline='\n') as ofile:
|
27 |
+
for filename in glob.glob(self.books_path + '/' + '*.txt', recursive=True):
|
28 |
+
with open(filename, mode='r', encoding='utf-8-sig', newline='\n') as file:
|
29 |
+
for line in file:
|
30 |
+
if line.strip() != '':
|
31 |
+
ofile.write(line.strip() + ' ')
|
32 |
+
ofile.write("\n\n")
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/Downloader.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
from GooglePretrainedWeightDownloader import GooglePretrainedWeightDownloader
|
15 |
+
from NVIDIAPretrainedWeightDownloader import NVIDIAPretrainedWeightDownloader
|
16 |
+
from WikiDownloader import WikiDownloader
|
17 |
+
from BooksDownloader import BooksDownloader
|
18 |
+
from GLUEDownloader import GLUEDownloader
|
19 |
+
from SquadDownloader import SquadDownloader
|
20 |
+
|
21 |
+
|
22 |
+
class Downloader:
    """Dispatches a dataset name to the matching specialized downloader."""

    def __init__(self, dataset_name, save_path):
        """
        Args:
            dataset_name: One of 'bookscorpus', 'wikicorpus_en',
                'wikicorpus_zh', 'google_pretrained_weights',
                'nvidia_pretrained_weights', 'mrpc', 'sst-2', 'squad', 'all'.
            save_path: Root directory the downloaders write into.
        """
        self.dataset_name = dataset_name
        self.save_path = save_path

    def download(self):
        """Download the dataset chosen at construction time.

        Raises:
            ValueError: if the dataset name is not recognized.
        """
        if self.dataset_name == 'bookscorpus':
            self.download_bookscorpus()

        elif self.dataset_name == 'wikicorpus_en':
            self.download_wikicorpus('en')

        elif self.dataset_name == 'wikicorpus_zh':
            self.download_wikicorpus('zh')

        elif self.dataset_name == 'google_pretrained_weights':
            self.download_google_pretrained_weights()

        elif self.dataset_name == 'nvidia_pretrained_weights':
            self.download_nvidia_pretrained_weights()

        elif self.dataset_name in {'mrpc', 'sst-2'}:
            self.download_glue(self.dataset_name)

        elif self.dataset_name == 'squad':
            self.download_squad()

        elif self.dataset_name == 'all':
            self.download_bookscorpus()
            self.download_wikicorpus('en')
            self.download_wikicorpus('zh')
            self.download_google_pretrained_weights()
            self.download_nvidia_pretrained_weights()
            self.download_glue('mrpc')
            self.download_glue('sst-2')
            self.download_squad()

        else:
            # Raise instead of `assert False`: asserts are stripped under -O,
            # which would silently turn an unknown name into a no-op.
            raise ValueError(
                'Unknown dataset_name provided to downloader: '
                + str(self.dataset_name))

    def download_bookscorpus(self):
        downloader = BooksDownloader(self.save_path)
        downloader.download()

    def download_wikicorpus(self, language):
        downloader = WikiDownloader(language, self.save_path)
        downloader.download()

    def download_google_pretrained_weights(self):
        downloader = GooglePretrainedWeightDownloader(self.save_path)
        downloader.download()

    def download_nvidia_pretrained_weights(self):
        downloader = NVIDIAPretrainedWeightDownloader(self.save_path)
        downloader.download()

    def download_glue(self, task_name):
        downloader = GLUEDownloader(self.save_path)
        downloader.download(task_name)

    def download_squad(self):
        downloader = SquadDownloader(self.save_path)
        downloader.download()
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/GLUEDownloader.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
import sys
|
15 |
+
import wget
|
16 |
+
|
17 |
+
from pathlib import Path
|
18 |
+
|
19 |
+
|
20 |
+
def mkdir(path):
    """Create *path* and any missing parent directories; no-op if it exists."""
    target = Path(path)
    target.mkdir(parents=True, exist_ok=True)
|
22 |
+
|
23 |
+
|
24 |
+
class GLUEDownloader:
    """Fetches GLUE task data using the public download_glue_data helper script."""

    def __init__(self, save_path):
        self.save_path = save_path + '/glue'

    def download(self, task_name):
        """Download one GLUE task ('mrpc', 'mnli', 'cola', or 'sst-2') into
        self.save_path via the downloaded helper script."""
        mkdir(self.save_path)
        # Translate the lowercase CLI task name into the helper's spelling.
        if task_name in {'mrpc', 'mnli'}:
            script_task = task_name.upper()
        elif task_name == 'cola':
            script_task = 'CoLA'
        else:  # SST-2
            assert task_name == 'sst-2'
            script_task = 'SST'
        wget.download(
            'https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/1502038877f6a88c225a34450793fbc3ea87eaba/download_glue_data.py',
            out=self.save_path,
        )
        # The helper is fetched at runtime, so import it from the save dir.
        sys.path.append(self.save_path)
        import download_glue_data
        download_glue_data.main(
            ['--data_dir', self.save_path, '--tasks', script_task])
        sys.path.pop()
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/GooglePretrainedWeightDownloader.py
ADDED
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
import hashlib
|
15 |
+
import os
|
16 |
+
import urllib.request
|
17 |
+
import zipfile
|
18 |
+
|
19 |
+
class GooglePretrainedWeightDownloader:
    """Downloads Google's official BERT checkpoints, unzips them, and verifies
    each extracted file against a pinned SHA-256 checksum."""

    def __init__(self, save_path):
        """
        Args:
            save_path: Root directory; files land in
                save_path/google_pretrained_weights.
        """
        self.save_path = save_path + '/google_pretrained_weights'

        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)

        # Download urls: model key -> (url, local zip filename)
        self.model_urls = {
            'bert_base_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip', 'uncased_L-12_H-768_A-12.zip'),
            'bert_large_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip', 'uncased_L-24_H-1024_A-16.zip'),
            'bert_base_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip', 'cased_L-12_H-768_A-12.zip'),
            'bert_large_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip', 'cased_L-24_H-1024_A-16.zip'),
            'bert_base_multilingual_cased': ('https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip', 'multi_cased_L-12_H-768_A-12.zip'),
            'bert_large_multilingual_uncased': ('https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', 'multilingual_L-12_H-768_A-12.zip'),
            'bert_base_chinese': ('https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip', 'chinese_L-12_H-768_A-12.zip')
        }

        # SHA256sum verification for file download integrity (and checking for changes from the download source over time)
        self.bert_base_uncased_sha = {
            'bert_config.json': '7b4e5f53efbd058c67cda0aacfafb340113ea1b5797d9ce6ee411704ba21fcbc',
            'bert_model.ckpt.data-00000-of-00001': '58580dc5e0bf0ae0d2efd51d0e8272b2f808857f0a43a88aaf7549da6d7a8a84',
            'bert_model.ckpt.index': '04c1323086e2f1c5b7c0759d8d3e484afbb0ab45f51793daab9f647113a0117b',
            'bert_model.ckpt.meta': 'dd5682170a10c3ea0280c2e9b9a45fee894eb62da649bbdea37b38b0ded5f60e',
            'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3',
        }

        self.bert_large_uncased_sha = {
            'bert_config.json': 'bfa42236d269e2aeb3a6d30412a33d15dbe8ea597e2b01dc9518c63cc6efafcb',
            'bert_model.ckpt.data-00000-of-00001': 'bc6b3363e3be458c99ecf64b7f472d2b7c67534fd8f564c0556a678f90f4eea1',
            'bert_model.ckpt.index': '68b52f2205ffc64dc627d1120cf399c1ef1cbc35ea5021d1afc889ffe2ce2093',
            'bert_model.ckpt.meta': '6fcce8ff7628f229a885a593625e3d5ff9687542d5ef128d9beb1b0c05edc4a1',
            'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3',
        }

        self.bert_base_cased_sha = {
            'bert_config.json': 'f11dfb757bea16339a33e1bf327b0aade6e57fd9c29dc6b84f7ddb20682f48bc',
            'bert_model.ckpt.data-00000-of-00001': '734d5a1b68bf98d4e9cb6b6692725d00842a1937af73902e51776905d8f760ea',
            'bert_model.ckpt.index': '517d6ef5c41fc2ca1f595276d6fccf5521810d57f5a74e32616151557790f7b1',
            'bert_model.ckpt.meta': '5f8a9771ff25dadd61582abb4e3a748215a10a6b55947cbb66d0f0ba1694be98',
            'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02',
        }

        self.bert_large_cased_sha = {
            'bert_config.json': '7adb2125c8225da495656c982fd1c5f64ba8f20ad020838571a3f8a954c2df57',
            'bert_model.ckpt.data-00000-of-00001': '6ff33640f40d472f7a16af0c17b1179ca9dcc0373155fb05335b6a4dd1657ef0',
            'bert_model.ckpt.index': 'ef42a53f577fbe07381f4161b13c7cab4f4fc3b167cec6a9ae382c53d18049cf',
            'bert_model.ckpt.meta': 'd2ddff3ed33b80091eac95171e94149736ea74eb645e575d942ec4a5e01a40a1',
            'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02',
        }

        self.bert_base_multilingual_cased_sha = {
            'bert_config.json': 'e76c3964bc14a8bb37a5530cdc802699d2f4a6fddfab0611e153aa2528f234f0',
            'bert_model.ckpt.data-00000-of-00001': '55b8a2df41f69c60c5180e50a7c31b7cdf6238909390c4ddf05fbc0d37aa1ac5',
            'bert_model.ckpt.index': '7d8509c2a62b4e300feb55f8e5f1eef41638f4998dd4d887736f42d4f6a34b37',
            'bert_model.ckpt.meta': '95e5f1997e8831f1c31e5cf530f1a2e99f121e9cd20887f2dce6fe9e3343e3fa',
            'vocab.txt': 'fe0fda7c425b48c516fc8f160d594c8022a0808447475c1a7c6d6479763f310c',
        }

        self.bert_large_multilingual_uncased_sha = {
            'bert_config.json': '49063bb061390211d2fdd108cada1ed86faa5f90b80c8f6fdddf406afa4c4624',
            'bert_model.ckpt.data-00000-of-00001': '3cd83912ebeb0efe2abf35c9f1d5a515d8e80295e61c49b75c8853f756658429',
            'bert_model.ckpt.index': '87c372c1a3b1dc7effaaa9103c80a81b3cbab04c7933ced224eec3b8ad2cc8e7',
            'bert_model.ckpt.meta': '27f504f34f02acaa6b0f60d65195ec3e3f9505ac14601c6a32b421d0c8413a29',
            'vocab.txt': '87b44292b452f6c05afa49b2e488e7eedf79ea4f4c39db6f2f4b37764228ef3f',
        }

        self.bert_base_chinese_sha = {
            'bert_config.json': '7aaad0335058e2640bcb2c2e9a932b1cd9da200c46ea7b8957d54431f201c015',
            'bert_model.ckpt.data-00000-of-00001': '756699356b78ad0ef1ca9ba6528297bcb3dd1aef5feadd31f4775d7c7fc989ba',
            'bert_model.ckpt.index': '46315546e05ce62327b3e2cd1bed22836adcb2ff29735ec87721396edb21b82e',
            'bert_model.ckpt.meta': 'c0f8d51e1ab986604bc2b25d6ec0af7fd21ff94cf67081996ec3f3bf5d823047',
            'vocab.txt': '45bbac6b341c319adc98a532532882e91a9cefc0329aa57bac9ae761c27b291c',
        }

        # Relate SHA to urls for loop below
        self.model_sha = {
            'bert_base_uncased': self.bert_base_uncased_sha,
            'bert_large_uncased': self.bert_large_uncased_sha,
            'bert_base_cased': self.bert_base_cased_sha,
            'bert_large_cased': self.bert_large_cased_sha,
            'bert_base_multilingual_cased': self.bert_base_multilingual_cased_sha,
            'bert_large_multilingual_uncased': self.bert_large_multilingual_uncased_sha,
            'bert_base_chinese': self.bert_base_chinese_sha
        }

    # Helper to get sha256sum of a file
    def sha256sum(self, filename):
        """Return the SHA-256 hex digest of *filename*, read in 128 KiB chunks."""
        h = hashlib.sha256()
        b = bytearray(128*1024)
        mv = memoryview(b)
        with open(filename, 'rb', buffering=0) as f:
            for n in iter(lambda : f.readinto(mv), 0):
                h.update(mv[:n])

        return h.hexdigest()

    def download(self):
        """Download, unzip, and checksum-verify every model in self.model_urls."""
        # Iterate over urls: download, unzip, verify sha256sum
        found_mismatch_sha = False
        for model in self.model_urls:
            url = self.model_urls[model][0]
            file = self.save_path + '/' + self.model_urls[model][1]

            print('Downloading', url)
            response = urllib.request.urlopen(url)
            with open(file, 'wb') as handle:
                handle.write(response.read())

            print('Unzipping', file)
            # Context manager guarantees the archive handle is closed even if
            # extraction fails; also avoids shadowing the builtin `zip`.
            with zipfile.ZipFile(file, 'r') as archive:
                archive.extractall(self.save_path)

            sha_dict = self.model_sha[model]
            for extracted_file in sha_dict:
                sha = sha_dict[extracted_file]
                # file[:-4] strips the '.zip' suffix to get the extraction dir.
                if sha != self.sha256sum(file[:-4] + '/' + extracted_file):
                    found_mismatch_sha = True
                    print('SHA256sum does not match on file:', extracted_file, 'from download url:', url)
                else:
                    print(file[:-4] + '/' + extracted_file, '\t', 'verified')

        if not found_mismatch_sha:
            print("All downloads pass sha256sum verification.")

    def serialize(self):
        pass

    def deserialize(self):
        pass

    def listAvailableWeights(self):
        print("Available Weight Datasets")
        for item in self.model_urls:
            print(item)

    def listLocallyStoredWeights(self):
        pass
|
158 |
+
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/NVIDIAPretrainedWeightDownloader.py
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
import os
|
15 |
+
|
16 |
+
class NVIDIAPretrainedWeightDownloader:
    """Placeholder downloader for NVIDIA pretrained weights (not implemented)."""

    def __init__(self, save_path):
        """Create the nvidia_pretrained_weights output directory under save_path."""
        self.save_path = save_path + '/nvidia_pretrained_weights'
        # exist_ok avoids the race between the existence check and creation.
        os.makedirs(self.save_path, exist_ok=True)

    def download(self):
        # NotImplementedError instead of `assert False`: asserts are stripped
        # under -O, which would turn this into a silent no-op.
        raise NotImplementedError('NVIDIAPretrainedWeightDownloader not implemented yet.')
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/SquadDownloader.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
import bz2
|
15 |
+
import os
|
16 |
+
import urllib.request
|
17 |
+
import sys
|
18 |
+
|
19 |
+
class SquadDownloader:
    """Downloads SQuAD v1.1/v2.0 train/dev sets plus the official eval scripts."""

    def __init__(self, save_path):
        self.save_path = save_path + '/squad'

        # Create the root and both version subdirectories if missing.
        for subdir in ('', '/v1.1', '/v2.0'):
            target = self.save_path + subdir
            if not os.path.exists(target):
                os.makedirs(target)

        # url -> destination path relative to self.save_path
        self.download_urls = {
            'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json' : 'v1.1/train-v1.1.json',
            'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json' : 'v1.1/dev-v1.1.json',
            'https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/' : 'v1.1/evaluate-v1.1.py',
            'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json' : 'v2.0/train-v2.0.json',
            'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json' : 'v2.0/dev-v2.0.json',
            'https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/' : 'v2.0/evaluate-v2.0.py',
        }

    def download(self):
        """Fetch every URL in self.download_urls, skipping files already present."""
        for url, relative_name in self.download_urls.items():
            destination = self.save_path + '/' + relative_name

            print('Downloading:', url)
            if os.path.isfile(destination):
                print('** Download file already exists, skipping download')
            else:
                response = urllib.request.urlopen(url)
                with open(destination, "wb") as handle:
                    handle.write(response.read())
|
53 |
+
|
54 |
+
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/TextSharding.py
ADDED
@@ -0,0 +1,327 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
from collections import defaultdict
|
15 |
+
from itertools import islice
|
16 |
+
|
17 |
+
import multiprocessing
|
18 |
+
import statistics
|
19 |
+
|
20 |
+
class Sharding:
|
21 |
+
def __init__(self, input_files, output_name_prefix, n_training_shards, n_test_shards, fraction_test_set):
|
22 |
+
assert len(input_files) > 0, 'The input file list must contain at least one file.'
|
23 |
+
assert n_training_shards > 0, 'There must be at least one output shard.'
|
24 |
+
assert n_test_shards > 0, 'There must be at least one output shard.'
|
25 |
+
|
26 |
+
self.n_training_shards = n_training_shards
|
27 |
+
self.n_test_shards = n_test_shards
|
28 |
+
self.fraction_test_set = fraction_test_set
|
29 |
+
|
30 |
+
self.input_files = input_files
|
31 |
+
|
32 |
+
self.output_name_prefix = output_name_prefix
|
33 |
+
self.output_training_identifier = '_training'
|
34 |
+
self.output_test_identifier = '_test'
|
35 |
+
self.output_file_extension = '.txt'
|
36 |
+
|
37 |
+
self.articles = {} # key: integer identifier, value: list of articles
|
38 |
+
self.sentences = {} # key: integer identifier, value: list of sentences
|
39 |
+
self.output_training_files = {} # key: filename, value: list of articles to go into file
|
40 |
+
self.output_test_files = {} # key: filename, value: list of articles to go into file
|
41 |
+
|
42 |
+
self.init_output_files()
|
43 |
+
|
44 |
+
|
45 |
+
# Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines)
|
46 |
+
def load_articles(self):
|
47 |
+
print('Start: Loading Articles')
|
48 |
+
|
49 |
+
global_article_count = 0
|
50 |
+
for input_file in self.input_files:
|
51 |
+
print('input file:', input_file)
|
52 |
+
with open(input_file, mode='r', newline='\n') as f:
|
53 |
+
for i, line in enumerate(f):
|
54 |
+
if line.strip():
|
55 |
+
self.articles[global_article_count] = line.rstrip()
|
56 |
+
global_article_count += 1
|
57 |
+
|
58 |
+
print('End: Loading Articles: There are', len(self.articles), 'articles.')
|
59 |
+
|
60 |
+
|
61 |
+
def segment_articles_into_sentences(self, segmenter):
|
62 |
+
print('Start: Sentence Segmentation')
|
63 |
+
if len(self.articles) is 0:
|
64 |
+
self.load_articles()
|
65 |
+
|
66 |
+
assert len(self.articles) is not 0, 'Please check that input files are present and contain data.'
|
67 |
+
|
68 |
+
# TODO: WIP: multiprocessing (create independent ranges and spawn processes)
|
69 |
+
use_multiprocessing = 'serial'
|
70 |
+
|
71 |
+
def chunks(data, size=len(self.articles)):
|
72 |
+
it = iter(data)
|
73 |
+
for i in range(0, len(data), size):
|
74 |
+
yield {k: data[k] for k in islice(it, size)}
|
75 |
+
|
76 |
+
if use_multiprocessing == 'manager':
|
77 |
+
manager = multiprocessing.Manager()
|
78 |
+
return_dict = manager.dict()
|
79 |
+
jobs = []
|
80 |
+
n_processes = 7 # in addition to the main process, total = n_proc+1
|
81 |
+
|
82 |
+
def work(articles, return_dict):
|
83 |
+
sentences = {}
|
84 |
+
for i, article in enumerate(articles):
|
85 |
+
sentences[i] = segmenter.segment_string(articles[article])
|
86 |
+
|
87 |
+
if i % 5000 == 0:
|
88 |
+
print('Segmenting article', i)
|
89 |
+
|
90 |
+
return_dict.update(sentences)
|
91 |
+
|
92 |
+
for item in chunks(self.articles, len(self.articles)):
|
93 |
+
p = multiprocessing.Process(target=work, args=(item, return_dict))
|
94 |
+
|
95 |
+
# Busy wait
|
96 |
+
while len(jobs) >= n_processes:
|
97 |
+
pass
|
98 |
+
|
99 |
+
jobs.append(p)
|
100 |
+
p.start()
|
101 |
+
|
102 |
+
for proc in jobs:
|
103 |
+
proc.join()
|
104 |
+
|
105 |
+
elif use_multiprocessing == 'queue':
|
106 |
+
work_queue = multiprocessing.Queue()
|
107 |
+
jobs = []
|
108 |
+
|
109 |
+
for item in chunks(self.articles, len(self.articles)):
|
110 |
+
pass
|
111 |
+
|
112 |
+
else: # serial option
|
113 |
+
for i, article in enumerate(self.articles):
|
114 |
+
self.sentences[i] = segmenter.segment_string(self.articles[article])
|
115 |
+
|
116 |
+
if i % 5000 == 0:
|
117 |
+
print('Segmenting article', i)
|
118 |
+
|
119 |
+
print('End: Sentence Segmentation')
|
120 |
+
|
121 |
+
|
122 |
+
def init_output_files(self):
|
123 |
+
print('Start: Init Output Files')
|
124 |
+
assert len(self.output_training_files) is 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.'
|
125 |
+
assert len(self.output_test_files) is 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.'
|
126 |
+
|
127 |
+
for i in range(self.n_training_shards):
|
128 |
+
name = self.output_name_prefix + self.output_training_identifier + '_' + str(i) + self.output_file_extension
|
129 |
+
self.output_training_files[name] = []
|
130 |
+
|
131 |
+
for i in range(self.n_test_shards):
|
132 |
+
name = self.output_name_prefix + self.output_test_identifier + '_' + str(i) + self.output_file_extension
|
133 |
+
self.output_test_files[name] = []
|
134 |
+
|
135 |
+
print('End: Init Output Files')
|
136 |
+
|
137 |
+
|
138 |
+
def get_sentences_per_shard(self, shard):
|
139 |
+
result = 0
|
140 |
+
for article_id in shard:
|
141 |
+
result += len(self.sentences[article_id])
|
142 |
+
|
143 |
+
return result
|
144 |
+
|
145 |
+
|
146 |
+
def distribute_articles_over_shards(self):
    """Greedily assign every article to a training or test shard.

    Strategy (as implemented below):
      1. Bucket article ids by sentence count (``sentence_counts``).
      2. First pass: give each shard one article, largest-first.
      3. Repeated passes: top up each shard with the biggest article that fits
         under its nominal size, skipping shards already above the median.
      4. If several passes place nothing (tracked via ``history_remaining``),
         bump the nominal training-shard size by one sentence and retry.

    Fix vs. original: the two ``nominal_next_article_size is 0`` checks used
    identity comparison with an int literal (implementation-dependent,
    SyntaxWarning on Python >= 3.8); they are now ``== 0``.
    """
    print('Start: Distribute Articles Over Shards')
    assert len(self.articles) >= self.n_training_shards + self.n_test_shards, 'There are fewer articles than shards. Please add more data or reduce the number of shards requested.'

    # Create dictionary with - key: sentence count per article, value: article id number
    sentence_counts = defaultdict(lambda: [])

    max_sentences = 0
    total_sentences = 0

    for article_id in self.sentences:
        current_length = len(self.sentences[article_id])
        sentence_counts[current_length].append(article_id)
        max_sentences = max(max_sentences, current_length)
        total_sentences += current_length

    n_sentences_assigned_to_training = int((1 - self.fraction_test_set) * total_sentences)
    nominal_sentences_per_training_shard = n_sentences_assigned_to_training // self.n_training_shards
    nominal_sentences_per_test_shard = (total_sentences - n_sentences_assigned_to_training) // self.n_test_shards

    consumed_article_set = set({})
    unused_article_set = set(self.articles.keys())

    # Make first pass and add one article worth of lines per file
    for file in self.output_training_files:
        current_article_id = sentence_counts[max_sentences][-1]
        sentence_counts[max_sentences].pop(-1)
        self.output_training_files[file].append(current_article_id)
        consumed_article_set.add(current_article_id)
        unused_article_set.remove(current_article_id)

        # Maintain the max sentence count
        while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
            max_sentences -= 1

        if len(self.sentences[current_article_id]) > nominal_sentences_per_training_shard:
            nominal_sentences_per_training_shard = len(self.sentences[current_article_id])
            print('Warning: A single article contains more than the nominal number of sentences per training shard.')

    for file in self.output_test_files:
        current_article_id = sentence_counts[max_sentences][-1]
        sentence_counts[max_sentences].pop(-1)
        self.output_test_files[file].append(current_article_id)
        consumed_article_set.add(current_article_id)
        unused_article_set.remove(current_article_id)

        # Maintain the max sentence count
        while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
            max_sentences -= 1

        if len(self.sentences[current_article_id]) > nominal_sentences_per_test_shard:
            nominal_sentences_per_test_shard = len(self.sentences[current_article_id])
            print('Warning: A single article contains more than the nominal number of sentences per test shard.')

    training_counts = []
    test_counts = []

    for shard in self.output_training_files:
        training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard]))

    for shard in self.output_test_files:
        test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard]))

    training_median = statistics.median(training_counts)
    test_median = statistics.median(test_counts)

    # Make subsequent passes over files to find articles to add without going over limit
    history_remaining = []
    n_history_remaining = 4

    while len(consumed_article_set) < len(self.articles):
        for fidx, file in enumerate(self.output_training_files):
            nominal_next_article_size = min(nominal_sentences_per_training_shard - training_counts[fidx], max_sentences)

            # Maintain the max sentence count
            while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
                max_sentences -= 1

            while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0:
                nominal_next_article_size -= 1

            if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or training_counts[fidx] > training_median:
                continue  # skip adding to this file, will come back later if no file can accept unused articles

            current_article_id = sentence_counts[nominal_next_article_size][-1]
            sentence_counts[nominal_next_article_size].pop(-1)

            self.output_training_files[file].append(current_article_id)
            consumed_article_set.add(current_article_id)
            unused_article_set.remove(current_article_id)

        for fidx, file in enumerate(self.output_test_files):
            nominal_next_article_size = min(nominal_sentences_per_test_shard - test_counts[fidx], max_sentences)

            # Maintain the max sentence count
            while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0:
                max_sentences -= 1

            while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0:
                nominal_next_article_size -= 1

            if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or test_counts[fidx] > test_median:
                continue  # skip adding to this file, will come back later if no file can accept unused articles

            current_article_id = sentence_counts[nominal_next_article_size][-1]
            sentence_counts[nominal_next_article_size].pop(-1)

            self.output_test_files[file].append(current_article_id)
            consumed_article_set.add(current_article_id)
            unused_article_set.remove(current_article_id)

        # If unable to place articles a few times, bump up nominal sizes by fraction until articles get placed
        if len(history_remaining) == n_history_remaining:
            history_remaining.pop(0)
        history_remaining.append(len(unused_article_set))

        history_same = True
        for i in range(1, len(history_remaining)):
            history_same = history_same and (history_remaining[i-1] == history_remaining[i])

        if history_same:
            nominal_sentences_per_training_shard += 1
            # nominal_sentences_per_test_shard += 1

        training_counts = []
        test_counts = []
        for shard in self.output_training_files:
            training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard]))

        for shard in self.output_test_files:
            test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard]))

        training_median = statistics.median(training_counts)
        test_median = statistics.median(test_counts)

        print('Distributing data over shards:', len(unused_article_set), 'articles remaining.')

    if len(unused_article_set) != 0:
        print('Warning: Some articles did not make it into output files.')

    for shard in self.output_training_files:
        print('Training shard:', self.get_sentences_per_shard(self.output_training_files[shard]))

    for shard in self.output_test_files:
        print('Test shard:', self.get_sentences_per_shard(self.output_test_files[shard]))

    print('End: Distribute Articles Over Shards')
|
295 |
+
|
296 |
+
|
297 |
+
def write_shards_to_disk(self):
    """Persist every training and test shard to its own file via write_single_shard."""
    print('Start: Write Shards to Disk')

    for shard_name, article_ids in self.output_training_files.items():
        self.write_single_shard(shard_name, article_ids)

    for shard_name, article_ids in self.output_test_files.items():
        self.write_single_shard(shard_name, article_ids)

    print('End: Write Shards to Disk')
|
306 |
+
|
307 |
+
|
308 |
+
def write_single_shard(self, shard_name, shard):
    """Write one shard to *shard_name*: one sentence per line, with a blank
    line separating consecutive articles.

    *shard* is a list of article ids indexing ``self.sentences``.
    """
    with open(shard_name, mode='w', newline='\n') as out:
        for article_id in shard:
            out.writelines(sentence + '\n' for sentence in self.sentences[article_id])
            out.write('\n')  # Line break between articles
|
315 |
+
|
316 |
+
|
317 |
+
import nltk

# Fetch the Punkt sentence-tokenizer model at import time so that
# NLTKSegmenter.segment_string works without further setup.
# NOTE(review): network download as a module-import side effect — consider
# moving into NLTKSegmenter; confirm with callers before changing.
nltk.download('punkt')
|
320 |
+
|
321 |
+
class NLTKSegmenter:
    """Sentence segmenter backed by NLTK's Punkt tokenizer (requires the
    'punkt' model to have been downloaded)."""

    def __init__(self):
        # Fixed: the original spelled this `__init` (missing trailing
        # underscores), so it silently defined an ordinary method instead of
        # the constructor. The no-op body is preserved.
        pass

    def segment_string(self, article):
        """Split *article* (a single string) into a list of sentence strings."""
        return nltk.tokenize.sent_tokenize(article)
|
327 |
+
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/WikiDownloader.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
import bz2
|
15 |
+
import os
|
16 |
+
import urllib.request
|
17 |
+
import subprocess
|
18 |
+
import sys
|
19 |
+
|
20 |
+
class WikiDownloader:
    """Downloads and decompresses a Wikipedia pages-articles XML dump.

    Supported languages: 'en' and 'zh'. Files are stored under
    ``<save_path>/wikicorpus_<language>``.
    """

    def __init__(self, language, save_path):
        self.save_path = save_path + '/wikicorpus_' + language

        # exist_ok avoids the race-prone exists()-then-makedirs() pattern.
        os.makedirs(self.save_path, exist_ok=True)

        self.language = language
        # 'latest' dumps: exact contents change between Wikimedia snapshots.
        self.download_urls = {
            'en' : 'https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2',
            'zh' : 'https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2'
        }

        self.output_files = {
            'en' : 'wikicorpus_en.xml.bz2',
            'zh' : 'wikicorpus_zh.xml.bz2'
        }

    def download(self):
        """Fetch the dump (skipping if already present) and decompress it.

        Raises:
            subprocess.CalledProcessError: if wget or bzip2 exits non-zero.
            AssertionError: if no download URL is configured for the language.
        """
        if self.language in self.download_urls:
            url = self.download_urls[self.language]
            filename = self.output_files[self.language]
            target = self.save_path + '/' + filename

            print('Downloading:', url)
            if os.path.isfile(target):
                print('** Download file already exists, skipping download')
            else:
                # Argument-list form (shell=False) instead of a concatenated
                # shell string: robust against spaces/metacharacters in paths.
                subprocess.run(['wget', '-O', target, url], check=True)

            # Always unzipping since this is relatively fast and will overwrite
            print('Unzipping:', self.output_files[self.language])
            subprocess.run(['bzip2', '-dk', target], check=True)

        else:
            assert False, 'WikiDownloader not implemented for this language yet.'
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/WikicorpusTextFormatting.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
import glob
|
15 |
+
import os
|
16 |
+
|
17 |
+
class WikicorpusTextFormatting:
    """Merges WikiExtractor output files into one text file with one article
    per line (articles separated by a blank line)."""

    def __init__(self, wiki_path, output_filename, recursive = False):
        self.wiki_path = wiki_path
        self.recursive = recursive
        self.output_filename = output_filename

    # This puts one article per line
    def merge(self):
        """Walk the extracted wiki_* files and append their articles to the output."""
        with open(self.output_filename, mode='w', newline='\n') as ofile:
            for dirname in glob.glob(self.wiki_path + '/*/', recursive=False):
                for filename in glob.glob(dirname + 'wiki_*', recursive=self.recursive):
                    print(filename)
                    self._flush_articles(filename, ofile)

    def _flush_articles(self, filename, ofile):
        """Write each <doc ...>...</doc> body of *filename* as a single line.

        The first captured line after <doc> (the title) is dropped; remaining
        non-blank lines are joined with spaces.
        """
        article_lines = []
        article_open = False

        with open(filename, mode='r', newline='\n') as file:
            for line in file:
                if '<doc id=' in line:
                    article_open = True
                elif '</doc>' in line:
                    article_open = False
                    for oline in article_lines[1:]:
                        if oline != '\n':
                            ofile.write(oline.rstrip() + " ")
                    ofile.write("\n\n")
                    article_lines = []
                elif article_open:
                    article_lines.append(line)
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/__init__.py
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/bertPrep.py
ADDED
@@ -0,0 +1,363 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
import BookscorpusTextFormatting
|
15 |
+
import Downloader
|
16 |
+
import TextSharding
|
17 |
+
import WikicorpusTextFormatting
|
18 |
+
|
19 |
+
import argparse
|
20 |
+
import itertools
|
21 |
+
import multiprocessing
|
22 |
+
import os
|
23 |
+
import pprint
|
24 |
+
import subprocess
|
25 |
+
|
26 |
+
|
27 |
+
def main(args):
    """Dispatch one BERT data-prep step according to ``args.action``.

    Actions: 'download', 'text_formatting', 'sharding',
    'create_tfrecord_files' (disabled), 'create_hdf5_files'.
    The working directory comes from the BERT_PREP_WORKING_DIR environment
    variable (raises KeyError if unset). Depends on the sibling modules
    Downloader, BookscorpusTextFormatting, WikicorpusTextFormatting and
    TextSharding.
    """
    working_dir = os.environ['BERT_PREP_WORKING_DIR']

    print('Working Directory:', working_dir)
    print('Action:', args.action)
    print('Dataset Name:', args.dataset)

    # --input_files arrives as a comma-separated string; normalize to a list.
    if args.input_files:
        args.input_files = args.input_files.split(',')

    # Encode every knob that affects tfrecord/hdf5 contents into the folder
    # name so differently-configured runs never collide on disk.
    hdf5_tfrecord_folder_prefix = "_lower_case_" + str(args.do_lower_case) + "_seq_len_" + str(args.max_seq_length) \
        + "_max_pred_" + str(args.max_predictions_per_seq) + "_masked_lm_prob_" + str(args.masked_lm_prob) \
        + "_random_seed_" + str(args.random_seed) + "_dupe_factor_" + str(args.dupe_factor)

    directory_structure = {
        'download' : working_dir + '/download',    # Downloaded and decompressed
        'extracted' : working_dir +'/extracted',    # Extracted from whatever the initial format is (e.g., wikiextractor)
        'formatted' : working_dir + '/formatted_one_article_per_line',    # This is the level where all sources should look the same
        'sharded' : working_dir + '/sharded_' + "training_shards_" + str(args.n_training_shards) + "_test_shards_" + str(args.n_test_shards) + "_fraction_" + str(args.fraction_test_set),
        'tfrecord' : working_dir + '/tfrecord'+ hdf5_tfrecord_folder_prefix,
        'hdf5': working_dir + '/hdf5' + hdf5_tfrecord_folder_prefix
    }

    print('\nDirectory Structure:')
    pp = pprint.PrettyPrinter(indent=2)
    pp.pprint(directory_structure)
    print('')

    if args.action == 'download':
        if not os.path.exists(directory_structure['download']):
            os.makedirs(directory_structure['download'])

        downloader = Downloader.Downloader(args.dataset, directory_structure['download'])
        downloader.download()

    elif args.action == 'text_formatting':
        assert args.dataset != 'google_pretrained_weights' and args.dataset != 'nvidia_pretrained_weights' and args.dataset != 'squad' and args.dataset != 'mrpc', 'Cannot perform text_formatting on pretrained weights'

        if not os.path.exists(directory_structure['extracted']):
            os.makedirs(directory_structure['extracted'])

        if not os.path.exists(directory_structure['formatted']):
            os.makedirs(directory_structure['formatted'])

        if args.dataset == 'bookscorpus':
            books_path = directory_structure['download'] + '/bookscorpus'
            #books_path = directory_structure['download']
            output_filename = directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt'
            books_formatter = BookscorpusTextFormatting.BookscorpusTextFormatting(books_path, output_filename, recursive=True)
            books_formatter.merge()

        elif args.dataset == 'wikicorpus_en':
            if args.skip_wikiextractor == 0:
                path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py'
                # NOTE(review): command built by string concatenation and run
                # with shell=True — paths containing spaces would break it.
                wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_en.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
                print('WikiExtractor Command:', wikiextractor_command)
                wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True)
                #wikiextractor_process.communicate()

            wiki_path = directory_structure['extracted'] + '/wikicorpus_en'
            output_filename = directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt'
            wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
            wiki_formatter.merge()

        elif args.dataset == 'wikicorpus_zh':
            assert False, 'wikicorpus_zh not fully supported at this time. The simplified/tradition Chinese data needs to be translated and properly segmented still, and should work once this step is added.'
            # NOTE(review): everything below in this branch is unreachable
            # until the assert above is removed; kept verbatim.
            if args.skip_wikiextractor == 0:
                path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py'
                wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_zh.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
                print('WikiExtractor Command:', wikiextractor_command)
                wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True)
                #wikiextractor_process.communicate()

            wiki_path = directory_structure['extracted'] + '/wikicorpus_zh'
            output_filename = directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt'
            wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
            wiki_formatter.merge()

            assert os.stat(output_filename).st_size > 0, 'File glob did not pick up extracted wiki files from WikiExtractor.'

    elif args.action == 'sharding':
        # Note: books+wiki requires user to provide list of input_files (comma-separated with no spaces)
        if args.dataset == 'bookscorpus' or 'wikicorpus' in args.dataset or 'books_wiki' in args.dataset:
            # Default input files per dataset when the user supplied none.
            if args.input_files is None:
                if args.dataset == 'bookscorpus':
                    args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt']
                elif args.dataset == 'wikicorpus_en':
                    args.input_files = [directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']
                elif args.dataset == 'wikicorpus_zh':
                    args.input_files = [directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt']
                elif args.dataset == 'books_wiki_en_corpus':
                    args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt', directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']

            output_file_prefix = directory_structure['sharded'] + '/' + args.dataset + '/' + args.dataset

            if not os.path.exists(directory_structure['sharded']):
                os.makedirs(directory_structure['sharded'])

            if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset):
                os.makedirs(directory_structure['sharded'] + '/' + args.dataset)

            # Segmentation is here because all datasets look the same in one article/book/whatever per line format, and
            # it seemed unnecessarily complicated to add an additional preprocessing step to call just for this.
            # Different languages (e.g., Chinese simplified/traditional) may require translation and
            # other packages to be called from here -- just add a conditional branch for those extra steps
            segmenter = TextSharding.NLTKSegmenter()
            sharding = TextSharding.Sharding(args.input_files, output_file_prefix, args.n_training_shards, args.n_test_shards, args.fraction_test_set)

            sharding.load_articles()
            sharding.segment_articles_into_sentences(segmenter)
            sharding.distribute_articles_over_shards()
            sharding.write_shards_to_disk()

        else:
            assert False, 'Unsupported dataset for sharding'

    elif args.action == 'create_tfrecord_files':
        # NOTE(review): this branch is intentionally disabled; the code below
        # the assert is unreachable and kept verbatim for reference.
        assert False, 'TFrecord creation not supported in this PyTorch model example release.' \
            ''
        if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset):
            os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset)

        def create_record_worker(filename_prefix, shard_id, output_format='tfrecord'):
            # Builds and launches one create_pretraining_data.py invocation
            # for the given shard; throttles every args.n_processes launches.
            bert_preprocessing_command = 'python /workspace/bert/create_pretraining_data.py'
            bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.txt'
            bert_preprocessing_command += ' --output_file=' + directory_structure['tfrecord'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format
            bert_preprocessing_command += ' --vocab_file=' + args.vocab_file
            bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else ''
            bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length)
            bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq)
            bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob)
            bert_preprocessing_command += ' --random_seed=' + str(args.random_seed)
            bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor)
            bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True)

            last_process = bert_preprocessing_process

            # This could be better optimized (fine if all take equal time)
            if shard_id % args.n_processes == 0 and shard_id > 0:
                bert_preprocessing_process.wait()
            return last_process

        output_file_prefix = args.dataset

        for i in range(args.n_training_shards):
            last_process =create_record_worker(output_file_prefix + '_training', i)

        last_process.wait()

        for i in range(args.n_test_shards):
            last_process = create_record_worker(output_file_prefix + '_test', i)

        last_process.wait()


    elif args.action == 'create_hdf5_files':
        last_process = None

        if not os.path.exists(directory_structure['hdf5'] + "/" + args.dataset):
            os.makedirs(directory_structure['hdf5'] + "/" + args.dataset)

        def create_record_worker(filename_prefix, shard_id, output_format='hdf5'):
            # Builds and launches one create_pretraining_data.py invocation
            # for the given shard; throttles every args.n_processes launches.
            bert_preprocessing_command = 'python /workspace/bert/create_pretraining_data.py'
            bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.txt'
            bert_preprocessing_command += ' --output_file=' + directory_structure['hdf5'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format
            bert_preprocessing_command += ' --vocab_file=' + args.vocab_file
            bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else ''
            bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length)
            bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq)
            bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob)
            bert_preprocessing_command += ' --random_seed=' + str(args.random_seed)
            bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor)
            bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True)

            last_process = bert_preprocessing_process

            # This could be better optimized (fine if all take equal time)
            if shard_id % args.n_processes == 0 and shard_id > 0:
                bert_preprocessing_process.wait()
            return last_process

        output_file_prefix = args.dataset

        for i in range(args.n_training_shards):
            last_process = create_record_worker(output_file_prefix + '_training', i)

        last_process.wait()

        for i in range(args.n_test_shards):
            last_process = create_record_worker(output_file_prefix + '_test', i)

        last_process.wait()
|
219 |
+
|
220 |
+
|
221 |
+
if __name__ == "__main__":
    # Command-line front end: every flag maps 1:1 onto an attribute read by main().
    parser = argparse.ArgumentParser(
        description='Preprocessing Application for Everything BERT-related'
    )

    parser.add_argument(
        '--action',
        type=str,
        help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords',
        choices={
            'download',    # Download and verify mdf5/sha sums
            'text_formatting',    # Convert into a file that contains one article/book per line
            'sharding',    # Convert previous formatted text into shards containing one sentence per line
            'create_tfrecord_files',    # Turn each shard into a TFrecord with masking and next sentence prediction info
            'create_hdf5_files'    # Turn each shard into a HDF5 file with masking and next sentence prediction info
        }
    )

    parser.add_argument(
        '--dataset',
        type=str,
        help='Specify the dataset to perform --action on',
        choices={
            'bookscorpus',
            'wikicorpus_en',
            'wikicorpus_zh',
            'books_wiki_en_corpus',
            'google_pretrained_weights',
            'nvidia_pretrained_weights',
            'mrpc',
            'sst-2',
            'squad',
            'all'
        }
    )

    parser.add_argument(
        '--input_files',
        type=str,
        help='Specify the input files in a comma-separated list (no spaces)'
    )

    parser.add_argument(
        '--n_training_shards',
        type=int,
        help='Specify the number of training shards to generate',
        default=256
    )

    parser.add_argument(
        '--n_test_shards',
        type=int,
        help='Specify the number of test shards to generate',
        default=256
    )

    parser.add_argument(
        '--fraction_test_set',
        type=float,
        help='Specify the fraction (0..1) of the data to withhold for the test data split (based on number of sequences)',
        default=0.1
    )

    parser.add_argument(
        '--segmentation_method',
        type=str,
        help='Specify your choice of sentence segmentation',
        choices={
            'nltk'
        },
        default='nltk'
    )

    parser.add_argument(
        '--n_processes',
        type=int,
        help='Specify the max number of processes to allow at one time',
        default=4
    )

    parser.add_argument(
        '--random_seed',
        type=int,
        help='Specify the base seed to use for any random number generation',
        default=12345
    )

    parser.add_argument(
        '--dupe_factor',
        type=int,
        help='Specify the duplication factor',
        default=5
    )

    parser.add_argument(
        '--masked_lm_prob',
        type=float,
        help='Specify the probability for masked lm',
        default=0.15
    )

    parser.add_argument(
        '--max_seq_length',
        type=int,
        help='Specify the maximum sequence length',
        default=512
    )

    parser.add_argument(
        '--max_predictions_per_seq',
        type=int,
        help='Specify the maximum number of masked words per sequence',
        default=20
    )

    # Interpreted as a truthy int by main(); any value > 0 means uncased.
    parser.add_argument(
        '--do_lower_case',
        type=int,
        help='Specify whether it is cased (0) or uncased (1) (any number greater than 0 will be treated as uncased)',
        default=1
    )

    parser.add_argument(
        '--vocab_file',
        type=str,
        help='Specify absolute path to vocab file to use)'
    )

    parser.add_argument(
        '--skip_wikiextractor',
        type=int,
        help='Specify whether to skip wikiextractor step 0=False, 1=True',
        default=0
    )

    parser.add_argument(
        '--interactive_json_config_generator',
        type=str,
        help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords'
    )

    args = parser.parse_args()
    main(args)
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/create_datasets_from_start.sh
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash

# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# End-to-end BERT data preparation: download the corpora, format and shard
# the text, then build phase-1 (seq len 128) and phase-2 (seq len 512) HDF5
# files via bertPrep.py.
#
# $1 selects the corpus set: "wiki_books" adds BooksCorpus, anything else
# (default "wiki_only") uses Wikipedia alone.
to_download=${1:-"wiki_only"}

# Download
if [ "$to_download" = "wiki_books" ] ; then
    python3 /workspace/bert/data/bertPrep.py --action download --dataset bookscorpus
fi

python3 /workspace/bert/data/bertPrep.py --action download --dataset wikicorpus_en
python3 /workspace/bert/data/bertPrep.py --action download --dataset google_pretrained_weights  # Includes vocab
python3 /workspace/bert/data/bertPrep.py --action download --dataset squad
python3 /workspace/bert/data/bertPrep.py --action download --dataset mrpc
python3 /workspace/bert/data/bertPrep.py --action download --dataset sst-2

# Properly format the text files
if [ "$to_download" = "wiki_books" ] ; then
    python3 /workspace/bert/data/bertPrep.py --action text_formatting --dataset bookscorpus
fi
python3 /workspace/bert/data/bertPrep.py --action text_formatting --dataset wikicorpus_en

# Pick the combined or wiki-only dataset name for the remaining steps.
if [ "$to_download" = "wiki_books" ] ; then
    DATASET="books_wiki_en_corpus"
else
    DATASET="wikicorpus_en"
fi

# Shard the text files
python3 /workspace/bert/data/bertPrep.py --action sharding --dataset "$DATASET"

# Create HDF5 files Phase 1
python3 /workspace/bert/data/bertPrep.py --action create_hdf5_files --dataset "$DATASET" --max_seq_length 128 \
--max_predictions_per_seq 20 --vocab_file "$BERT_PREP_WORKING_DIR/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt" --do_lower_case 1

# Create HDF5 files Phase 2
python3 /workspace/bert/data/bertPrep.py --action create_hdf5_files --dataset "$DATASET" --max_seq_length 512 \
--max_predictions_per_seq 80 --vocab_file "$BERT_PREP_WORKING_DIR/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt" --do_lower_case 1
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/data/squad/squad_download.sh
ADDED
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env bash

# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Download SQuAD v1.1 and v2.0 (train/dev JSON plus the official evaluation
# scripts) and verify each file against a known md5sum. Any mismatch is
# reported and makes the script exit non-zero so callers do not proceed
# with corrupted data.

echo "Downloading dataset for squad..."

# Download SQuAD

v1="v1.1"
mkdir $v1
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json -O $v1/train-v1.1.json
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json -O $v1/dev-v1.1.json
wget https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/ -O $v1/evaluate-v1.1.py

# Expected checksums carry the " -" suffix because md5sum reads from stdin.
EXP_TRAIN_v1='981b29407e0affa3b1b156f72073b945  -'
EXP_DEV_v1='3e85deb501d4e538b6bc56f786231552  -'
EXP_EVAL_v1='afb04912d18ff20696f7f88eed49bea9  -'
CALC_TRAIN_v1=`cat ${v1}/train-v1.1.json |md5sum`
CALC_DEV_v1=`cat ${v1}/dev-v1.1.json |md5sum`
CALC_EVAL_v1=`cat ${v1}/evaluate-v1.1.py |md5sum`

v2="v2.0"
mkdir $v2
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json -O $v2/train-v2.0.json
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json -O $v2/dev-v2.0.json
wget https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/ -O $v2/evaluate-v2.0.py

EXP_TRAIN_v2='62108c273c268d70893182d5cf8df740  -'
EXP_DEV_v2='246adae8b7002f8679c027697b0b7cf8  -'
EXP_EVAL_v2='ff23213bed5516ea4a6d9edb6cd7d627  -'

CALC_TRAIN_v2=`cat ${v2}/train-v2.0.json |md5sum`
CALC_DEV_v2=`cat ${v2}/dev-v2.0.json |md5sum`
CALC_EVAL_v2=`cat ${v2}/evaluate-v2.0.py |md5sum`

echo "Squad data download done!"

echo "Verifying Dataset...."

# Track verification failures so the script can exit non-zero at the end
# instead of reporting success over corrupted files.
CORRUPT=0

if [ "$EXP_TRAIN_v1" != "$CALC_TRAIN_v1" ]; then
    echo "train-v1.1.json is corrupted! md5sum doesn't match"
    CORRUPT=1
fi

if [ "$EXP_DEV_v1" != "$CALC_DEV_v1" ]; then
    echo "dev-v1.1.json is corrupted! md5sum doesn't match"
    CORRUPT=1
fi
if [ "$EXP_EVAL_v1" != "$CALC_EVAL_v1" ]; then
    echo "evaluate-v1.1.py is corrupted! md5sum doesn't match"
    CORRUPT=1
fi


if [ "$EXP_TRAIN_v2" != "$CALC_TRAIN_v2" ]; then
    echo "train-v2.0.json is corrupted! md5sum doesn't match"
    CORRUPT=1
fi
if [ "$EXP_DEV_v2" != "$CALC_DEV_v2" ]; then
    echo "dev-v2.0.json is corrupted! md5sum doesn't match"
    CORRUPT=1
fi
if [ "$EXP_EVAL_v2" != "$CALC_EVAL_v2" ]; then
    echo "evaluate-v2.0.py is corrupted! md5sum doesn't match"
    CORRUPT=1
fi

if [ "$CORRUPT" -ne 0 ]; then
    echo "Dataset verification failed!"
    exit 1
fi

echo "Complete!"
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/file_utils.py
ADDED
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
|
2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
3 |
+
# you may not use this file except in compliance with the License.
|
4 |
+
# You may obtain a copy of the License at
|
5 |
+
#
|
6 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
7 |
+
#
|
8 |
+
# Unless required by applicable law or agreed to in writing, software
|
9 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
10 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
11 |
+
# See the License for the specific language governing permissions and
|
12 |
+
# limitations under the License.
|
13 |
+
|
14 |
+
"""
|
15 |
+
Utilities for working with the local dataset cache.
|
16 |
+
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
|
17 |
+
Copyright by the AllenNLP authors.
|
18 |
+
"""
|
19 |
+
|
20 |
+
from __future__ import (absolute_import, division, print_function, unicode_literals)
|
21 |
+
|
22 |
+
import json
|
23 |
+
import logging
|
24 |
+
import os
|
25 |
+
import shutil
|
26 |
+
import tempfile
|
27 |
+
from functools import wraps
|
28 |
+
from hashlib import sha256
|
29 |
+
import sys
|
30 |
+
from io import open
|
31 |
+
|
32 |
+
import boto3
|
33 |
+
import requests
|
34 |
+
from botocore.exceptions import ClientError
|
35 |
+
from tqdm import tqdm
|
36 |
+
|
37 |
+
try:
|
38 |
+
from urllib.parse import urlparse
|
39 |
+
except ImportError:
|
40 |
+
from urlparse import urlparse
|
41 |
+
|
42 |
+
try:
|
43 |
+
from pathlib import Path
|
44 |
+
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
|
45 |
+
Path.home() / '.pytorch_pretrained_bert'))
|
46 |
+
except AttributeError:
|
47 |
+
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
|
48 |
+
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
|
49 |
+
|
50 |
+
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
|
51 |
+
|
52 |
+
|
53 |
+
def url_to_filename(url, etag=None):
    """Map *url* (and optionally *etag*) to a stable cache filename.

    The name is the SHA-256 hex digest of the URL; when an ETag is given,
    a period plus the digest of the ETag is appended, so the same URL with
    a new ETag produces a distinct cache entry.
    """
    name = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        name = '{}.{}'.format(name, sha256(etag.encode('utf-8')).hexdigest())
    return name
|
69 |
+
|
70 |
+
|
71 |
+
def filename_to_url(filename, cache_dir=None):
    """Recover the ``(url, etag)`` recorded for a cached *filename*.

    The metadata lives in a ``<filename>.json`` sidecar next to the cached
    file. Raises ``EnvironmentError`` when either the cached file or its
    sidecar is missing. *etag* may be ``None``.
    """
    cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if cache_dir is None else cache_dir
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # Check the data file first, then the sidecar, mirroring lookup order.
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise EnvironmentError("file {} not found".format(required))

    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
|
95 |
+
|
96 |
+
|
97 |
+
def cached_path(url_or_filename, cache_dir=None):
    """Resolve *url_or_filename* to a path on the local filesystem.

    http/https/s3 URLs are served through the download cache (fetching on
    a miss); an existing local path is returned unchanged. Raises
    ``EnvironmentError`` for a missing local file and ``ValueError`` for
    input that is neither a known URL scheme nor a path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if sys.version_info[0] == 3:
        if isinstance(url_or_filename, Path):
            url_or_filename = str(url_or_filename)
        if isinstance(cache_dir, Path):
            cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: download into the cache if not already there.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Local file that exists: hand it back untouched.
        return url_or_filename
    if scheme == '':
        # Looks like a plain path but nothing is there.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Unrecognized scheme.
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
|
125 |
+
|
126 |
+
|
127 |
+
def split_s3_path(url):
    """Split an ``s3://bucket/key`` URL into ``(bucket_name, key)``.

    Raises ``ValueError`` when either the bucket or the key is missing.
    """
    parsed = urlparse(url)
    if not (parsed.netloc and parsed.path):
        raise ValueError("bad s3 path {}".format(url))
    key = parsed.path
    # urlparse keeps the leading '/' of the key; drop exactly one.
    if key.startswith("/"):
        key = key[1:]
    return parsed.netloc, key
|
138 |
+
|
139 |
+
|
140 |
+
def s3_request(func):
    """Decorate an S3 helper so a 404 ``ClientError`` surfaces as a
    friendlier ``EnvironmentError`` naming the missing URL; every other
    client error propagates unchanged.
    """

    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            if int(exc.response["Error"]["Code"]) != 404:
                raise
            raise EnvironmentError("file {} not found".format(url))

    return wrapper
|
157 |
+
|
158 |
+
|
159 |
+
@s3_request
def s3_etag(url):
    """Return the ETag of the S3 object at *url* (404 becomes
    ``EnvironmentError`` via the ``s3_request`` wrapper)."""
    bucket_name, key = split_s3_path(url)
    return boto3.resource("s3").Object(bucket_name, key).e_tag
|
166 |
+
|
167 |
+
|
168 |
+
@s3_request
def s3_get(url, temp_file):
    """Stream the S3 object at *url* into the already-open *temp_file*."""
    bucket_name, key = split_s3_path(url)
    boto3.resource("s3").Bucket(bucket_name).download_fileobj(key, temp_file)
|
174 |
+
|
175 |
+
|
176 |
+
def http_get(url, temp_file):
    """Stream *url* into *temp_file* in 1 KiB chunks with a tqdm byte
    progress bar (total unknown when the server omits Content-Length)."""
    response = requests.get(url, stream=True)
    content_length = response.headers.get('Content-Length')
    progress = tqdm(unit="B",
                    total=int(content_length) if content_length is not None else None)
    for chunk in response.iter_content(chunk_size=1024):
        if not chunk:
            # Skip keep-alive chunks.
            continue
        progress.update(len(chunk))
        temp_file.write(chunk)
    progress.close()
|
186 |
+
|
187 |
+
|
188 |
+
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    The cache key is derived from the URL plus the server-reported ETag
    (see ``url_to_filename``), so a changed remote file gets a new entry.
    Raises ``IOError`` when the HEAD request for a non-S3 URL fails.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)

    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        # HEAD (not GET) so we learn the ETag without downloading the body.
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)

            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)

            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)

            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)

            # Sidecar JSON consumed by filename_to_url().
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w', encoding="utf-8") as meta_file:
                json.dump(meta, meta_file)

            # NamedTemporaryFile deletes itself when the `with` block exits.
            logger.info("removing temp file %s", temp_file.name)

    return cache_path
|
246 |
+
|
247 |
+
|
248 |
+
def read_set_from_file(filename):
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line; trailing whitespace
    (including the newline) is stripped from each item.
    '''
    with open(filename, 'r', encoding='utf-8') as handle:
        return {line.rstrip() for line in handle}
|
258 |
+
|
259 |
+
|
260 |
+
def get_file_extension(path, dot=True, lower=True):
    """Return the extension of *path*.

    *dot* keeps the leading dot; *lower* lower-cases the result.
    """
    extension = os.path.splitext(path)[1]
    if not dot:
        extension = extension[1:]
    return extension.lower() if lower else extension
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/gpu_migration_logs/gpu_migration_5494.log
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[2023-03-17 11:42:27] /usr/local/lib/python3.8/dist-packages/torch/random.py:40
|
2 |
+
[context]: torch.cuda.manual_seed_all(seed)
|
3 |
+
[hpu_match]: torch.cuda.manual_seed_all(seed=42, ) --> torch.hpu.random.manual_seed_all(42)
|
4 |
+
|
5 |
+
[2023-03-17 11:42:27] run_pretraining.py:580
|
6 |
+
[context]: torch.cuda.manual_seed(args.seed + args.local_rank)
|
7 |
+
[hpu_match]: torch.cuda.manual_seed(seed=42, ) --> torch.hpu.random.manual_seed(42)
|
8 |
+
|
9 |
+
[2023-03-17 11:42:27] run_pretraining.py:316
|
10 |
+
[context]: assert (torch.cuda.is_available())
|
11 |
+
[hpu_match]: torch.cuda.is_available() --> torch.hpu.is_available()
|
12 |
+
|
13 |
+
[2023-03-17 11:42:27] run_pretraining.py:325
|
14 |
+
[context]: torch.cuda.set_device(args.local_rank)
|
15 |
+
[hpu_match]: torch.cuda.set_device(device=0, ) --> torch.hpu.set_device(hpu:0)
|
16 |
+
|
17 |
+
[2023-03-17 11:42:27] run_pretraining.py:328
|
18 |
+
[context]: torch.distributed.init_process_group(backend='nccl', init_method='env://')
|
19 |
+
[hpu_match]: torch.distributed.init_process_group(backend=nccl, init_method=env://, timeout=0:30:00, world_size=-1, rank=-1, store=None, group_name=, pg_options=None, ) --> change backend to hccl
|
20 |
+
|
21 |
+
[2023-03-17 11:42:31] /usr/local/lib/python3.8/dist-packages/habana_frameworks/torch/core/weight_sharing.py:150
|
22 |
+
[context]: result = self.original_to(*args, **kwargs)
|
23 |
+
[hpu_match]: torch.Tensor.to(args=(device(type='cuda', index=0), None, False), kwargs={}, ) --> torch.Tensor.to(args=('hpu:0', None, False), kwargs={})
|
24 |
+
|
25 |
+
[2023-03-17 11:42:33] /usr/local/lib/python3.8/dist-packages/apex/amp/scaler.py:56
|
26 |
+
[context]: self._overflow_buf = torch.cuda.IntTensor([0])
|
27 |
+
[hpu_modified]: torch.cuda.__new__(args=([0],), kwargs={}, ) --> torch.IntTensor(args=([0],), kwargs={}).to(hpu)
|
28 |
+
|
29 |
+
[2023-03-17 11:42:33] /usr/local/lib/python3.8/dist-packages/apex/parallel/distributed.py:63
|
30 |
+
[context]: tp = tensor.type()
|
31 |
+
[hpu_match]: torch.Tensor.type(dtype=None, non_blocking=False, kwargs={}, ) --> change output value from torch.hpu.FloatTensor to torch.cuda.FloatTensor
|
32 |
+
|
33 |
+
[2023-03-17 11:42:35] run_pretraining.py:652
|
34 |
+
[context]: train_dataloader = DataLoader(train_data, sampler=train_sampler,
|
35 |
+
[hpu_match]: torch.utils.data.DataLoader.__init__(dataset=dataset, batch_size=64, shuffle=None, sampler=<torch.utils.data.sampler.RandomSampler object at 0x7f893a457dc0>, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=True, drop_last=True, timeout=0, worker_init_fn=<__main__.WorkerInitObj object at 0x7f893fb04670>, multiprocessing_context=None, generator=None, prefetch_factor=2, persistent_workers=False, pin_memory_device=, ) --> change pin_memory_device to hpu
|
36 |
+
|
37 |
+
[2023-03-17 11:42:35] run_pretraining.py:663
|
38 |
+
[context]: overflow_buf = torch.cuda.IntTensor([0])
|
39 |
+
[hpu_modified]: torch.cuda.__new__(args=([0],), kwargs={}, ) --> torch.IntTensor(args=([0],), kwargs={}).to(hpu)
|
40 |
+
|
41 |
+
[2023-03-17 11:42:35] run_pretraining.py:685
|
42 |
+
[context]: batch = [t.to(device) for t in batch]
|
43 |
+
[hpu_match]: torch.Tensor.to(args=(device(type='cuda', index=0),), kwargs={}, ) --> torch.Tensor.to(args=('hpu:0',), kwargs={})
|
44 |
+
|
45 |
+
[2023-03-17 11:42:35] /usr/local/lib/python3.8/dist-packages/torch/cuda/amp/common.py:6
|
46 |
+
[context]: return not (torch.cuda.is_available() or find_spec('torch_xla'))
|
47 |
+
[hpu_match]: torch.cuda.is_available() --> torch.hpu.is_available()
|
48 |
+
|
49 |
+
[2023-03-17 11:42:35] /usr/lib/python3.8/multiprocessing/context.py:277
|
50 |
+
[context]: return Popen(process_obj)
|
51 |
+
[hpu_match]: torch.utils.data.DataLoader.__init__(dataset=dataset, batch_size=64, shuffle=None, sampler=<torch.utils.data.sampler.RandomSampler object at 0x7f88cedbf520>, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=True, drop_last=True, timeout=0, worker_init_fn=<__main__.WorkerInitObj object at 0x7f893a4578b0>, multiprocessing_context=None, generator=None, prefetch_factor=2, persistent_workers=False, pin_memory_device=, ) --> change pin_memory_device to hpu
|
52 |
+
|
53 |
+
[2023-03-17 11:44:11] /usr/lib/python3.8/multiprocessing/context.py:277
|
54 |
+
[context]: return Popen(process_obj)
|
55 |
+
[hpu_match]: torch.utils.data.DataLoader.__init__(dataset=dataset, batch_size=64, shuffle=None, sampler=<torch.utils.data.sampler.RandomSampler object at 0x7f88cedbf6d0>, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=True, drop_last=True, timeout=0, worker_init_fn=<__main__.WorkerInitObj object at 0x7f88cedbf430>, multiprocessing_context=None, generator=None, prefetch_factor=2, persistent_workers=False, pin_memory_device=, ) --> change pin_memory_device to hpu
|
56 |
+
|
57 |
+
[2023-03-17 11:44:27] run_pretraining.py:484
|
58 |
+
[context]: flat_raw = torch.empty(flat_grad_size, device='cuda', dtype=allreduce_dtype)
|
59 |
+
[hpu_match]: torch.empty(args=(336232258,), kwargs={'device': 'hpu', 'dtype': torch.bfloat16}, ) --> torch.Tensor.empty(args=(336232258,), kwargs={device=hpu, dtype=torch.bfloat16, })
|
60 |
+
|
61 |
+
[2023-03-17 11:45:29] /usr/lib/python3.8/multiprocessing/context.py:277
|
62 |
+
[context]: return Popen(process_obj)
|
63 |
+
[hpu_match]: torch.utils.data.DataLoader.__init__(dataset=dataset, batch_size=64, shuffle=None, sampler=<torch.utils.data.sampler.RandomSampler object at 0x7f88cedbf850>, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=True, drop_last=True, timeout=0, worker_init_fn=<__main__.WorkerInitObj object at 0x7f88cedbf730>, multiprocessing_context=None, generator=None, prefetch_factor=2, persistent_workers=False, pin_memory_device=, ) --> change pin_memory_device to hpu
|
64 |
+
|
65 |
+
[2023-03-17 11:45:58] /usr/lib/python3.8/multiprocessing/context.py:277
|
66 |
+
[context]: return Popen(process_obj)
|
67 |
+
[hpu_match]: torch.utils.data.DataLoader.__init__(dataset=dataset, batch_size=64, shuffle=None, sampler=<torch.utils.data.sampler.RandomSampler object at 0x7f88cedbf9d0>, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=True, drop_last=True, timeout=0, worker_init_fn=<__main__.WorkerInitObj object at 0x7f88cedbf8b0>, multiprocessing_context=None, generator=None, prefetch_factor=2, persistent_workers=False, pin_memory_device=, ) --> change pin_memory_device to hpu
|
68 |
+
|
69 |
+
[2023-03-17 11:46:40] /usr/lib/python3.8/multiprocessing/context.py:277
|
70 |
+
[context]: return Popen(process_obj)
|
71 |
+
[hpu_match]: torch.utils.data.DataLoader.__init__(dataset=dataset, batch_size=64, shuffle=None, sampler=<torch.utils.data.sampler.RandomSampler object at 0x7f88cedbfb50>, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=True, drop_last=True, timeout=0, worker_init_fn=<__main__.WorkerInitObj object at 0x7f88cedbfa30>, multiprocessing_context=None, generator=None, prefetch_factor=2, persistent_workers=False, pin_memory_device=, ) --> change pin_memory_device to hpu
|
72 |
+
|
73 |
+
[2023-03-17 11:47:09] /usr/lib/python3.8/multiprocessing/context.py:277
|
74 |
+
[context]: return Popen(process_obj)
|
75 |
+
[hpu_match]: torch.utils.data.DataLoader.__init__(dataset=dataset, batch_size=64, shuffle=None, sampler=<torch.utils.data.sampler.RandomSampler object at 0x7f88cedbfcd0>, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=True, drop_last=True, timeout=0, worker_init_fn=<__main__.WorkerInitObj object at 0x7f88cedbfbb0>, multiprocessing_context=None, generator=None, prefetch_factor=2, persistent_workers=False, pin_memory_device=, ) --> change pin_memory_device to hpu
|
76 |
+
|
77 |
+
[2023-03-17 11:47:38] /usr/lib/python3.8/multiprocessing/context.py:277
|
78 |
+
[context]: return Popen(process_obj)
|
79 |
+
[hpu_match]: torch.utils.data.DataLoader.__init__(dataset=dataset, batch_size=64, shuffle=None, sampler=<torch.utils.data.sampler.RandomSampler object at 0x7f88cedbfe50>, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=True, drop_last=True, timeout=0, worker_init_fn=<__main__.WorkerInitObj object at 0x7f88cedbfd30>, multiprocessing_context=None, generator=None, prefetch_factor=2, persistent_workers=False, pin_memory_device=, ) --> change pin_memory_device to hpu
|
80 |
+
|
81 |
+
[2023-03-17 11:48:07] /usr/lib/python3.8/multiprocessing/context.py:277
|
82 |
+
[context]: return Popen(process_obj)
|
83 |
+
[hpu_match]: torch.utils.data.DataLoader.__init__(dataset=dataset, batch_size=64, shuffle=None, sampler=<torch.utils.data.sampler.RandomSampler object at 0x7f88cedbff70>, batch_sampler=None, num_workers=0, collate_fn=None, pin_memory=True, drop_last=True, timeout=0, worker_init_fn=<__main__.WorkerInitObj object at 0x7f88cedbfeb0>, multiprocessing_context=None, generator=None, prefetch_factor=2, persistent_workers=False, pin_memory_device=, ) --> change pin_memory_device to hpu
|
84 |
+
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/inference.py
ADDED
@@ -0,0 +1,349 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
|
3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
""" BERT inference script. Does not depend on dataset. """
|
17 |
+
|
18 |
+
from __future__ import absolute_import, division, print_function
|
19 |
+
|
20 |
+
import argparse
|
21 |
+
import collections
|
22 |
+
import json
|
23 |
+
import logging
|
24 |
+
import math
|
25 |
+
import os
|
26 |
+
import random
|
27 |
+
import sys
|
28 |
+
from io import open
|
29 |
+
|
30 |
+
import numpy as np
|
31 |
+
import torch
|
32 |
+
from tqdm import tqdm, trange
|
33 |
+
from types import SimpleNamespace
|
34 |
+
|
35 |
+
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
|
36 |
+
from modeling import BertForQuestionAnswering, BertConfig, WEIGHTS_NAME, CONFIG_NAME
|
37 |
+
from tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize)
|
38 |
+
from run_squad import _get_best_indices, _compute_softmax, get_valid_prelim_predictions, get_answer_text
|
39 |
+
|
40 |
+
if sys.version_info[0] == 2:
|
41 |
+
import cPickle as pickle
|
42 |
+
else:
|
43 |
+
import pickle
|
44 |
+
|
45 |
+
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
|
46 |
+
datefmt='%m/%d/%Y %H:%M:%S',
|
47 |
+
level=logging.INFO)
|
48 |
+
logger = logging.getLogger(__name__)
|
49 |
+
|
50 |
+
|
51 |
+
import math
|
52 |
+
import json
|
53 |
+
import numpy as np
|
54 |
+
import collections
|
55 |
+
|
56 |
+
|
57 |
+
def preprocess_tokenized_text(doc_tokens, query_tokens, tokenizer,
                              max_seq_length, max_query_length):
    """Convert one (question, context) pair into model-ready input features.

    Builds the standard BERT QA packing
    ``[CLS] query [SEP] context [SEP]`` with segment ids 0 for the query
    half and 1 for the context half, zero-padded to ``max_seq_length``.

    Args:
        doc_tokens: whitespace-split context words.
        query_tokens: already-tokenized question (truncated here to
            ``max_query_length``).
        tokenizer: object providing ``tokenize`` and
            ``convert_tokens_to_ids``.
        max_seq_length: total packed sequence length.
        max_query_length: maximum number of question tokens kept.

    Returns:
        (tensors_for_inference, tokens_for_postprocessing) — two
        SimpleNamespace objects: the first holds ``input_ids``,
        ``input_mask``, ``segment_ids``; the second holds ``tokens``,
        ``token_to_orig_map`` and ``token_is_max_context`` for mapping
        predictions back to the original words.
    """
    query_tokens = query_tokens[0:max_query_length]

    # WordPiece-tokenize the context, remembering which original word
    # each sub-token came from so answers can be mapped back later.
    sub_token_origin = []
    doc_subtokens = []
    for word_index, word in enumerate(doc_tokens):
        for piece in tokenizer.tokenize(word):
            sub_token_origin.append(word_index)
            doc_subtokens.append(piece)

    # The -3 accounts for [CLS], [SEP] and [SEP].
    doc_budget = max_seq_length - len(query_tokens) - 3
    usable = min(len(doc_subtokens), doc_budget)

    tokens = ["[CLS]"] + list(query_tokens) + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    token_to_orig_map = {}
    token_is_max_context = {}
    for i in range(usable):
        token_to_orig_map[len(tokens)] = sub_token_origin[i]
        # Single-feature inference: every token is trivially "max context".
        token_is_max_context[len(tokens)] = True
        tokens.append(doc_subtokens[i])
        segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # 1 marks real tokens (attended to), 0 marks padding.
    input_mask = [1] * len(input_ids)

    # Zero-pad every parallel list up to the fixed sequence length.
    pad = max_seq_length - len(input_ids)
    input_ids.extend([0] * pad)
    input_mask.extend([0] * pad)
    segment_ids.extend([0] * pad)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    tensors_for_inference = SimpleNamespace(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
    )
    tokens_for_postprocessing = SimpleNamespace(
        tokens=tokens,
        token_to_orig_map=token_to_orig_map,
        token_is_max_context=token_is_max_context,
    )

    return tensors_for_inference, tokens_for_postprocessing
|
132 |
+
|
133 |
+
|
134 |
+
# Lightweight record for the model's raw per-position span scores.
RawResult = collections.namedtuple("RawResult", "start_logits end_logits")
|
135 |
+
|
136 |
+
|
137 |
+
def get_answer(doc_tokens, tokens_for_postprocessing,
               start_logits, end_logits, args):
    """Turn raw span logits into a final answer string plus an n-best list.

    Args:
        doc_tokens: whitespace-split context words.
        tokens_for_postprocessing: SimpleNamespace from
            ``preprocess_tokenized_text`` (tokens, token_to_orig_map,
            token_is_max_context).
        start_logits / end_logits: per-position scores from the model as
            plain Python lists.
        args: parsed CLI namespace; reads ``n_best_size``,
            ``version_2_with_negative`` and ``null_score_diff_threshold``
            here (the run_squad helpers read further fields).

    Returns:
        ``(answer, nbest_answers)`` — the selected answer text ("" means
        "no answer" in SQuAD-v2 mode) and a list of OrderedDicts with
        text / probability / start_logit / end_logit per candidate.
    """
    result = RawResult(start_logits=start_logits, end_logits=end_logits)

    predictions = []
    Prediction = collections.namedtuple('Prediction', ['text', 'start_logit', 'end_logit'])

    # Best "no answer" option seen so far: (total score, start logit, end logit).
    if args.version_2_with_negative:
        null_val = (float("inf"), 0, 0)

    start_indices = _get_best_indices(result.start_logits, args.n_best_size)
    end_indices = _get_best_indices(result.end_logits, args.n_best_size)
    prelim_predictions = get_valid_prelim_predictions(start_indices, end_indices,
                                                      tokens_for_postprocessing, result, args)
    prelim_predictions = sorted(
        prelim_predictions,
        key=lambda x: (x.start_logit + x.end_logit),
        reverse=True
    )
    if args.version_2_with_negative:
        # Position 0 is [CLS]; its span score stands for "no answer".
        score = result.start_logits[0] + result.end_logits[0]
        if score < null_val[0]:
            null_val = (score, result.start_logits[0], result.end_logits[0])

    doc_tokens_obj = SimpleNamespace(doc_tokens=doc_tokens)

    curr_predictions = []
    seen_predictions = []
    for pred in prelim_predictions:
        if len(curr_predictions) == args.n_best_size:
            break
        if pred.end_index > 0:  # this is a non-null prediction
            final_text = get_answer_text(doc_tokens_obj, tokens_for_postprocessing, pred, args)
            if final_text in seen_predictions:
                continue  # de-duplicate identical answer strings
        else:
            final_text = ""

        seen_predictions.append(final_text)
        curr_predictions.append(Prediction(final_text, pred.start_logit, pred.end_logit))
    predictions += curr_predictions

    # Add an explicit empty prediction so "" can compete in SQuAD-v2 mode.
    if args.version_2_with_negative:
        predictions.append(Prediction('', null_val[1], null_val[2]))

    nbest_answers = []
    answer = None
    nbest = sorted(predictions,
                   key=lambda x: (x.start_logit + x.end_logit),
                   reverse=True)[:args.n_best_size]

    total_scores = []
    best_non_null_entry = None
    for entry in nbest:
        total_scores.append(entry.start_logit + entry.end_logit)
        if not best_non_null_entry and entry.text:
            best_non_null_entry = entry
    probs = _compute_softmax(total_scores)
    for (i, entry) in enumerate(nbest):
        output = collections.OrderedDict()
        output["text"] = entry.text
        output["probability"] = probs[i]
        output["start_logit"] = entry.start_logit
        output["end_logit"] = entry.end_logit
        nbest_answers.append(output)
    if args.version_2_with_negative:
        # BUGFIX: every candidate can be empty, in which case no non-null
        # entry exists; previously this dereferenced None and raised
        # AttributeError. An all-empty n-best means "no answer".
        if best_non_null_entry is None:
            answer = ""
        else:
            score_diff = null_val[0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit
            if score_diff > args.null_score_diff_threshold:
                answer = ""
            else:
                answer = best_non_null_entry.text
    else:
        # BUGFIX: guard against an empty n-best list instead of IndexError.
        answer = nbest_answers[0]['text'] if nbest_answers else ""

    return answer, nbest_answers
|
217 |
+
|
218 |
+
|
219 |
+
def main():
    """Run single-example BERT SQuAD inference from the command line.

    Parses CLI arguments, loads a pretrained ``BertForQuestionAnswering``
    checkpoint, packs the (question, context) pair into model inputs, runs
    one forward pass and prints the predicted answer plus the n-best
    candidates as JSON.
    """
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                        "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--init_checkpoint",
                        default=None,
                        type=str,
                        required=True,
                        help="The checkpoint file from pretraining")

    ## Other parameters
    parser.add_argument("--verbose_logging", action='store_true',
                        help="If true, all of the warnings related to data processing will be printed. ")
    parser.add_argument("--seed", default=1, type=int)
    parser.add_argument("--question", default="Most antibiotics target bacteria and don't affect what class of organisms? ",
                        type=str, help="question")
    parser.add_argument("--context", default="Within the genitourinary and gastrointestinal tracts, commensal flora serve as biological barriers by competing with pathogenic bacteria for food and space and, in some cases, by changing the conditions in their environment, such as pH or available iron. This reduces the probability that pathogens will reach sufficient numbers to cause illness. However, since most antibiotics non-specifically target bacteria and do not affect fungi, oral antibiotics can lead to an overgrowth of fungi and cause conditions such as a vaginal candidiasis (a yeast infection). There is good evidence that re-introduction of probiotic flora, such as pure cultures of the lactobacilli normally found in unpasteurized yogurt, helps restore a healthy balance of microbial populations in intestinal infections in children and encouraging preliminary data in studies on bacterial gastroenteritis, inflammatory bowel diseases, urinary tract infection and post-surgical infections. ",
                        type=str, help="context")
    parser.add_argument("--max_seq_length", default=384, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--max_query_length", default=64, type=int,
                        help="The maximum number of tokens for the question. Questions longer than this will "
                             "be truncated to this length.")
    parser.add_argument("--n_best_size", default=1, type=int,
                        help="The total number of n-best predictions to generate. ")
    parser.add_argument("--max_answer_length", default=30, type=int,
                        help="The maximum length of an answer that can be generated. This is needed because the start "
                             "and end predictions are not conditioned on one another.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument('--version_2_with_negative',
                        action='store_true',
                        help='If true, then the model can reply with "unknown". ')
    parser.add_argument('--null_score_diff_threshold',
                        type=float, default=-11.0,
                        help="If null_score - best_non_null is greater than the threshold predict 'unknown'. ")
    parser.add_argument('--vocab_file',
                        type=str, default=None, required=True,
                        help="Vocabulary mapping/file BERT was pretrainined on")
    parser.add_argument("--config_file",
                        default=None,
                        type=str,
                        required=True,
                        help="The BERT model config")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="use mixed-precision")
    # BUGFIX: distributed launchers pass --local_rank on the command line;
    # without type=int the value arrived as a str, so the `== -1` check
    # below silently failed and torch.cuda.set_device received a string.
    # The default behavior is unchanged.
    parser.add_argument("--local_rank", default=-1, type=int, help="ordinal of the GPU to use")

    args = parser.parse_args()

    # Seed every RNG source so the run is reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)

    tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512)  # for bert large

    # Prepare model
    config = BertConfig.from_json_file(args.config_file)

    # Pad the vocabulary for divisibility by 8 (tensor-core friendly sizes).
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)

    # Initialize the model and load pretrained weights (checkpoint stores
    # the state dict under the "model" key).
    model = BertForQuestionAnswering(config)
    model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu')["model"])
    model.to(device)
    if args.fp16:
        model.half()
    model.eval()

    print("question: ", args.question)
    print("context: ", args.context)
    print()

    # Preprocessing: whitespace-split the context, WordPiece-tokenize the
    # question, then pack both into fixed-length model inputs.
    doc_tokens = args.context.split()
    query_tokens = tokenizer.tokenize(args.question)
    feature = preprocess_tokenized_text(doc_tokens,
                                        query_tokens,
                                        tokenizer,
                                        max_seq_length=args.max_seq_length,
                                        max_query_length=args.max_query_length)

    tensors_for_inference, tokens_for_postprocessing = feature

    input_ids = torch.tensor(tensors_for_inference.input_ids, dtype=torch.long).unsqueeze(0)
    segment_ids = torch.tensor(tensors_for_inference.segment_ids, dtype=torch.long).unsqueeze(0)
    input_mask = torch.tensor(tensors_for_inference.input_mask, dtype=torch.long).unsqueeze(0)

    # Load tensors to device.
    input_ids = input_ids.to(device)
    input_mask = input_mask.to(device)
    segment_ids = segment_ids.to(device)

    # Run prediction without building the autograd graph.
    with torch.no_grad():
        start_logits, end_logits = model(input_ids, segment_ids, input_mask)

    # Post-processing: strip the batch dimension and move to plain lists.
    start_logits = start_logits[0].detach().cpu().tolist()
    end_logits = end_logits[0].detach().cpu().tolist()
    answer, answers = get_answer(doc_tokens, tokens_for_postprocessing,
                                 start_logits, end_logits, args)

    # Print result.
    print()
    print(answer)
    print()
    print(json.dumps(answers, indent=4))
|
345 |
+
|
346 |
+
|
347 |
+
if __name__ == "__main__":
|
348 |
+
main()
|
349 |
+
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/patches/minimal_changes.diff
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
diff --git a/PyTorch/LanguageModeling/BERT/run_pretraining.py b/PyTorch/LanguageModeling/BERT/run_pretraining.py
|
2 |
+
index a3577886..459b1f03 100755
|
3 |
+
--- a/PyTorch/LanguageModeling/BERT/run_pretraining.py
|
4 |
+
+++ b/PyTorch/LanguageModeling/BERT/run_pretraining.py
|
5 |
+
@@ -20,6 +20,8 @@ from __future__ import absolute_import
|
6 |
+
from __future__ import division
|
7 |
+
from __future__ import print_function
|
8 |
+
|
9 |
+
+import habana_frameworks.torch.gpu_migration
|
10 |
+
+import habana_frameworks.torch.core as htcore
|
11 |
+
# ==================
|
12 |
+
import csv
|
13 |
+
import os
|
14 |
+
@@ -607,10 +609,14 @@ def main():
|
15 |
+
else:
|
16 |
+
loss.backward()
|
17 |
+
average_loss += loss.item()
|
18 |
+
+
|
19 |
+
+ htcore.mark_step()
|
20 |
+
|
21 |
+
if training_steps % args.gradient_accumulation_steps == 0:
|
22 |
+
lr_scheduler.step() # learning rate warmup
|
23 |
+
global_step = take_optimizer_step(args, optimizer, model, overflow_buf, global_step)
|
24 |
+
+
|
25 |
+
+ htcore.mark_step()
|
26 |
+
|
27 |
+
if global_step >= args.steps_this_run or timeout_sent:
|
28 |
+
train_time_raw = time.time() - raw_train_start
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/patches/performance_improvements.diff
ADDED
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
diff --git a/PyTorch/LanguageModeling/BERT/modeling.py b/PyTorch/LanguageModeling/BERT/modeling.py
|
2 |
+
index 31d6f3c3..0b339f82 100755
|
3 |
+
--- a/PyTorch/LanguageModeling/BERT/modeling.py
|
4 |
+
+++ b/PyTorch/LanguageModeling/BERT/modeling.py
|
5 |
+
@@ -117,7 +117,7 @@ def load_tf_weights_in_bert(model, tf_checkpoint_path):
|
6 |
+
return model
|
7 |
+
|
8 |
+
def gelu(x):
|
9 |
+
- return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
|
10 |
+
+ return F.gelu(x)
|
11 |
+
|
12 |
+
#used only for triton inference
|
13 |
+
def bias_gelu(bias, y):
|
14 |
+
@@ -136,8 +136,11 @@ def bias_tanh(bias, y):
|
15 |
+
def swish(x):
|
16 |
+
return x * torch.sigmoid(x)
|
17 |
+
|
18 |
+
+def tanh(x):
|
19 |
+
+ return torch.tanh(x)
|
20 |
+
+
|
21 |
+
#torch.nn.functional.gelu(x) # Breaks ONNX export
|
22 |
+
-ACT2FN = {"gelu": gelu, "bias_gelu": bias_gelu, "bias_tanh": bias_tanh, "relu": torch.nn.functional.relu, "swish": swish}
|
23 |
+
+ACT2FN = {"gelu": gelu, "bias_gelu": bias_gelu, "bias_tanh": bias_tanh, "relu": torch.nn.functional.relu, "swish": swish, "tanh": tanh}
|
24 |
+
|
25 |
+
class LinearActivation(Module):
|
26 |
+
r"""Fused Linear and activation Module.
|
27 |
+
@@ -148,16 +151,16 @@ class LinearActivation(Module):
|
28 |
+
super(LinearActivation, self).__init__()
|
29 |
+
self.in_features = in_features
|
30 |
+
self.out_features = out_features
|
31 |
+
- self.act_fn = nn.Identity() #
|
32 |
+
- self.biased_act_fn = None #
|
33 |
+
+ # setting act_fn to nn.Identity caused issues when re-assigning to gelu.Hence set to None
|
34 |
+
+ self.act_fn = None #
|
35 |
+
+ # self.biased_act_fn = None # not needed after applying perf improvement patch #
|
36 |
+
self.bias = None #
|
37 |
+
if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)): # For TorchScript
|
38 |
+
- if bias and not 'bias' in act: # compatibility
|
39 |
+
- act = 'bias_' + act #
|
40 |
+
- self.biased_act_fn = ACT2FN[act] #
|
41 |
+
-
|
42 |
+
- else:
|
43 |
+
- self.act_fn = ACT2FN[act]
|
44 |
+
+ # if bias and not 'bias' in act: # # compatibility
|
45 |
+
+ # act = 'bias_' + act # for perf improvement #
|
46 |
+
+ # self.biased_act_fn = ACT2FN[act] # #
|
47 |
+
+ # else:
|
48 |
+
+ self.act_fn = ACT2FN[act]
|
49 |
+
else:
|
50 |
+
self.act_fn = act
|
51 |
+
self.weight = Parameter(torch.Tensor(out_features, in_features))
|
52 |
+
@@ -175,10 +178,10 @@ class LinearActivation(Module):
|
53 |
+
init.uniform_(self.bias, -bound, bound)
|
54 |
+
|
55 |
+
def forward(self, input):
|
56 |
+
- if not self.bias is None:
|
57 |
+
- return self.biased_act_fn(self.bias, F.linear(input, self.weight, None))
|
58 |
+
- else:
|
59 |
+
- return self.act_fn(F.linear(input, self.weight, self.bias))
|
60 |
+
+ # if not self.bias is None: #
|
61 |
+
+ # return self.biased_act_fn(self.bias, F.linear(input, self.weight, None)) # for perf improvement
|
62 |
+
+ # else:
|
63 |
+
+ return self.act_fn(F.linear(input, self.weight, self.bias))
|
64 |
+
|
65 |
+
def extra_repr(self):
|
66 |
+
return 'in_features={}, out_features={}, bias={}'.format(
|
67 |
+
diff --git a/PyTorch/LanguageModeling/BERT/run_pretraining.py b/PyTorch/LanguageModeling/BERT/run_pretraining.py
|
68 |
+
index 2aeffcff..86105df2 100755
|
69 |
+
--- a/PyTorch/LanguageModeling/BERT/run_pretraining.py
|
70 |
+
+++ b/PyTorch/LanguageModeling/BERT/run_pretraining.py
|
71 |
+
@@ -580,6 +580,7 @@ def main():
|
72 |
+
training_steps = 0
|
73 |
+
average_training_time_per_step = 0
|
74 |
+
average_perf_per_step = 0
|
75 |
+
+ loss_list = []
|
76 |
+
|
77 |
+
pool = ProcessPoolExecutor(1)
|
78 |
+
|
79 |
+
@@ -623,8 +624,8 @@ def main():
|
80 |
+
train_sampler = RandomSampler(train_data)
|
81 |
+
train_dataloader = DataLoader(train_data, sampler=train_sampler,
|
82 |
+
batch_size=args.train_batch_size * args.n_gpu,
|
83 |
+
- num_workers=4, worker_init_fn=worker_init,
|
84 |
+
- pin_memory=True)
|
85 |
+
+ num_workers=0, worker_init_fn=worker_init,
|
86 |
+
+ drop_last=True, pin_memory=True)
|
87 |
+
# shared_file_list["0"] = (train_dataloader, data_file)
|
88 |
+
else:
|
89 |
+
train_dataloader = restored_data_loader
|
90 |
+
@@ -683,7 +684,7 @@ def main():
|
91 |
+
scaled_loss.backward()
|
92 |
+
else:
|
93 |
+
loss.backward()
|
94 |
+
- average_loss += loss.item()
|
95 |
+
+ loss_list.append(loss)
|
96 |
+
|
97 |
+
htcore.mark_step()
|
98 |
+
|
99 |
+
@@ -694,6 +695,9 @@ def main():
|
100 |
+
htcore.mark_step()
|
101 |
+
|
102 |
+
if global_step >= args.steps_this_run or timeout_sent or training_steps % (args.log_freq * args.gradient_accumulation_steps) == 0:
|
103 |
+
+ for loss_t in loss_list:
|
104 |
+
+ average_loss += loss_t.item()
|
105 |
+
+ loss_list.clear()
|
106 |
+
train_time = time.time() - starting_time
|
107 |
+
starting_time = time.time()
|
108 |
+
average_training_time_per_step = train_time/(args.gradient_accumulation_steps * args.log_freq)
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/patches/use_packed_dataset.diff
ADDED
@@ -0,0 +1,430 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
diff --git a/PyTorch/LanguageModeling/BERT/modeling.py b/PyTorch/LanguageModeling/BERT/modeling.py
|
2 |
+
index b2151eca..72e7e938 100755
|
3 |
+
--- a/PyTorch/LanguageModeling/BERT/modeling.py
|
4 |
+
+++ b/PyTorch/LanguageModeling/BERT/modeling.py
|
5 |
+
@@ -352,10 +352,13 @@ class BertEmbeddings(nn.Module):
|
6 |
+
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
|
7 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
8 |
+
|
9 |
+
- def forward(self, input_ids, token_type_ids):
|
10 |
+
+ def forward(self, input_ids, token_type_ids, positions = None):
|
11 |
+
seq_length = input_ids.size(1)
|
12 |
+
- position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
|
13 |
+
- position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
|
14 |
+
+ if positions is not None:
|
15 |
+
+ position_ids = positions
|
16 |
+
+ else:
|
17 |
+
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
|
18 |
+
+ position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
|
19 |
+
|
20 |
+
words_embeddings = self.word_embeddings(input_ids)
|
21 |
+
position_embeddings = self.position_embeddings(position_ids)
|
22 |
+
@@ -529,19 +532,33 @@ class BertEncoder(nn.Module):
|
23 |
+
all_encoder_layers.append(hidden_states)
|
24 |
+
return all_encoder_layers
|
25 |
+
|
26 |
+
+def gather_indexes(sequence_tensor, positions):
|
27 |
+
+ """Gathers the vectors at the specific positions over a minibatch."""
|
28 |
+
+ batch_size = sequence_tensor.shape[0]
|
29 |
+
+ seq_length = sequence_tensor.shape[1]
|
30 |
+
+ width = sequence_tensor.shape[2]
|
31 |
+
+
|
32 |
+
+ flat_offsets = (torch.arange(batch_size, dtype=torch.long, device=sequence_tensor.device) * seq_length).unsqueeze(1)
|
33 |
+
+ flat_positions = (positions + flat_offsets).flatten()
|
34 |
+
+ flat_sequence_tensor = sequence_tensor.reshape(batch_size * seq_length, width)
|
35 |
+
+ output_tensor = flat_sequence_tensor[flat_positions]
|
36 |
+
+ return output_tensor.reshape(batch_size, -1, width)
|
37 |
+
+
|
38 |
+
class BertPooler(nn.Module):
|
39 |
+
def __init__(self, config):
|
40 |
+
super(BertPooler, self).__init__()
|
41 |
+
self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh")
|
42 |
+
|
43 |
+
- def forward(self, hidden_states):
|
44 |
+
- # We "pool" the model by simply taking the hidden state corresponding
|
45 |
+
- # to the first token.
|
46 |
+
- first_token_tensor = hidden_states[:, 0]
|
47 |
+
- pooled_output = self.dense_act(first_token_tensor)
|
48 |
+
+ def forward(self, hidden_states, next_sentence_positions = None):
|
49 |
+
+ if next_sentence_positions is not None:
|
50 |
+
+ selected_tokens = gather_indexes(hidden_states, next_sentence_positions)
|
51 |
+
+ else:
|
52 |
+
+ # We "pool" the model by simply taking the hidden state corresponding
|
53 |
+
+ # to the first token.
|
54 |
+
+ selected_tokens = hidden_states[:, 0]
|
55 |
+
+ pooled_output = self.dense_act(selected_tokens)
|
56 |
+
return pooled_output
|
57 |
+
|
58 |
+
-
|
59 |
+
class BertPredictionHeadTransform(nn.Module):
|
60 |
+
def __init__(self, config):
|
61 |
+
super(BertPredictionHeadTransform, self).__init__()
|
62 |
+
@@ -703,7 +720,24 @@ class BertPreTrainedModel(nn.Module):
|
63 |
+
logger.info("extracting archive file {} to temp dir {}".format(
|
64 |
+
resolved_archive_file, tempdir))
|
65 |
+
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
|
66 |
+
- archive.extractall(tempdir)
|
67 |
+
+ def is_within_directory(directory, target):
|
68 |
+
+ abs_directory = os.path.abspath(directory)
|
69 |
+
+ abs_target = os.path.abspath(target)
|
70 |
+
+
|
71 |
+
+ prefix = os.path.commonprefix([abs_directory, abs_target])
|
72 |
+
+
|
73 |
+
+ return prefix == abs_directory
|
74 |
+
+
|
75 |
+
+ def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
|
76 |
+
+
|
77 |
+
+ for member in tar.getmembers():
|
78 |
+
+ member_path = os.path.join(path, member.name)
|
79 |
+
+ if not is_within_directory(path, member_path):
|
80 |
+
+ raise Exception("Attempted Path Traversal in Tar File")
|
81 |
+
+
|
82 |
+
+ tar.extractall(path, members, numeric_owner=numeric_owner)
|
83 |
+
+
|
84 |
+
+ safe_extract(archive, tempdir)
|
85 |
+
serialization_dir = tempdir
|
86 |
+
# Load config
|
87 |
+
config_file = os.path.join(serialization_dir, CONFIG_NAME)
|
88 |
+
@@ -819,30 +853,36 @@ class BertModel(BertPreTrainedModel):
|
89 |
+
self.apply(self.init_bert_weights)
|
90 |
+
self.output_all_encoded_layers = config.output_all_encoded_layers
|
91 |
+
|
92 |
+
- def forward(self, input_ids, token_type_ids, attention_mask):
|
93 |
+
- # We create a 3D attention mask from a 2D tensor mask.
|
94 |
+
- # Sizes are [batch_size, 1, 1, to_seq_length]
|
95 |
+
- # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
|
96 |
+
- # this attention mask is more simple than the triangular masking of causal attention
|
97 |
+
- # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
|
98 |
+
- extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
|
99 |
+
-
|
100 |
+
- # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
101 |
+
- # masked positions, this operation will create a tensor which is 0.0 for
|
102 |
+
- # positions we want to attend and -10000.0 for masked positions.
|
103 |
+
- # Since we are adding it to the raw scores before the softmax, this is
|
104 |
+
- # effectively the same as removing these entirely.
|
105 |
+
- extended_attention_mask = extended_attention_mask.to(dtype=self.embeddings.word_embeddings.weight.dtype) # fp16 compatibility
|
106 |
+
- extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
|
107 |
+
-
|
108 |
+
- embedding_output = self.embeddings(input_ids, token_type_ids)
|
109 |
+
- encoded_layers = self.encoder(embedding_output, extended_attention_mask)
|
110 |
+
- sequence_output = encoded_layers[-1]
|
111 |
+
- pooled_output = self.pooler(sequence_output)
|
112 |
+
- if not self.output_all_encoded_layers:
|
113 |
+
- encoded_layers = encoded_layers[-1:]
|
114 |
+
- return encoded_layers, pooled_output
|
115 |
+
-
|
116 |
+
+ def forward(self, input_ids, token_type_ids, attention_mask, enable_packed_data_mode = False, positions = None, next_sentence_positions = None):
|
117 |
+
+ if enable_packed_data_mode:
|
118 |
+
+ extended_attention_mask = 0.0
|
119 |
+
+ for i in range(3):
|
120 |
+
+ tmp = (attention_mask == i+1).type(torch.float32).unsqueeze(-1)
|
121 |
+
+ tmp = torch.matmul(tmp, torch.transpose(tmp, 1, 2))
|
122 |
+
+ extended_attention_mask += tmp.unsqueeze(1)
|
123 |
+
+ else:
|
124 |
+
+ # We create a 3D attention mask from a 2D tensor mask.
|
125 |
+
+ # Sizes are [batch_size, 1, 1, to_seq_length]
|
126 |
+
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
|
127 |
+
+ # this attention mask is more simple than the triangular masking of causal attention
|
128 |
+
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
|
129 |
+
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
|
130 |
+
+
|
131 |
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
132 |
+
+ # masked positions, this operation will create a tensor which is 0.0 for
|
133 |
+
+ # positions we want to attend and -10000.0 for masked positions.
|
134 |
+
+ # Since we are adding it to the raw scores before the softmax, this is
|
135 |
+
+ # effectively the same as removing these entirely.
|
136 |
+
+ extended_attention_mask = extended_attention_mask.to(dtype=self.embeddings.word_embeddings.weight.dtype) # fp16 compatibility
|
137 |
+
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
|
138 |
+
+
|
139 |
+
+ embedding_output = self.embeddings(input_ids, token_type_ids, positions)
|
140 |
+
+ encoded_layers = self.encoder(embedding_output, extended_attention_mask)
|
141 |
+
+ sequence_output = encoded_layers[-1]
|
142 |
+
+ pooled_output = self.pooler(sequence_output, next_sentence_positions)
|
143 |
+
+ if not self.output_all_encoded_layers:
|
144 |
+
+ encoded_layers = encoded_layers[-1:]
|
145 |
+
+ return encoded_layers, pooled_output
|
146 |
+
|
147 |
+
class BertForPreTraining(BertPreTrainedModel):
|
148 |
+
"""BERT model with pre-training heads.
|
149 |
+
@@ -898,16 +938,15 @@ class BertForPreTraining(BertPreTrainedModel):
|
150 |
+
super(BertForPreTraining, self).__init__(config)
|
151 |
+
self.bert = BertModel(config)
|
152 |
+
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
|
153 |
+
+ self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=-1)
|
154 |
+
self.apply(self.init_bert_weights)
|
155 |
+
|
156 |
+
- def forward(self, input_ids, token_type_ids, attention_mask):
|
157 |
+
- encoded_layers, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
|
158 |
+
+ def forward(self, input_ids, token_type_ids, attention_mask, masked_lm_labels=None, next_sentence_labels=None, enable_packed_data_mode = False, positions = None, next_sentence_positions = None):
|
159 |
+
+ encoded_layers, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, enable_packed_data_mode, positions, next_sentence_positions)
|
160 |
+
sequence_output = encoded_layers[-1]
|
161 |
+
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
|
162 |
+
-
|
163 |
+
return prediction_scores, seq_relationship_score
|
164 |
+
|
165 |
+
-
|
166 |
+
class BertForMaskedLM(BertPreTrainedModel):
|
167 |
+
"""BERT model with the masked language modeling head.
|
168 |
+
This module comprises the BERT model followed by the masked language modeling head.
|
169 |
+
diff --git a/PyTorch/LanguageModeling/BERT/run_pretraining.py b/PyTorch/LanguageModeling/BERT/run_pretraining.py
|
170 |
+
index 459b1f03..2aeffcff 100755
|
171 |
+
--- a/PyTorch/LanguageModeling/BERT/run_pretraining.py
|
172 |
+
+++ b/PyTorch/LanguageModeling/BERT/run_pretraining.py
|
173 |
+
@@ -38,6 +38,7 @@ from torch.utils.data.distributed import DistributedSampler
|
174 |
+
import math
|
175 |
+
from apex import amp
|
176 |
+
import multiprocessing
|
177 |
+
+import json
|
178 |
+
|
179 |
+
from tokenization import BertTokenizer
|
180 |
+
import modeling
|
181 |
+
@@ -60,6 +61,7 @@ torch._C._jit_set_profiling_mode(False)
|
182 |
+
torch._C._jit_set_profiling_executor(False)
|
183 |
+
|
184 |
+
skipped_steps = 0
|
185 |
+
+avg_seq_per_pack = 1.0
|
186 |
+
|
187 |
+
# Track whether a SIGTERM (cluster time up) has been handled
|
188 |
+
timeout_sent = False
|
189 |
+
@@ -82,34 +84,43 @@ class WorkerInitObj(object):
|
190 |
+
random.seed(self.seed + id)
|
191 |
+
|
192 |
+
def create_pretraining_dataset(input_file, max_pred_length, shared_list, args, worker_init):
|
193 |
+
- train_data = pretraining_dataset(input_file=input_file, max_pred_length=max_pred_length)
|
194 |
+
+ train_data = pretraining_dataset(input_file=input_file, max_pred_length=max_pred_length, enable_packed_data_mode=args.enable_packed_data_mode)
|
195 |
+
train_sampler = RandomSampler(train_data)
|
196 |
+
train_dataloader = DataLoader(train_data, sampler=train_sampler,
|
197 |
+
- batch_size=args.train_batch_size * args.n_gpu,
|
198 |
+
- num_workers=4, worker_init_fn=worker_init,
|
199 |
+
- pin_memory=True)
|
200 |
+
+ batch_size=args.train_batch_size * args.n_gpu,
|
201 |
+
+ num_workers=0, worker_init_fn=worker_init,
|
202 |
+
+ drop_last=True, pin_memory=True)
|
203 |
+
return train_dataloader, input_file
|
204 |
+
|
205 |
+
class pretraining_dataset(Dataset):
|
206 |
+
|
207 |
+
- def __init__(self, input_file, max_pred_length):
|
208 |
+
+ def __init__(self, input_file, max_pred_length, enable_packed_data_mode:bool=False):
|
209 |
+
self.input_file = input_file
|
210 |
+
self.max_pred_length = max_pred_length
|
211 |
+
f = h5py.File(input_file, "r")
|
212 |
+
- keys = ['input_ids', 'input_mask', 'segment_ids', 'masked_lm_positions', 'masked_lm_ids',
|
213 |
+
- 'next_sentence_labels']
|
214 |
+
+ if enable_packed_data_mode:
|
215 |
+
+ keys = ['input_ids', 'input_mask', 'segment_ids', 'positions',
|
216 |
+
+ 'masked_lm_positions', 'masked_lm_ids',
|
217 |
+
+ 'next_sentence_positions', 'next_sentence_labels', 'next_sentence_weights']
|
218 |
+
+ else:
|
219 |
+
+ keys = ['input_ids', 'input_mask', 'segment_ids',
|
220 |
+
+ 'masked_lm_positions', 'masked_lm_ids',
|
221 |
+
+ 'next_sentence_labels']
|
222 |
+
self.inputs = [np.asarray(f[key][:]) for key in keys]
|
223 |
+
f.close()
|
224 |
+
+ self.enable_packed_data_mode = enable_packed_data_mode
|
225 |
+
|
226 |
+
def __len__(self):
|
227 |
+
'Denotes the total number of samples'
|
228 |
+
return len(self.inputs[0])
|
229 |
+
|
230 |
+
def __getitem__(self, index):
|
231 |
+
-
|
232 |
+
- [input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, next_sentence_labels] = [
|
233 |
+
- torch.from_numpy(input[index].astype(np.int64)) if indice < 5 else torch.from_numpy(
|
234 |
+
- np.asarray(input[index].astype(np.int64))) for indice, input in enumerate(self.inputs)]
|
235 |
+
+ if self.enable_packed_data_mode:
|
236 |
+
+ [input_ids, input_mask, segment_ids, positions,
|
237 |
+
+ masked_lm_positions, masked_lm_ids,
|
238 |
+
+ next_sentence_positions, next_sentence_labels, next_sentence_weights] = [torch.from_numpy(input[index].astype(np.int64)) for input in self.inputs]
|
239 |
+
+ else:
|
240 |
+
+ [input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, next_sentence_labels] = [torch.from_numpy(input[index].astype(np.int64)) if indice < 5 else torch.from_numpy(np.asarray(input[index].astype(np.int64))) for indice, input in enumerate(self.inputs)]
|
241 |
+
|
242 |
+
masked_lm_labels = torch.ones(input_ids.shape, dtype=torch.long) * -1
|
243 |
+
index = self.max_pred_length
|
244 |
+
@@ -119,8 +130,11 @@ class pretraining_dataset(Dataset):
|
245 |
+
index = padded_mask_indices[0].item()
|
246 |
+
masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index]
|
247 |
+
|
248 |
+
- return [input_ids, segment_ids, input_mask,
|
249 |
+
- masked_lm_labels, next_sentence_labels]
|
250 |
+
+ if self.enable_packed_data_mode:
|
251 |
+
+ next_sentence_labels = (next_sentence_weights == 1) * next_sentence_labels + (next_sentence_weights == 0) * -1
|
252 |
+
+ return [input_ids, segment_ids, input_mask, positions, masked_lm_labels, next_sentence_positions, next_sentence_labels]
|
253 |
+
+ else:
|
254 |
+
+ return [input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels]
|
255 |
+
|
256 |
+
class BertPretrainingCriterion(torch.nn.Module):
|
257 |
+
def __init__(self, vocab_size):
|
258 |
+
@@ -280,6 +294,8 @@ def parse_arguments():
|
259 |
+
help='Disable tqdm progress bar')
|
260 |
+
parser.add_argument('--steps_this_run', type=int, default=-1,
|
261 |
+
help='If provided, only run this many steps before exiting')
|
262 |
+
+ parser.add_argument('--enable_packed_data_mode', default='True', type=lambda x: x.lower() == 'true',
|
263 |
+
+ help='enable/disable training with packed data. Default is True, --input_dir should be set accordingly')
|
264 |
+
|
265 |
+
args = parser.parse_args()
|
266 |
+
args.fp16 = args.fp16 or args.amp
|
267 |
+
@@ -328,6 +344,9 @@ def setup_training(args):
|
268 |
+
|
269 |
+
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
|
270 |
+
|
271 |
+
+ if args.enable_packed_data_mode:
|
272 |
+
+ args.gradient_accumulation_steps = round(args.gradient_accumulation_steps / avg_seq_per_pack)
|
273 |
+
+
|
274 |
+
if not args.do_train:
|
275 |
+
raise ValueError(" `do_train` must be True.")
|
276 |
+
|
277 |
+
@@ -490,8 +509,41 @@ def take_optimizer_step(args, optimizer, model, overflow_buf, global_step):
|
278 |
+
|
279 |
+
return global_step
|
280 |
+
|
281 |
+
+def get_metadata_file_path(input_dir : str) -> str:
|
282 |
+
+ norm_path = os.path.normpath(input_dir)
|
283 |
+
+ head_tail = os.path.split(norm_path)
|
284 |
+
+ metadata_file_name = head_tail[1]
|
285 |
+
+ metadata_file_name = metadata_file_name + '_metadata.json'
|
286 |
+
+ metadata_file_path = os.path.join(head_tail[0],metadata_file_name)
|
287 |
+
+ return metadata_file_path
|
288 |
+
+
|
289 |
+
+def read_avg_seq_per_sample(input_dir : str, max_sequence_length) -> float:
|
290 |
+
+ metadata = None
|
291 |
+
+ metadata_file_path = get_metadata_file_path(input_dir)
|
292 |
+
+ print(f"Reading dataset metadata from: {metadata_file_path}")
|
293 |
+
+ if os.path.exists(metadata_file_path):
|
294 |
+
+ file_handle = open(metadata_file_path, mode='r')
|
295 |
+
+ json_content = file_handle.read()
|
296 |
+
+ metadata = json.loads(json_content)
|
297 |
+
+ else:
|
298 |
+
+ print("Packed dataset metadata file not accessible, falling back to default values of avg_seq_per_sample")
|
299 |
+
+ if max_sequence_length == 128:
|
300 |
+
+ return 1.2
|
301 |
+
+ elif max_sequence_length == 512:
|
302 |
+
+ return 2.0
|
303 |
+
+ else:
|
304 |
+
+ assert f"invalid max_sequence_length"
|
305 |
+
+ avg_seq_per_sample_key = "avg_seq_per_sample"
|
306 |
+
+ if metadata is not None and avg_seq_per_sample_key in metadata.keys():
|
307 |
+
+ avg_seq_per_sample = metadata[avg_seq_per_sample_key]
|
308 |
+
+ else:
|
309 |
+
+ assert False, f"Key {avg_seq_per_sample_key} not present in packed dataset metadata file: {metadata_file_path}"
|
310 |
+
+ print(f"AVG_SEQ_PER_SAMPLE: {avg_seq_per_sample}")
|
311 |
+
+ return avg_seq_per_sample
|
312 |
+
+
|
313 |
+
def main():
|
314 |
+
global timeout_sent
|
315 |
+
+ global avg_seq_per_pack
|
316 |
+
|
317 |
+
args = parse_arguments()
|
318 |
+
|
319 |
+
@@ -501,6 +553,10 @@ def main():
|
320 |
+
torch.cuda.manual_seed(args.seed + args.local_rank)
|
321 |
+
worker_init = WorkerInitObj(args.seed + args.local_rank)
|
322 |
+
|
323 |
+
+ if args.enable_packed_data_mode:
|
324 |
+
+ avg_seq_per_pack = read_avg_seq_per_sample(args.input_dir, args.max_seq_length)
|
325 |
+
+ else:
|
326 |
+
+ avg_seq_per_pack = 1.0
|
327 |
+
device, args = setup_training(args)
|
328 |
+
dllogger.log(step="PARAMETER", data={"Config": [str(args)]})
|
329 |
+
|
330 |
+
@@ -522,16 +578,23 @@ def main():
|
331 |
+
average_loss = 0.0 # averaged loss every args.log_freq steps
|
332 |
+
epoch = 0
|
333 |
+
training_steps = 0
|
334 |
+
+ average_training_time_per_step = 0
|
335 |
+
+ average_perf_per_step = 0
|
336 |
+
|
337 |
+
pool = ProcessPoolExecutor(1)
|
338 |
+
-
|
339 |
+
+
|
340 |
+
+ starting_time = time.time()
|
341 |
+
# Note: We loop infinitely over epochs, termination is handled via iteration count
|
342 |
+
while True:
|
343 |
+
thread = None
|
344 |
+
restored_data_loader = None
|
345 |
+
if not args.resume_from_checkpoint or epoch > 0 or (args.phase2 and global_step < 1) or args.init_checkpoint:
|
346 |
+
- files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if
|
347 |
+
- os.path.isfile(os.path.join(args.input_dir, f)) and 'training' in f]
|
348 |
+
+ if args.enable_packed_data_mode:
|
349 |
+
+ files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if
|
350 |
+
+ os.path.isfile(os.path.join(args.input_dir, f))] # Packed files have no 'training' pre/postfix.
|
351 |
+
+ else:
|
352 |
+
+ files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if
|
353 |
+
+ os.path.isfile(os.path.join(args.input_dir, f)) and 'training' in f]
|
354 |
+
files.sort()
|
355 |
+
num_files = len(files)
|
356 |
+
random.Random(args.seed + epoch).shuffle(files)
|
357 |
+
@@ -556,7 +619,7 @@ def main():
|
358 |
+
previous_file = data_file
|
359 |
+
|
360 |
+
if restored_data_loader is None:
|
361 |
+
- train_data = pretraining_dataset(data_file, args.max_predictions_per_seq)
|
362 |
+
+ train_data = pretraining_dataset(data_file, args.max_predictions_per_seq, args.enable_packed_data_mode)
|
363 |
+
train_sampler = RandomSampler(train_data)
|
364 |
+
train_dataloader = DataLoader(train_data, sampler=train_sampler,
|
365 |
+
batch_size=args.train_batch_size * args.n_gpu,
|
366 |
+
@@ -590,9 +653,21 @@ def main():
|
367 |
+
for step, batch in enumerate(train_iter):
|
368 |
+
|
369 |
+
training_steps += 1
|
370 |
+
+
|
371 |
+
batch = [t.to(device) for t in batch]
|
372 |
+
- input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch
|
373 |
+
- prediction_scores, seq_relationship_score = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
|
374 |
+
+ if args.enable_packed_data_mode:
|
375 |
+
+ input_ids, segment_ids, input_mask, positions, masked_lm_labels, next_sentence_positions, next_sentence_labels = batch
|
376 |
+
+ else:
|
377 |
+
+ input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch
|
378 |
+
+
|
379 |
+
+ if (args.local_rank != -1) and (training_steps % args.gradient_accumulation_steps == 0):
|
380 |
+
+ torch.distributed.barrier()
|
381 |
+
+
|
382 |
+
+ prediction_scores, seq_relationship_score = model(
|
383 |
+
+ input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, enable_packed_data_mode=args.enable_packed_data_mode,
|
384 |
+
+ positions=positions if args.enable_packed_data_mode else None,
|
385 |
+
+ next_sentence_positions=next_sentence_positions if args.enable_packed_data_mode else None)
|
386 |
+
+
|
387 |
+
loss = criterion(prediction_scores, seq_relationship_score, masked_lm_labels, next_sentence_labels)
|
388 |
+
if args.n_gpu > 1:
|
389 |
+
loss = loss.mean() # mean() to average on multi-gpu.
|
390 |
+
@@ -618,6 +693,12 @@ def main():
|
391 |
+
|
392 |
+
htcore.mark_step()
|
393 |
+
|
394 |
+
+ if global_step >= args.steps_this_run or timeout_sent or training_steps % (args.log_freq * args.gradient_accumulation_steps) == 0:
|
395 |
+
+ train_time = time.time() - starting_time
|
396 |
+
+ starting_time = time.time()
|
397 |
+
+ average_training_time_per_step = train_time/(args.gradient_accumulation_steps * args.log_freq)
|
398 |
+
+ average_perf_per_step = args.train_batch_size*avg_seq_per_pack/average_training_time_per_step
|
399 |
+
+
|
400 |
+
if global_step >= args.steps_this_run or timeout_sent:
|
401 |
+
train_time_raw = time.time() - raw_train_start
|
402 |
+
last_num_steps = int(training_steps / args.gradient_accumulation_steps) % args.log_freq
|
403 |
+
@@ -629,12 +710,16 @@ def main():
|
404 |
+
torch.distributed.all_reduce(average_loss)
|
405 |
+
final_loss = average_loss.item()
|
406 |
+
if is_main_process():
|
407 |
+
- dllogger.log(step=(epoch, global_step, ), data={"final_loss": final_loss})
|
408 |
+
+ dllogger.log(step=(epoch, global_step, ), data={"final_loss": final_loss,
|
409 |
+
+ "average_training_time_step": average_training_time_per_step,
|
410 |
+
+ "average_perf_per_step": average_perf_per_step})
|
411 |
+
elif training_steps % (args.log_freq * args.gradient_accumulation_steps) == 0:
|
412 |
+
if is_main_process():
|
413 |
+
dllogger.log(step=(epoch, global_step, ), data={"average_loss": average_loss / (args.log_freq * divisor),
|
414 |
+
"step_loss": loss.item() * args.gradient_accumulation_steps / divisor,
|
415 |
+
- "learning_rate": optimizer.param_groups[0]['lr']})
|
416 |
+
+ "learning_rate": optimizer.param_groups[0]['lr'],
|
417 |
+
+ "average_training_time_step": average_training_time_per_step,
|
418 |
+
+ "average_perf_per_step": average_perf_per_step})
|
419 |
+
average_loss = 0
|
420 |
+
|
421 |
+
|
422 |
+
@@ -690,7 +775,7 @@ if __name__ == "__main__":
|
423 |
+
gpu_count = get_world_size()
|
424 |
+
if is_main_process():
|
425 |
+
e2e_time = time.time() - now
|
426 |
+
- training_perf = args.train_batch_size * args.gradient_accumulation_steps * gpu_count\
|
427 |
+
+ training_perf = args.train_batch_size * args.gradient_accumulation_steps * gpu_count * avg_seq_per_pack\
|
428 |
+
* (global_step - args.resume_step + skipped_steps) / train_time_raw
|
429 |
+
dllogger.log(step=tuple(), data={"e2e_train_time": e2e_time, "training_sequences_per_second": training_perf,
|
430 |
+
"final_loss": final_loss, "raw_train_time": train_time_raw })
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/processors/__init__.py
ADDED
File without changes
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/processors/glue.py
ADDED
@@ -0,0 +1,325 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
|
3 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
|
17 |
+
import csv
|
18 |
+
import os
|
19 |
+
import sys
|
20 |
+
|
21 |
+
|
22 |
+
class InputExample(object):
    """One training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Build a single example.

        Args:
            guid: Unique id for the example.
            text_a: string. Untokenized text of the first sequence. For
                single-sequence tasks only this text must be given.
            text_b: (Optional) string. Untokenized text of the second
                sequence; required only for sequence-pair tasks.
            label: (Optional) string. Label of the example. Expected for
                train and dev examples, absent for test examples.
        """
        self.guid, self.text_a = guid, text_a
        self.text_b, self.label = text_b, label
|
42 |
+
|
43 |
+
|
44 |
+
class InputFeatures(object):
    """One set of numeric features feeding the model for a single example."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        # Token ids, attention mask, and segment (token-type) ids are all
        # padded to the same fixed sequence length by the caller.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        # Integer class index for this example.
        self.label_id = label_id
|
52 |
+
|
53 |
+
|
54 |
+
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file into a list of row lists."""
        rows = []
        with open(input_file, "r") as handle:
            for row in csv.reader(handle, delimiter="\t", quotechar=quotechar):
                if sys.version_info[0] == 2:
                    # Python 2 only: decode each cell to unicode.
                    row = [unicode(cell, 'utf-8') for cell in row]
                rows.append(row)
        return rows
|
80 |
+
|
81 |
+
|
82 |
+
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                # First row is the TSV header.
                continue
            examples.append(
                InputExample(guid="%s-%s" % (set_type, idx),
                             text_a=row[3],
                             text_b=row[4],
                             label=row[0]))
        return examples
|
119 |
+
|
120 |
+
|
121 |
+
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
            "dev_matched")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for idx, row in enumerate(lines):
            if idx == 0:
                # First row is the TSV header.
                continue
            examples.append(
                InputExample(guid="%s-%s" % (set_type, row[0]),
                             text_a=row[8],
                             text_b=row[9],
                             label=row[-1]))
        return examples
|
158 |
+
|
159 |
+
|
160 |
+
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        Note: CoLA files carry no header row, so every line is used.
        """
        examples = []
        for idx, row in enumerate(lines):
            examples.append(
                InputExample(guid="%s-%s" % (set_type, idx),
                             text_a=row[3],
                             text_b=None,
                             label=row[1]))
        return examples
|
192 |
+
|
193 |
+
|
194 |
+
class Sst2Processor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version).

    Fixed: the docstring previously claimed this was the CoLA processor
    (copy-paste from ColaProcessor); the logic below is SST-2's
    (header row skipped, sentence in column 0, label in column 1).
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")),
            "train",
        )

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")),
            "dev",
        )

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                # First row is the TSV header.
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = line[0]
            label = line[1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=None,
                             label=label))
        return examples
|
228 |
+
|
229 |
+
|
230 |
+
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Returns a (features, label_map) pair, where label_map maps each label
    string in `label_list` to its integer index.
    """

    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = tokenizer.tokenize(example.text_b) if example.text_b else None

        if tokens_b is not None:
            # Shrink both in place so the total plus [CLS], [SEP], [SEP]
            # ("- 3") fits within max_seq_length.
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        elif len(tokens_a) > max_seq_length - 2:
            # Single sequence: only [CLS] and [SEP] are added ("- 2").
            tokens_a = tokens_a[:max_seq_length - 2]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0  0    0    0     0      0   0    1  1  1  1   1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        #
        # "type_ids" tell the model whether a token belongs to the first or
        # the second sequence; the corresponding embeddings were learned
        # during pre-training and are added to the wordpiece and position
        # embeddings. The [SEP] token already separates the sequences, so
        # this is not strictly necessary, but it helps the model.
        #
        # For classification the vector of the first token ([CLS]) serves as
        # the "sentence vector" -- meaningful only because the whole model
        # is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)

        if tokens_b:
            tokens = tokens + tokens_b + ["[SEP]"]
            segment_ids = segment_ids + [1] * (len(tokens_b) + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # Attention mask: 1 for real tokens, 0 for padding.
        input_mask = [1] * len(input_ids)

        # Zero-pad every vector up to the fixed sequence length.
        pad = [0] * (max_seq_length - len(input_ids))
        input_ids = input_ids + pad
        input_mask = input_mask + pad
        segment_ids = segment_ids + pad

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_map[example.label]))
    return features, label_map
|
301 |
+
|
302 |
+
|
303 |
+
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
|
304 |
+
"""Truncates a sequence pair in place to the maximum length."""
|
305 |
+
|
306 |
+
# This is a simple heuristic which will always truncate the longer sequence
|
307 |
+
# one token at a time. This makes more sense than truncating an equal percent
|
308 |
+
# of tokens from each, since if one sequence is very short then each token
|
309 |
+
# that's truncated likely contains more information than a longer sequence.
|
310 |
+
while True:
|
311 |
+
total_length = len(tokens_a) + len(tokens_b)
|
312 |
+
if total_length <= max_length:
|
313 |
+
break
|
314 |
+
if len(tokens_a) > len(tokens_b):
|
315 |
+
tokens_a.pop()
|
316 |
+
else:
|
317 |
+
tokens_b.pop()
|
318 |
+
|
319 |
+
|
320 |
+
# Registry mapping a lowercase task name to its DataProcessor subclass.
# NOTE(review): presumably keyed by a task-name CLI argument in the
# fine-tuning script -- confirm against the caller.
PROCESSORS = {
    "cola": ColaProcessor,
    "mnli": MnliProcessor,
    "mrpc": MrpcProcessor,
    "sst-2": Sst2Processor,
}
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/requirements.txt
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# progress bars in model download and training scripts
|
2 |
+
tqdm
|
3 |
+
# Accessing files from S3 directly.
|
4 |
+
boto3
|
5 |
+
# Used for downloading models over HTTP
|
6 |
+
requests
|
7 |
+
six
|
8 |
+
ipdb
|
9 |
+
#Data processing
|
10 |
+
h5py
|
11 |
+
html2text
|
12 |
+
nltk
|
13 |
+
progressbar
|
14 |
+
#Others
|
15 |
+
onnxruntime
|
16 |
+
git+https://github.com/NVIDIA/dllogger
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/results/.keep
ADDED
File without changes
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/results/checkpoints/lddl_log/node-0.txt
ADDED
File without changes
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/results/checkpoints/lddl_log/node-0_local-0.txt
ADDED
File without changes
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/results/dllogger.json
ADDED
File without changes
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/run.sub
ADDED
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
|
2 |
+
#SBATCH --exclusive
|
3 |
+
#SBATCH --mem=0
|
4 |
+
#SBATCH --overcommit
|
5 |
+
|
6 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
7 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
8 |
+
# you may not use this file except in compliance with the License.
|
9 |
+
# You may obtain a copy of the License at
|
10 |
+
#
|
11 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
12 |
+
#
|
13 |
+
# Unless required by applicable law or agreed to in writing, software
|
14 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
15 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
16 |
+
# See the License for the specific language governing permissions and
|
17 |
+
# limitations under the License.
|
18 |
+
|
19 |
+
set -eux
|
20 |
+
|
21 |
+
# The following variables variables need to be set
|
22 |
+
# Base container to be used - container built in step 1 on quick start guide
|
23 |
+
readonly docker_image="nvcr.io/nvidia/pytorch:20.06-py3"
|
24 |
+
# Location of dataset for phase 1
|
25 |
+
readonly datadir="/raid/datasets/bert/hdf5/shard_1472_test_split_10/seq_128_pred_20_dupe_5/training"
|
26 |
+
# Location of dataset for phase 2
|
27 |
+
readonly datadir_phase2="/raid/datasets/bert/hdf5/shard_1472_test_split_10/seq_512_pred_80_dupe_5/training"
|
28 |
+
# Path to where trained checkpoints will be saved on the system
|
29 |
+
readonly checkpointdir="$PWD/checkpoints"
|
30 |
+
|
31 |
+
readonly mounts=".:/workspace/bert,${datadir}:/workspace/data,${datadir_phase2}:/workspace/data_phase2,${checkpointdir}:/results"
|
32 |
+
|
33 |
+
BIND_CMD="./bind.sh --cpu=exclusive --ib=single --"
|
34 |
+
|
35 |
+
srun --ntasks="${SLURM_JOB_NUM_NODES}" --ntasks-per-node=1 mkdir -p "${checkpointdir}"
|
36 |
+
|
37 |
+
PHASE1="\
|
38 |
+
--train_batch_size=${BATCHSIZE:-16} \
|
39 |
+
--learning_rate=${LR:-6e-3} \
|
40 |
+
--warmup_proportion=${WARMUP_UPDATES:-0.2843} \
|
41 |
+
--input_dir=/workspace/data \
|
42 |
+
--max_seq_length=128 \
|
43 |
+
--max_predictions_per_seq=20 \
|
44 |
+
--max_steps=7038 \
|
45 |
+
--num_steps_per_checkpoint=2500 \
|
46 |
+
"
|
47 |
+
PHASE2="\
|
48 |
+
--train_batch_size=${BATCHSIZE:-4096} \
|
49 |
+
--learning_rate=${LR:-4e-3} \
|
50 |
+
--warmup_proportion=${WARMUP_UPDATES:-0.128} \
|
51 |
+
--input_dir=/workspace/data_phase2 \
|
52 |
+
--phase2 \
|
53 |
+
--max_seq_length=512 \
|
54 |
+
--max_predictions_per_seq=80 \
|
55 |
+
--max_steps=1563 \
|
56 |
+
--num_steps_per_checkpoint=1000 \
|
57 |
+
--resume_from_checkpoint --phase1_end_step=7038 \
|
58 |
+
"
|
59 |
+
PHASES=( "$PHASE1" "$PHASE2" )
|
60 |
+
|
61 |
+
PHASE=${PHASE:-1}
|
62 |
+
|
63 |
+
BERT_CMD="\
|
64 |
+
${BIND_CMD} python -u /workspace/bert/run_pretraining.py \
|
65 |
+
--seed=42 \
|
66 |
+
${PHASES[$((PHASE-1))]} \
|
67 |
+
--do_train \
|
68 |
+
--config_file=/workspace/bert/bert_config.json \
|
69 |
+
--output_dir=/results \
|
70 |
+
--fp16 \
|
71 |
+
--allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
|
72 |
+
--gradient_accumulation_steps=${GRADIENT_STEPS:-2} \
|
73 |
+
--log_freq=1 \
|
74 |
+
--local_rank=\${SLURM_LOCALID}"
|
75 |
+
|
76 |
+
srun -l --container-image="${docker_image}" --container-mounts="${mounts}" sh -c "${BERT_CMD}"
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/run_pretraining.py
ADDED
@@ -0,0 +1,818 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
3 |
+
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
|
4 |
+
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
###############################################################################
|
17 |
+
# Copyright (C) 2023 Habana Labs Ltd. an Intel Company
|
18 |
+
###############################################################################
|
19 |
+
|
20 |
+
"""BERT finetuning runner."""
|
21 |
+
|
22 |
+
from __future__ import absolute_import
|
23 |
+
from __future__ import division
|
24 |
+
from __future__ import print_function
|
25 |
+
|
26 |
+
import habana_frameworks.torch.gpu_migration
|
27 |
+
import habana_frameworks.torch.core as htcore
|
28 |
+
# ==================
|
29 |
+
import csv
|
30 |
+
import os
|
31 |
+
import time
|
32 |
+
import argparse
|
33 |
+
import random
|
34 |
+
import h5py
|
35 |
+
from tqdm import tqdm, trange
|
36 |
+
import os
|
37 |
+
import numpy as np
|
38 |
+
import torch
|
39 |
+
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Dataset
|
40 |
+
from torch.utils.data.distributed import DistributedSampler
|
41 |
+
import math
|
42 |
+
from apex import amp
|
43 |
+
import multiprocessing
|
44 |
+
import json
|
45 |
+
|
46 |
+
from tokenization import BertTokenizer
|
47 |
+
import modeling
|
48 |
+
from apex.optimizers import FusedLAMB
|
49 |
+
from schedulers import PolyWarmUpScheduler
|
50 |
+
|
51 |
+
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
|
52 |
+
from utils import is_main_process, format_step, get_world_size, get_rank
|
53 |
+
from apex.parallel import DistributedDataParallel as DDP
|
54 |
+
from schedulers import LinearWarmUpScheduler
|
55 |
+
from apex.parallel.distributed import flat_dist_call
|
56 |
+
import amp_C
|
57 |
+
from apex.amp import _amp_state
|
58 |
+
|
59 |
+
import dllogger
|
60 |
+
from concurrent.futures import ProcessPoolExecutor
|
61 |
+
|
62 |
+
torch._C._jit_set_profiling_mode(False)
|
63 |
+
torch._C._jit_set_profiling_executor(False)
|
64 |
+
|
65 |
+
skipped_steps = 0
|
66 |
+
avg_seq_per_pack = 1.0
|
67 |
+
|
68 |
+
# Track whether a SIGTERM (cluster time up) has been handled
|
69 |
+
timeout_sent = False
|
70 |
+
|
71 |
+
import signal
|
72 |
+
# handle SIGTERM sent from the scheduler and mark so we
|
73 |
+
# can gracefully save & exit
|
74 |
+
def signal_handler(sig, frame):
|
75 |
+
global timeout_sent
|
76 |
+
timeout_sent = True
|
77 |
+
|
78 |
+
signal.signal(signal.SIGTERM, signal_handler)
|
79 |
+
|
80 |
+
#Workaround because python functions are not picklable
|
81 |
+
class WorkerInitObj(object):
|
82 |
+
def __init__(self, seed):
|
83 |
+
self.seed = seed
|
84 |
+
def __call__(self, id):
|
85 |
+
np.random.seed(seed=self.seed + id)
|
86 |
+
random.seed(self.seed + id)
|
87 |
+
|
88 |
+
def create_pretraining_dataset(input_file, max_pred_length, shared_list, args, worker_init):
|
89 |
+
train_data = pretraining_dataset(input_file=input_file, max_pred_length=max_pred_length, enable_packed_data_mode=args.enable_packed_data_mode)
|
90 |
+
train_sampler = RandomSampler(train_data)
|
91 |
+
train_dataloader = DataLoader(train_data, sampler=train_sampler,
|
92 |
+
batch_size=args.train_batch_size * args.n_gpu,
|
93 |
+
num_workers=0, worker_init_fn=worker_init,
|
94 |
+
drop_last=True, pin_memory=True)
|
95 |
+
return train_dataloader, input_file
|
96 |
+
|
97 |
+
class pretraining_dataset(Dataset):
|
98 |
+
|
99 |
+
def __init__(self, input_file, max_pred_length, enable_packed_data_mode:bool=False):
|
100 |
+
self.input_file = input_file
|
101 |
+
self.max_pred_length = max_pred_length
|
102 |
+
f = h5py.File(input_file, "r")
|
103 |
+
if enable_packed_data_mode:
|
104 |
+
keys = ['input_ids', 'input_mask', 'segment_ids', 'positions',
|
105 |
+
'masked_lm_positions', 'masked_lm_ids',
|
106 |
+
'next_sentence_positions', 'next_sentence_labels', 'next_sentence_weights']
|
107 |
+
else:
|
108 |
+
keys = ['input_ids', 'input_mask', 'segment_ids',
|
109 |
+
'masked_lm_positions', 'masked_lm_ids',
|
110 |
+
'next_sentence_labels']
|
111 |
+
self.inputs = [np.asarray(f[key][:]) for key in keys]
|
112 |
+
f.close()
|
113 |
+
self.enable_packed_data_mode = enable_packed_data_mode
|
114 |
+
|
115 |
+
def __len__(self):
|
116 |
+
'Denotes the total number of samples'
|
117 |
+
return len(self.inputs[0])
|
118 |
+
|
119 |
+
def __getitem__(self, index):
|
120 |
+
if self.enable_packed_data_mode:
|
121 |
+
[input_ids, input_mask, segment_ids, positions,
|
122 |
+
masked_lm_positions, masked_lm_ids,
|
123 |
+
next_sentence_positions, next_sentence_labels, next_sentence_weights] = [torch.from_numpy(input[index].astype(np.int64)) for input in self.inputs]
|
124 |
+
else:
|
125 |
+
[input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, next_sentence_labels] = [torch.from_numpy(input[index].astype(np.int64)) if indice < 5 else torch.from_numpy(np.asarray(input[index].astype(np.int64))) for indice, input in enumerate(self.inputs)]
|
126 |
+
|
127 |
+
masked_lm_labels = torch.ones(input_ids.shape, dtype=torch.long) * -1
|
128 |
+
index = self.max_pred_length
|
129 |
+
# store number of masked tokens in index
|
130 |
+
padded_mask_indices = (masked_lm_positions == 0).nonzero()
|
131 |
+
if len(padded_mask_indices) != 0:
|
132 |
+
index = padded_mask_indices[0].item()
|
133 |
+
masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index]
|
134 |
+
|
135 |
+
if self.enable_packed_data_mode:
|
136 |
+
next_sentence_labels = (next_sentence_weights == 1) * next_sentence_labels + (next_sentence_weights == 0) * -1
|
137 |
+
return [input_ids, segment_ids, input_mask, positions, masked_lm_labels, next_sentence_positions, next_sentence_labels]
|
138 |
+
else:
|
139 |
+
return [input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels]
|
140 |
+
|
141 |
+
|
142 |
+
class BertPretrainingCriterion(torch.nn.Module):
|
143 |
+
def __init__(self, vocab_size):
|
144 |
+
super(BertPretrainingCriterion, self).__init__()
|
145 |
+
self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=-1)
|
146 |
+
self.vocab_size = vocab_size
|
147 |
+
def forward(self, prediction_scores, seq_relationship_score, masked_lm_labels, next_sentence_labels):
|
148 |
+
masked_lm_loss = self.loss_fn(prediction_scores.view(-1, self.vocab_size), masked_lm_labels.view(-1))
|
149 |
+
next_sentence_loss = self.loss_fn(seq_relationship_score.view(-1, 2), next_sentence_labels.view(-1))
|
150 |
+
total_loss = masked_lm_loss + next_sentence_loss
|
151 |
+
return total_loss
|
152 |
+
|
153 |
+
|
154 |
+
def parse_arguments():
|
155 |
+
local_rank = os.getenv('OMPI_COMM_WORLD_LOCAL_RANK', None)
|
156 |
+
if local_rank != None:
|
157 |
+
os.environ['LOCAL_RANK'] = local_rank
|
158 |
+
|
159 |
+
parser = argparse.ArgumentParser()
|
160 |
+
|
161 |
+
## Required parameters
|
162 |
+
parser.add_argument("--input_dir",
|
163 |
+
default=None,
|
164 |
+
type=str,
|
165 |
+
required=True,
|
166 |
+
help="The input data dir. Should contain .hdf5 files for the task.")
|
167 |
+
|
168 |
+
parser.add_argument("--config_file",
|
169 |
+
default=None,
|
170 |
+
type=str,
|
171 |
+
required=True,
|
172 |
+
help="The BERT model config")
|
173 |
+
|
174 |
+
parser.add_argument("--bert_model", default="bert-large-uncased", type=str,
|
175 |
+
help="Bert pre-trained model selected in the list: bert-base-uncased, "
|
176 |
+
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
|
177 |
+
|
178 |
+
parser.add_argument("--output_dir",
|
179 |
+
default=None,
|
180 |
+
type=str,
|
181 |
+
required=True,
|
182 |
+
help="The output directory where the model checkpoints will be written.")
|
183 |
+
|
184 |
+
## Other parameters
|
185 |
+
parser.add_argument("--init_checkpoint",
|
186 |
+
default=None,
|
187 |
+
type=str,
|
188 |
+
help="The initial checkpoint to start training from.")
|
189 |
+
|
190 |
+
parser.add_argument("--max_seq_length",
|
191 |
+
default=512,
|
192 |
+
type=int,
|
193 |
+
help="The maximum total input sequence length after WordPiece tokenization. \n"
|
194 |
+
"Sequences longer than this will be truncated, and sequences shorter \n"
|
195 |
+
"than this will be padded.")
|
196 |
+
parser.add_argument("--max_predictions_per_seq",
|
197 |
+
default=80,
|
198 |
+
type=int,
|
199 |
+
help="The maximum total of masked tokens in input sequence")
|
200 |
+
parser.add_argument("--train_batch_size",
|
201 |
+
default=32,
|
202 |
+
type=int,
|
203 |
+
help="Total batch size for training.")
|
204 |
+
parser.add_argument("--learning_rate",
|
205 |
+
default=5e-5,
|
206 |
+
type=float,
|
207 |
+
help="The initial learning rate for Adam.")
|
208 |
+
parser.add_argument("--num_train_epochs",
|
209 |
+
default=3.0,
|
210 |
+
type=float,
|
211 |
+
help="Total number of training epochs to perform.")
|
212 |
+
parser.add_argument("--max_steps",
|
213 |
+
default=1000,
|
214 |
+
type=float,
|
215 |
+
help="Total number of training steps to perform.")
|
216 |
+
parser.add_argument("--warmup_proportion",
|
217 |
+
default=0.01,
|
218 |
+
type=float,
|
219 |
+
help="Proportion of training to perform linear learning rate warmup for. "
|
220 |
+
"E.g., 0.1 = 10%% of training.")
|
221 |
+
parser.add_argument("--local_rank",
|
222 |
+
type=int,
|
223 |
+
default=os.getenv('LOCAL_RANK', -1),
|
224 |
+
help="local_rank for distributed training on gpus")
|
225 |
+
parser.add_argument('--seed',
|
226 |
+
type=int,
|
227 |
+
default=42,
|
228 |
+
help="random seed for initialization")
|
229 |
+
parser.add_argument('--gradient_accumulation_steps',
|
230 |
+
type=int,
|
231 |
+
default=1,
|
232 |
+
help="Number of updates steps to accumualte before performing a backward/update pass.")
|
233 |
+
parser.add_argument('--fp16',
|
234 |
+
default=False,
|
235 |
+
action='store_true',
|
236 |
+
help="Mixed precision training")
|
237 |
+
parser.add_argument('--amp',
|
238 |
+
default=False,
|
239 |
+
action='store_true',
|
240 |
+
help="Mixed precision training")
|
241 |
+
parser.add_argument('--loss_scale',
|
242 |
+
type=float, default=0.0,
|
243 |
+
help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
|
244 |
+
parser.add_argument('--log_freq',
|
245 |
+
type=float, default=1.0,
|
246 |
+
help='frequency of logging loss.')
|
247 |
+
parser.add_argument('--checkpoint_activations',
|
248 |
+
default=False,
|
249 |
+
action='store_true',
|
250 |
+
help="Whether to use gradient checkpointing")
|
251 |
+
parser.add_argument("--resume_from_checkpoint",
|
252 |
+
default=False,
|
253 |
+
action='store_true',
|
254 |
+
help="Whether to resume training from checkpoint.")
|
255 |
+
parser.add_argument('--resume_step',
|
256 |
+
type=int,
|
257 |
+
default=-1,
|
258 |
+
help="Step to resume training from.")
|
259 |
+
parser.add_argument('--num_steps_per_checkpoint',
|
260 |
+
type=int,
|
261 |
+
default=100,
|
262 |
+
help="Number of update steps until a model checkpoint is saved to disk.")
|
263 |
+
parser.add_argument('--skip_checkpoint',
|
264 |
+
default=False,
|
265 |
+
action='store_true',
|
266 |
+
help="Whether to save checkpoints")
|
267 |
+
parser.add_argument('--phase2',
|
268 |
+
default=False,
|
269 |
+
action='store_true',
|
270 |
+
help="Whether to train with seq len 512")
|
271 |
+
parser.add_argument('--allreduce_post_accumulation',
|
272 |
+
default=False,
|
273 |
+
action='store_true',
|
274 |
+
help="Whether to do allreduces during gradient accumulation steps.")
|
275 |
+
parser.add_argument('--allreduce_post_accumulation_fp16',
|
276 |
+
default=False,
|
277 |
+
action='store_true',
|
278 |
+
help="Whether to do fp16 allreduce post accumulation.")
|
279 |
+
parser.add_argument('--phase1_end_step',
|
280 |
+
type=int,
|
281 |
+
default=7038,
|
282 |
+
help="Number of training steps in Phase1 - seq len 128")
|
283 |
+
parser.add_argument('--init_loss_scale',
|
284 |
+
type=int,
|
285 |
+
default=2**20,
|
286 |
+
help="Initial loss scaler value")
|
287 |
+
parser.add_argument("--do_train",
|
288 |
+
default=False,
|
289 |
+
action='store_true',
|
290 |
+
help="Whether to run training.")
|
291 |
+
parser.add_argument('--json-summary', type=str, default="results/dllogger.json",
|
292 |
+
help='If provided, the json summary will be written to'
|
293 |
+
'the specified file.')
|
294 |
+
parser.add_argument("--use_env",
|
295 |
+
action='store_true',
|
296 |
+
help="Whether to read local rank from ENVVAR")
|
297 |
+
parser.add_argument('--disable_progress_bar',
|
298 |
+
default=False,
|
299 |
+
action='store_true',
|
300 |
+
help='Disable tqdm progress bar')
|
301 |
+
parser.add_argument('--steps_this_run', type=int, default=-1,
|
302 |
+
help='If provided, only run this many steps before exiting')
|
303 |
+
parser.add_argument('--enable_packed_data_mode', default='True', type=lambda x: x.lower() == 'true',
|
304 |
+
help='enable/disable training with packed data. Default is True, --input_dir should be set accordingly')
|
305 |
+
parser.add_argument("--use_torch_compile", help="Compile model with torch compile", action="store_true")
|
306 |
+
|
307 |
+
args = parser.parse_args()
|
308 |
+
args.fp16 = args.fp16 or args.amp
|
309 |
+
|
310 |
+
if args.steps_this_run < 0:
|
311 |
+
args.steps_this_run = args.max_steps
|
312 |
+
|
313 |
+
return args
|
314 |
+
|
315 |
+
def setup_training(args):
|
316 |
+
|
317 |
+
assert (torch.cuda.is_available())
|
318 |
+
|
319 |
+
if args.local_rank == -1:
|
320 |
+
device = torch.device("cuda")
|
321 |
+
# DP is not supported
|
322 |
+
args.n_gpu = 1
|
323 |
+
args.allreduce_post_accumulation = False
|
324 |
+
args.allreduce_post_accumulation_fp16 = False
|
325 |
+
else:
|
326 |
+
torch.cuda.set_device(args.local_rank)
|
327 |
+
device = torch.device("cuda", args.local_rank)
|
328 |
+
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
|
329 |
+
torch.distributed.init_process_group(backend='nccl', init_method='env://')
|
330 |
+
args.n_gpu = 1
|
331 |
+
|
332 |
+
if args.gradient_accumulation_steps == 1:
|
333 |
+
args.allreduce_post_accumulation = False
|
334 |
+
args.allreduce_post_accumulation_fp16 = False
|
335 |
+
|
336 |
+
if is_main_process():
|
337 |
+
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
|
338 |
+
filename=args.json_summary),
|
339 |
+
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)])
|
340 |
+
else:
|
341 |
+
dllogger.init(backends=[])
|
342 |
+
|
343 |
+
print("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
|
344 |
+
device, args.n_gpu, bool(args.local_rank != -1), args.fp16))
|
345 |
+
|
346 |
+
if args.gradient_accumulation_steps < 1:
|
347 |
+
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
|
348 |
+
args.gradient_accumulation_steps))
|
349 |
+
if args.train_batch_size % args.gradient_accumulation_steps != 0:
|
350 |
+
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, batch size {} should be divisible".format(
|
351 |
+
args.gradient_accumulation_steps, args.train_batch_size))
|
352 |
+
|
353 |
+
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
|
354 |
+
|
355 |
+
if args.enable_packed_data_mode:
|
356 |
+
args.gradient_accumulation_steps = round(args.gradient_accumulation_steps / avg_seq_per_pack)
|
357 |
+
|
358 |
+
if not args.do_train:
|
359 |
+
raise ValueError(" `do_train` must be True.")
|
360 |
+
|
361 |
+
if not args.resume_from_checkpoint and os.path.exists(args.output_dir) and (
|
362 |
+
os.listdir(args.output_dir) and any([i.startswith('ckpt') for i in os.listdir(args.output_dir)])):
|
363 |
+
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
|
364 |
+
|
365 |
+
if (not args.resume_from_checkpoint or not os.path.exists(args.output_dir)) and is_main_process():
|
366 |
+
os.makedirs(args.output_dir, exist_ok=True)
|
367 |
+
|
368 |
+
return device, args
|
369 |
+
|
370 |
+
def prepare_model_and_optimizer(args, device):
|
371 |
+
|
372 |
+
# Prepare model
|
373 |
+
config = modeling.BertConfig.from_json_file(args.config_file)
|
374 |
+
|
375 |
+
# Padding for divisibility by 8
|
376 |
+
if config.vocab_size % 8 != 0:
|
377 |
+
config.vocab_size += 8 - (config.vocab_size % 8)
|
378 |
+
|
379 |
+
modeling.ACT2FN["bias_gelu"] = modeling.bias_gelu_training
|
380 |
+
model = modeling.BertForPreTraining(config)
|
381 |
+
|
382 |
+
checkpoint = None
|
383 |
+
if not args.resume_from_checkpoint:
|
384 |
+
global_step = 0
|
385 |
+
else:
|
386 |
+
if args.resume_step == -1 and not args.init_checkpoint:
|
387 |
+
model_names = [f for f in os.listdir(args.output_dir) if f.endswith(".pt")]
|
388 |
+
args.resume_step = max([int(x.split('.pt')[0].split('_')[1].strip()) for x in model_names])
|
389 |
+
|
390 |
+
global_step = args.resume_step if not args.init_checkpoint else 0
|
391 |
+
|
392 |
+
if not args.init_checkpoint:
|
393 |
+
checkpoint = torch.load(os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step)), map_location="cpu")
|
394 |
+
else:
|
395 |
+
checkpoint = torch.load(args.init_checkpoint, map_location="cpu")
|
396 |
+
|
397 |
+
model.load_state_dict(checkpoint['model'], strict=False)
|
398 |
+
|
399 |
+
if args.phase2 and not args.init_checkpoint:
|
400 |
+
global_step -= args.phase1_end_step
|
401 |
+
if is_main_process():
|
402 |
+
print("resume step from ", args.resume_step)
|
403 |
+
|
404 |
+
model.to(device)
|
405 |
+
|
406 |
+
# BERT modeling uses weight sharing between word embedding and prediction decoder.
|
407 |
+
# So make sure the storage is pointing properly even after model is moved to device.
|
408 |
+
# Can be removed after SW-108363 is merged
|
409 |
+
model.cls.predictions.decoder.weight = model.bert.embeddings.word_embeddings.weight
|
410 |
+
|
411 |
+
param_optimizer = list(model.named_parameters())
|
412 |
+
no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
|
413 |
+
|
414 |
+
optimizer_grouped_parameters = [
|
415 |
+
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
|
416 |
+
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
|
417 |
+
|
418 |
+
optimizer = FusedLAMB(optimizer_grouped_parameters,
|
419 |
+
lr=args.learning_rate)
|
420 |
+
lr_scheduler = PolyWarmUpScheduler(optimizer,
|
421 |
+
warmup=args.warmup_proportion,
|
422 |
+
total_steps=args.max_steps)
|
423 |
+
if args.fp16:
|
424 |
+
|
425 |
+
if args.loss_scale == 0:
|
426 |
+
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale="dynamic", cast_model_outputs=torch.float16)
|
427 |
+
else:
|
428 |
+
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=args.loss_scale, cast_model_outputs=torch.float16)
|
429 |
+
amp._amp_state.loss_scalers[0]._loss_scale = args.init_loss_scale
|
430 |
+
|
431 |
+
model.checkpoint_activations(args.checkpoint_activations)
|
432 |
+
|
433 |
+
if args.resume_from_checkpoint:
|
434 |
+
if args.phase2 or args.init_checkpoint:
|
435 |
+
keys = list(checkpoint['optimizer']['state'].keys())
|
436 |
+
#Override hyperparameters from previous checkpoint
|
437 |
+
for key in keys:
|
438 |
+
checkpoint['optimizer']['state'][key]['step'] = global_step
|
439 |
+
for iter, item in enumerate(checkpoint['optimizer']['param_groups']):
|
440 |
+
checkpoint['optimizer']['param_groups'][iter]['step'] = global_step
|
441 |
+
checkpoint['optimizer']['param_groups'][iter]['t_total'] = args.max_steps
|
442 |
+
checkpoint['optimizer']['param_groups'][iter]['warmup'] = args.warmup_proportion
|
443 |
+
checkpoint['optimizer']['param_groups'][iter]['lr'] = args.learning_rate
|
444 |
+
optimizer.load_state_dict(checkpoint['optimizer']) # , strict=False)
|
445 |
+
|
446 |
+
# Restore AMP master parameters
|
447 |
+
if args.fp16 and amp._amp_state.opt_properties.enabled:
|
448 |
+
optimizer._lazy_init_maybe_master_weights()
|
449 |
+
optimizer._amp_stash.lazy_init_called = True
|
450 |
+
optimizer.load_state_dict(checkpoint['optimizer'])
|
451 |
+
for param, saved_param in zip(amp.master_params(optimizer), checkpoint['master params']):
|
452 |
+
param.data.copy_(saved_param.data)
|
453 |
+
|
454 |
+
if args.local_rank != -1:
|
455 |
+
if not args.allreduce_post_accumulation:
|
456 |
+
model = DDP(model, message_size=250000000, gradient_predivide_factor=get_world_size())
|
457 |
+
else:
|
458 |
+
flat_dist_call([param.data for param in model.parameters()], torch.distributed.broadcast, (0,) )
|
459 |
+
elif args.n_gpu > 1:
|
460 |
+
model = torch.nn.DataParallel(model)
|
461 |
+
|
462 |
+
criterion = BertPretrainingCriterion(config.vocab_size)
|
463 |
+
|
464 |
+
return model, optimizer, lr_scheduler, checkpoint, global_step, criterion
|
465 |
+
|
466 |
+
def update_tensors(grad_tensors, allreduced_views):
|
467 |
+
offset = 0
|
468 |
+
for grad in grad_tensors:
|
469 |
+
numel = grad.numel()
|
470 |
+
grad.copy_(allreduced_views.narrow(0, offset, numel).view_as(grad))
|
471 |
+
offset += numel
|
472 |
+
return allreduced_views
|
473 |
+
|
474 |
+
def take_optimizer_step(args, optimizer, model, overflow_buf, global_step):
|
475 |
+
|
476 |
+
global skipped_steps
|
477 |
+
if args.allreduce_post_accumulation:
|
478 |
+
# manually allreduce gradients after all accumulation steps
|
479 |
+
# check for Inf/NaN
|
480 |
+
# 1. allocate an uninitialized buffer for flattened gradient
|
481 |
+
loss_scale = _amp_state.loss_scalers[0].loss_scale() if args.fp16 else 1
|
482 |
+
master_grads = [p.grad for p in amp.master_params(optimizer) if p.grad is not None]
|
483 |
+
flat_grad_size = sum(p.numel() for p in master_grads)
|
484 |
+
allreduce_dtype = torch.float16 if args.allreduce_post_accumulation_fp16 else torch.float32
|
485 |
+
flat_raw = torch.empty(flat_grad_size, device='cuda', dtype=allreduce_dtype)
|
486 |
+
# 2. combine unflattening and predivision of unscaled 'raw' gradient
|
487 |
+
# The torch._C._nn.unflatten_dense_tensors, which apex_C.unflatten basing on,
|
488 |
+
# does not work correctly on HPU.
|
489 |
+
# Remove this change when SW-117256 and SW-120216 are solved
|
490 |
+
# allreduced_views = apex_C.unflatten(flat_raw, master_grads)
|
491 |
+
allreduced_views = torch.cat([t.contiguous().view(-1) for t in master_grads], dim=0)
|
492 |
+
allreduced_views.div_(float(get_world_size() * args.gradient_accumulation_steps))
|
493 |
+
overflow_buf.zero_()
|
494 |
+
amp_C.multi_tensor_scale(65536,
|
495 |
+
overflow_buf,
|
496 |
+
[master_grads, allreduced_views],
|
497 |
+
loss_scale / (get_world_size() * args.gradient_accumulation_steps))
|
498 |
+
# 3. sum gradient across ranks. Because of the predivision, this averages the gradient
|
499 |
+
torch.distributed.all_reduce(allreduced_views)
|
500 |
+
# 4. combine unscaling and unflattening of allreduced gradient
|
501 |
+
overflow_buf.zero_()
|
502 |
+
amp_C.multi_tensor_scale(65536,
|
503 |
+
overflow_buf,
|
504 |
+
[allreduced_views, master_grads],
|
505 |
+
1. / loss_scale)
|
506 |
+
update_tensors(master_grads, allreduced_views)
|
507 |
+
# 5. update loss scale
|
508 |
+
if args.fp16:
|
509 |
+
scaler = _amp_state.loss_scalers[0]
|
510 |
+
old_overflow_buf = scaler._overflow_buf
|
511 |
+
scaler._overflow_buf = overflow_buf
|
512 |
+
had_overflow = scaler.update_scale()
|
513 |
+
scaler._overfloat_buf = old_overflow_buf
|
514 |
+
else:
|
515 |
+
had_overflow = 0
|
516 |
+
# 6. call optimizer step function
|
517 |
+
if had_overflow == 0:
|
518 |
+
optimizer.step()
|
519 |
+
global_step += 1
|
520 |
+
else:
|
521 |
+
# Overflow detected, print message and clear gradients
|
522 |
+
skipped_steps += 1
|
523 |
+
if is_main_process():
|
524 |
+
scaler = _amp_state.loss_scalers[0]
|
525 |
+
dllogger.log(step="PARAMETER", data={"loss_scale": scaler.loss_scale()})
|
526 |
+
if _amp_state.opt_properties.master_weights:
|
527 |
+
for param in optimizer._amp_stash.all_fp32_from_fp16_params:
|
528 |
+
param.grad = None
|
529 |
+
for param in model.parameters():
|
530 |
+
param.grad = None
|
531 |
+
else:
|
532 |
+
optimizer.step()
|
533 |
+
#optimizer.zero_grad()
|
534 |
+
for param in model.parameters():
|
535 |
+
param.grad = None
|
536 |
+
global_step += 1
|
537 |
+
|
538 |
+
return global_step
|
539 |
+
|
540 |
+
def get_metadata_file_path(input_dir : str) -> str:
|
541 |
+
norm_path = os.path.normpath(input_dir)
|
542 |
+
head_tail = os.path.split(norm_path)
|
543 |
+
metadata_file_name = head_tail[1]
|
544 |
+
metadata_file_name = metadata_file_name + '_metadata.json'
|
545 |
+
metadata_file_path = os.path.join(head_tail[0],metadata_file_name)
|
546 |
+
return metadata_file_path
|
547 |
+
|
548 |
+
def read_avg_seq_per_sample(input_dir : str, max_sequence_length) -> float:
|
549 |
+
metadata = None
|
550 |
+
metadata_file_path = get_metadata_file_path(input_dir)
|
551 |
+
print(f"Reading dataset metadata from: {metadata_file_path}")
|
552 |
+
if os.path.exists(metadata_file_path):
|
553 |
+
file_handle = open(metadata_file_path, mode='r')
|
554 |
+
json_content = file_handle.read()
|
555 |
+
metadata = json.loads(json_content)
|
556 |
+
else:
|
557 |
+
print("Packed dataset metadata file not accessible, falling back to default values of avg_seq_per_sample")
|
558 |
+
if max_sequence_length == 128:
|
559 |
+
return 1.2
|
560 |
+
elif max_sequence_length == 512:
|
561 |
+
return 2.0
|
562 |
+
else:
|
563 |
+
assert f"invalid max_sequence_length"
|
564 |
+
avg_seq_per_sample_key = "avg_seq_per_sample"
|
565 |
+
if metadata is not None and avg_seq_per_sample_key in metadata.keys():
|
566 |
+
avg_seq_per_sample = metadata[avg_seq_per_sample_key]
|
567 |
+
else:
|
568 |
+
assert False, f"Key {avg_seq_per_sample_key} not present in packed dataset metadata file: {metadata_file_path}"
|
569 |
+
print(f"AVG_SEQ_PER_SAMPLE: {avg_seq_per_sample}")
|
570 |
+
return avg_seq_per_sample
|
571 |
+
|
572 |
+
def main():
    """Entry point of BERT pretraining.

    Seeds all RNGs, prepares the model/optimizer, then runs the training
    loop over shuffled shard files, logging via dllogger and periodically
    checkpointing.  Returns (args, final_loss, train_time_raw, global_step)
    when the step budget is exhausted or a cluster timeout was signalled.
    """
    # timeout_sent is flipped by a signal handler elsewhere in this file;
    # avg_seq_per_pack is consumed again by the __main__ perf summary.
    global timeout_sent
    global avg_seq_per_pack

    args = parse_arguments()

    # Per-rank seeding: every worker gets a distinct but deterministic seed.
    random.seed(args.seed + args.local_rank)
    np.random.seed(args.seed + args.local_rank)
    torch.manual_seed(args.seed + args.local_rank)
    torch.cuda.manual_seed(args.seed + args.local_rank)
    worker_init = WorkerInitObj(args.seed + args.local_rank)

    # Packed-data mode stores several sequences per sample; the average is
    # needed to report throughput in sequences/second.
    if args.enable_packed_data_mode:
        avg_seq_per_pack = read_avg_seq_per_sample(args.input_dir, args.max_seq_length)
    else:
        avg_seq_per_pack = 1.0
    device, args = setup_training(args)
    dllogger.log(step="PARAMETER", data={"Config": [str(args)]})

    # Prepare optimizer
    model, optimizer, lr_scheduler, checkpoint, global_step, criterion = prepare_model_and_optimizer(args, device)

    if is_main_process():
        dllogger.log(step="PARAMETER", data={"SEED": args.seed})

    raw_train_start = None
    if args.do_train:
        if is_main_process():
            dllogger.log(step="PARAMETER", data={"train_start": True})
            dllogger.log(step="PARAMETER", data={"batch_size_per_gpu": args.train_batch_size})
            dllogger.log(step="PARAMETER", data={"learning_rate": args.learning_rate})

        model.train()
        most_recent_ckpts_paths = []
        average_loss = 0.0  # averaged loss every args.log_freq steps
        epoch = 0
        training_steps = 0
        average_training_time_per_step = 0
        average_perf_per_step = 0
        loss_list = []

        # Single-worker pool used to load the *next* shard file while the
        # current one is being trained on.
        pool = ProcessPoolExecutor(1)

        starting_time = time.time()

        if args.use_torch_compile:
            model = torch.compile(model, backend="inductor")

        # Note: We loop infinitely over epochs, termination is handled via iteration count
        while True:
            thread = None
            restored_data_loader = None
            # Fresh epoch (or fresh start): enumerate and shuffle shard files.
            # Otherwise resume the file list recorded in the checkpoint.
            if not args.resume_from_checkpoint or epoch > 0 or (args.phase2 and global_step < 1) or args.init_checkpoint:
                if args.enable_packed_data_mode:
                    files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if
                             os.path.isfile(os.path.join(args.input_dir, f))]  # Packed files have no 'training' pre/postfix.
                else:
                    files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if
                             os.path.isfile(os.path.join(args.input_dir, f)) and 'training' in f]
                files.sort()
                num_files = len(files)
                # Seed the shuffle with (seed + epoch) so all ranks agree on
                # the order and it still varies across epochs.
                random.Random(args.seed + epoch).shuffle(files)
                f_start_id = 0
            else:
                f_start_id = checkpoint['files'][0]
                files = checkpoint['files'][1:]
                args.resume_from_checkpoint = False
                num_files = len(files)
                # may not exist in all checkpoints
                epoch = checkpoint.get('epoch', 0)
                restored_data_loader = checkpoint.get('data_loader', None)

            shared_file_list = {}

            # Map this rank to a shard; the remainder offset spreads ranks
            # when there are more workers than files.
            if torch.distributed.is_initialized() and get_world_size() > num_files:
                remainder = get_world_size() % num_files
                data_file = files[(f_start_id*get_world_size()+get_rank() + remainder*f_start_id)%num_files]
            else:
                data_file = files[(f_start_id*get_world_size()+get_rank())%num_files]

            previous_file = data_file

            if restored_data_loader is None:
                train_data = pretraining_dataset(data_file, args.max_predictions_per_seq, args.enable_packed_data_mode)
                train_sampler = RandomSampler(train_data)
                train_dataloader = DataLoader(train_data, sampler=train_sampler,
                                              batch_size=args.train_batch_size * args.n_gpu,
                                              num_workers=0, worker_init_fn=worker_init,
                                              drop_last=True, pin_memory=True)
                # shared_file_list["0"] = (train_dataloader, data_file)
            else:
                train_dataloader = restored_data_loader
                restored_data_loader = None

            overflow_buf = None
            if args.allreduce_post_accumulation:
                overflow_buf = torch.cuda.IntTensor([0])

            for f_id in range(f_start_id + 1 , len(files)):

                # Pick the shard for the *next* iteration of this loop and
                # kick off its loading in the background pool.
                if get_world_size() > num_files:
                    data_file = files[(f_id*get_world_size()+get_rank() + remainder*f_id)%num_files]
                else:
                    data_file = files[(f_id*get_world_size()+get_rank())%num_files]

                previous_file = data_file

                dataset_future = pool.submit(create_pretraining_dataset, data_file, args.max_predictions_per_seq, shared_file_list, args, worker_init)

                train_iter = tqdm(train_dataloader, desc="Iteration", disable=args.disable_progress_bar) if is_main_process() else train_dataloader

                if raw_train_start is None:
                    raw_train_start = time.time()
                for step, batch in enumerate(train_iter):

                    training_steps += 1

                    batch = [t.to(device) for t in batch]
                    # Packed batches carry two extra tensors: per-sequence
                    # positions and next-sentence positions.
                    if args.enable_packed_data_mode:
                        input_ids, segment_ids, input_mask, positions, masked_lm_labels, next_sentence_positions, next_sentence_labels = batch
                    else:
                        input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch

                    if (args.local_rank != -1) and (training_steps % args.gradient_accumulation_steps == 0):
                        torch.distributed.barrier()

                    prediction_scores, seq_relationship_score = model(
                        input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, enable_packed_data_mode=args.enable_packed_data_mode,
                        positions=positions if args.enable_packed_data_mode else None,
                        next_sentence_positions=next_sentence_positions if args.enable_packed_data_mode else None)

                    loss = criterion(prediction_scores, seq_relationship_score, masked_lm_labels, next_sentence_labels)
                    if args.n_gpu > 1:
                        loss = loss.mean()  # mean() to average on multi-gpu.

                    # divisor rescales logged losses when the accumulation
                    # division was already applied (pre-division path).
                    divisor = args.gradient_accumulation_steps
                    if args.gradient_accumulation_steps > 1:
                        if not args.allreduce_post_accumulation:
                            # this division was merged into predivision
                            loss = loss / args.gradient_accumulation_steps
                            divisor = 1.0
                    if args.fp16:
                        with amp.scale_loss(loss, optimizer, delay_overflow_check=args.allreduce_post_accumulation) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        loss.backward()

                    # htcore.mark_step(): Habana graph-mode step boundary —
                    # presumably triggers lazy-mode execution; confirm against
                    # habana_frameworks docs.
                    htcore.mark_step()

                    loss_list.append(loss)

                    if training_steps % args.gradient_accumulation_steps == 0:
                        lr_scheduler.step()  # learning rate warmup
                        global_step = take_optimizer_step(args, optimizer, model, overflow_buf, global_step)

                        htcore.mark_step()

                        # Fold accumulated per-micro-batch losses into the
                        # running average only at logging/termination points
                        # to avoid synchronizing .item() every step.
                        if global_step >= args.steps_this_run or timeout_sent or training_steps % (args.log_freq * args.gradient_accumulation_steps) == 0:
                            for loss_t in loss_list:
                                average_loss += loss_t.item()
                            loss_list.clear()
                            train_time = time.time() - starting_time
                            starting_time = time.time()
                            average_training_time_per_step = train_time/(args.gradient_accumulation_steps * args.log_freq)
                            average_perf_per_step = args.train_batch_size*avg_seq_per_pack/average_training_time_per_step

                        if global_step >= args.steps_this_run or timeout_sent:
                            # Final logging: all-reduce the mean loss so every
                            # rank reports the same value.
                            train_time_raw = time.time() - raw_train_start
                            last_num_steps = int(training_steps / args.gradient_accumulation_steps) % args.log_freq
                            last_num_steps = args.log_freq if last_num_steps == 0 else last_num_steps
                            average_loss = torch.tensor(average_loss, dtype=torch.float32).cuda()
                            average_loss = average_loss / (last_num_steps * divisor)
                            if (torch.distributed.is_initialized()):
                                average_loss /= get_world_size()
                                torch.distributed.all_reduce(average_loss)
                            final_loss = average_loss.item()
                            if is_main_process():
                                dllogger.log(step=(epoch, global_step, ), data={"final_loss": final_loss,
                                                                                "average_training_time_step": average_training_time_per_step,
                                                                                "average_perf_per_step": average_perf_per_step})
                        elif training_steps % (args.log_freq * args.gradient_accumulation_steps) == 0:
                            if is_main_process():
                                dllogger.log(step=(epoch, global_step, ), data={"average_loss": average_loss / (args.log_freq * divisor),
                                                                                "step_loss": loss.item() * args.gradient_accumulation_steps / divisor,
                                                                                "learning_rate": optimizer.param_groups[0]['lr'],
                                                                                "average_training_time_step": average_training_time_per_step,
                                                                                "average_perf_per_step": average_perf_per_step})
                            average_loss = 0

                        # Checkpoint on schedule, on reaching the step budget,
                        # or on a cluster timeout.
                        if global_step >= args.steps_this_run or training_steps % (
                                args.num_steps_per_checkpoint * args.gradient_accumulation_steps) == 0 or timeout_sent:
                            if is_main_process() and not args.skip_checkpoint:
                                # Save a trained model
                                dllogger.log(step="PARAMETER", data={"checkpoint_step": global_step})
                                model_to_save = model.module if hasattr(model,
                                                                        'module') else model  # Only save the model it-self
                                if args.resume_step < 0 or not args.phase2:
                                    output_save_file = os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step))
                                else:
                                    output_save_file = os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step + args.phase1_end_step))
                                if args.do_train:
                                    torch.save({'model': model_to_save.state_dict(),
                                                'optimizer': optimizer.state_dict(),
                                                'master params': list(amp.master_params(optimizer)),
                                                'files': [f_id] + files,
                                                'epoch': epoch,
                                                'data_loader': None if global_step >= args.max_steps else train_dataloader}, output_save_file)

                                    # Keep only the three most recent checkpoints.
                                    most_recent_ckpts_paths.append(output_save_file)
                                    if len(most_recent_ckpts_paths) > 3:
                                        ckpt_to_be_removed = most_recent_ckpts_paths.pop(0)
                                        os.remove(ckpt_to_be_removed)

                            # Exiting the training due to hitting max steps, or being sent a
                            # timeout from the cluster scheduler
                            if global_step >= args.steps_this_run or timeout_sent:
                                del train_dataloader
                                # thread.join()
                                return args, final_loss, train_time_raw, global_step

                del train_dataloader
                # thread.join()
                # Make sure pool has finished and switch train_dataloader
                # NOTE: Will block until complete
                train_dataloader, data_file = dataset_future.result(timeout=None)

            epoch += 1
|
801 |
+
|
802 |
+
if __name__ == "__main__":

    # Wall-clock timing around the whole training run (setup included).
    now = time.time()
    args, final_loss, train_time_raw, global_step = main()
    gpu_count = args.n_gpu
    # In phase 2 resumed runs the reported step count includes phase 1 steps.
    global_step += args.phase1_end_step if (args.phase2 and args.resume_step > 0) else 0
    if args.resume_step == -1:
        args.resume_step = 0
    if torch.distributed.is_initialized():
        gpu_count = get_world_size()
    if is_main_process():
        e2e_time = time.time() - now
        # Throughput in sequences/second over the pure training time.
        # NOTE(review): `skipped_steps` is a global maintained elsewhere in
        # this file (presumably incremented on fp16 overflow skips) — confirm.
        training_perf = args.train_batch_size * args.gradient_accumulation_steps * gpu_count * avg_seq_per_pack\
                        * (global_step - args.resume_step + skipped_steps) / train_time_raw
        dllogger.log(step=tuple(), data={"e2e_train_time": e2e_time, "training_sequences_per_second": training_perf,
                                         "final_loss": final_loss, "raw_train_time": train_time_raw })
        dllogger.flush()
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/run_squad.py
ADDED
@@ -0,0 +1,1200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
3 |
+
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
"""Run BERT on SQuAD."""
|
17 |
+
|
18 |
+
from __future__ import absolute_import, division, print_function
|
19 |
+
|
20 |
+
import argparse
|
21 |
+
import collections
|
22 |
+
import json
|
23 |
+
import logging
|
24 |
+
import math
|
25 |
+
import os
|
26 |
+
import random
|
27 |
+
import sys
|
28 |
+
from io import open
|
29 |
+
|
30 |
+
import numpy as np
|
31 |
+
import torch
|
32 |
+
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
|
33 |
+
TensorDataset)
|
34 |
+
from torch.utils.data.distributed import DistributedSampler
|
35 |
+
from tqdm import tqdm, trange
|
36 |
+
|
37 |
+
from apex import amp
|
38 |
+
from schedulers import LinearWarmUpScheduler
|
39 |
+
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
|
40 |
+
import modeling
|
41 |
+
from optimization import BertAdam, warmup_linear
|
42 |
+
from tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize)
|
43 |
+
from utils import is_main_process, format_step
|
44 |
+
import dllogger, time
|
45 |
+
|
46 |
+
# Disable TorchScript JIT profiling — presumably to avoid profile-guided
# re-specialization overhead on this workload; confirm against PyTorch docs.
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)

# Python 2/3 compatibility shim: cPickle is the C-accelerated pickle of Py2.
if sys.version_info[0] == 2:
    import cPickle as pickle
else:
    import pickle

# Module-wide logging: timestamped INFO-level records.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
|
58 |
+
|
59 |
+
|
60 |
+
class SquadExample(object):
    """A single training/test example for the Squad dataset.

    For examples without an answer, the start and end position are -1.
    """

    def __init__(self,
                 qas_id,
                 question_text,
                 doc_tokens,
                 orig_answer_text=None,
                 start_position=None,
                 end_position=None,
                 is_impossible=None):
        self.qas_id = qas_id
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible

    def __str__(self):
        # Human-readable form is identical to the debug form.
        return self.__repr__()

    def __repr__(self):
        # Assemble the always-present fields first, then append the
        # optional ones only when they are truthy (so position 0 / -1=-falsy
        # values are omitted, matching the historical format).
        pieces = [
            f"qas_id: {self.qas_id}",
            f"question_text: {self.question_text}",
            "doc_tokens: [%s]" % " ".join(self.doc_tokens),
        ]
        if self.start_position:
            pieces.append(f"start_position: {self.start_position:d}")
        if self.end_position:
            pieces.append(f"end_position: {self.end_position:d}")
        if self.is_impossible:
            pieces.append(f"is_impossible: {self.is_impossible!r}")
        return ", ".join(pieces)
|
98 |
+
|
99 |
+
|
100 |
+
class InputFeatures(object):
    """A single set of features of data.

    Plain record type: every constructor argument is stored verbatim as an
    instance attribute of the same name.
    """

    def __init__(self,
                 unique_id,
                 example_index,
                 doc_span_index,
                 tokens,
                 token_to_orig_map,
                 token_is_max_context,
                 input_ids,
                 input_mask,
                 segment_ids,
                 start_position=None,
                 end_position=None,
                 is_impossible=None):
        # Capture every parameter (excluding self) as an attribute in one go.
        fields = dict(locals())
        del fields["self"]
        self.__dict__.update(fields)
|
128 |
+
|
129 |
+
|
130 |
+
def read_squad_examples(input_file, is_training, version_2_with_negative):
    """Read a SQuAD json file into a list of SquadExample.

    Whitespace-tokenizes each paragraph while building a char->word offset
    table, then converts the character-based answer annotations into word
    positions.  In training mode, examples whose annotated answer text
    cannot be recovered from the document are skipped with a warning.
    """
    with open(input_file, "r", encoding='utf-8') as reader:
        input_data = json.load(reader)["data"]

    def is_whitespace(c):
        # 0x202F is NARROW NO-BREAK SPACE, which str.isspace-style checks
        # would miss in some contexts; treated as a word separator here.
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
            return True
        return False

    examples = []
    for entry in input_data:
        for paragraph in entry["paragraphs"]:
            paragraph_text = paragraph["context"]
            doc_tokens = []
            char_to_word_offset = []
            prev_is_whitespace = True
            # Single pass: grow the current token char-by-char; every
            # character (whitespace included) records which word it maps to.
            for c in paragraph_text:
                if is_whitespace(c):
                    prev_is_whitespace = True
                else:
                    if prev_is_whitespace:
                        doc_tokens.append(c)
                    else:
                        doc_tokens[-1] += c
                    prev_is_whitespace = False
                char_to_word_offset.append(len(doc_tokens) - 1)

            for qa in paragraph["qas"]:
                qas_id = qa["id"]
                question_text = qa["question"]
                start_position = None
                end_position = None
                orig_answer_text = None
                is_impossible = False
                if is_training:
                    # SQuAD v2 marks unanswerable questions explicitly.
                    if version_2_with_negative:
                        is_impossible = qa["is_impossible"]
                    if (len(qa["answers"]) != 1) and (not is_impossible):
                        raise ValueError(
                            "For training, each question should have exactly 1 answer.")
                    if not is_impossible:
                        answer = qa["answers"][0]
                        orig_answer_text = answer["text"]
                        answer_offset = answer["answer_start"]
                        answer_length = len(orig_answer_text)
                        # Map character span -> word span via the offset table.
                        start_position = char_to_word_offset[answer_offset]
                        end_position = char_to_word_offset[answer_offset + answer_length - 1]
                        # Only add answers where the text can be exactly recovered from the
                        # document. If this CAN'T happen it's likely due to weird Unicode
                        # stuff so we will just skip the example.
                        #
                        # Note that this means for training mode, every example is NOT
                        # guaranteed to be preserved.
                        actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
                        cleaned_answer_text = " ".join(
                            whitespace_tokenize(orig_answer_text))
                        if actual_text.find(cleaned_answer_text) == -1:
                            logger.warning("Could not find answer: '%s' vs. '%s'",
                                           actual_text, cleaned_answer_text)
                            continue
                    else:
                        # Unanswerable: sentinel positions and empty answer.
                        start_position = -1
                        end_position = -1
                        orig_answer_text = ""

                example = SquadExample(
                    qas_id=qas_id,
                    question_text=question_text,
                    doc_tokens=doc_tokens,
                    orig_answer_text=orig_answer_text,
                    start_position=start_position,
                    end_position=end_position,
                    is_impossible=is_impossible)
                examples.append(example)
    return examples
|
206 |
+
|
207 |
+
|
208 |
+
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length, is_training):
    """Loads a data file into a list of `InputBatch`s.

    WordPiece-tokenizes each example and splits long documents into
    overlapping `doc_stride`-spaced spans, emitting one InputFeatures per
    (example, span) pair with [CLS] query [SEP] document [SEP] packing and
    answer positions remapped into sub-token space.
    """

    unique_id = 1000000000

    features = []
    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)

        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]

        # Bidirectional maps between whitespace words and WordPiece sub-tokens.
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)

        # Remap the word-level answer span into sub-token indices.
        tok_start_position = None
        tok_end_position = None
        if is_training and example.is_impossible:
            tok_start_position = -1
            tok_end_position = -1
        if is_training and not example.is_impossible:
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                # End = last sub-token of the answer's last word.
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
                example.orig_answer_text)

        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3

        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            "DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)

        for (doc_span_index, doc_span) in enumerate(doc_spans):
            # Build: [CLS] query [SEP] doc-span [SEP], segment 0 for the
            # query half and segment 1 for the document half.
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)

            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]

                # A token appearing in several overlapping spans is scored
                # only in the span where it has the most context.
                is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                                       split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)

            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length

            start_position = None
            end_position = None
            if is_training and not example.is_impossible:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                out_of_span = False
                if not (tok_start_position >= doc_start and
                        tok_end_position <= doc_end):
                    out_of_span = True
                if out_of_span:
                    start_position = 0
                    end_position = 0
                else:
                    # Shift doc-relative positions past [CLS]+query+[SEP].
                    doc_offset = len(query_tokens) + 2
                    start_position = tok_start_position - doc_start + doc_offset
                    end_position = tok_end_position - doc_start + doc_offset
            if is_training and example.is_impossible:
                start_position = 0
                end_position = 0

            features.append(
                InputFeatures(
                    unique_id=unique_id,
                    example_index=example_index,
                    doc_span_index=doc_span_index,
                    tokens=tokens,
                    token_to_orig_map=token_to_orig_map,
                    token_is_max_context=token_is_max_context,
                    input_ids=input_ids,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    start_position=start_position,
                    end_position=end_position,
                    is_impossible=example.is_impossible))
            unique_id += 1

    return features
|
345 |
+
|
346 |
+
|
347 |
+
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
|
348 |
+
orig_answer_text):
|
349 |
+
"""Returns tokenized answer spans that better match the annotated answer."""
|
350 |
+
|
351 |
+
# The SQuAD annotations are character based. We first project them to
|
352 |
+
# whitespace-tokenized words. But then after WordPiece tokenization, we can
|
353 |
+
# often find a "better match". For example:
|
354 |
+
#
|
355 |
+
# Question: What year was John Smith born?
|
356 |
+
# Context: The leader was John Smith (1895-1943).
|
357 |
+
# Answer: 1895
|
358 |
+
#
|
359 |
+
# The original whitespace-tokenized answer will be "(1895-1943).". However
|
360 |
+
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
|
361 |
+
# the exact answer, 1895.
|
362 |
+
#
|
363 |
+
# However, this is not always possible. Consider the following:
|
364 |
+
#
|
365 |
+
# Question: What country is the top exporter of electornics?
|
366 |
+
# Context: The Japanese electronics industry is the lagest in the world.
|
367 |
+
# Answer: Japan
|
368 |
+
#
|
369 |
+
# In this case, the annotator chose "Japan" as a character sub-span of
|
370 |
+
# the word "Japanese". Since our WordPiece tokenizer does not split
|
371 |
+
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
|
372 |
+
# in SQuAD, but does happen.
|
373 |
+
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
|
374 |
+
|
375 |
+
for new_start in range(input_start, input_end + 1):
|
376 |
+
for new_end in range(input_end, new_start - 1, -1):
|
377 |
+
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
|
378 |
+
if text_span == tok_answer_text:
|
379 |
+
return (new_start, new_end)
|
380 |
+
|
381 |
+
return (input_start, input_end)
|
382 |
+
|
383 |
+
|
384 |
+
def _check_is_max_context(doc_spans, cur_span_index, position):
|
385 |
+
"""Check if this is the 'max context' doc span for the token."""
|
386 |
+
|
387 |
+
# Because of the sliding window approach taken to scoring documents, a single
|
388 |
+
# token can appear in multiple documents. E.g.
|
389 |
+
# Doc: the man went to the store and bought a gallon of milk
|
390 |
+
# Span A: the man went to the
|
391 |
+
# Span B: to the store and bought
|
392 |
+
# Span C: and bought a gallon of
|
393 |
+
# ...
|
394 |
+
#
|
395 |
+
# Now the word 'bought' will have two scores from spans B and C. We only
|
396 |
+
# want to consider the score with "maximum context", which we define as
|
397 |
+
# the *minimum* of its left and right context (the *sum* of left and
|
398 |
+
# right context will always be the same, of course).
|
399 |
+
#
|
400 |
+
# In the example the maximum context for 'bought' would be span C since
|
401 |
+
# it has 1 left context and 3 right context, while span B has 4 left context
|
402 |
+
# and 0 right context.
|
403 |
+
best_score = None
|
404 |
+
best_span_index = None
|
405 |
+
for (span_index, doc_span) in enumerate(doc_spans):
|
406 |
+
end = doc_span.start + doc_span.length - 1
|
407 |
+
if position < doc_span.start:
|
408 |
+
continue
|
409 |
+
if position > end:
|
410 |
+
continue
|
411 |
+
num_left_context = position - doc_span.start
|
412 |
+
num_right_context = end - position
|
413 |
+
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
|
414 |
+
if best_score is None or score > best_score:
|
415 |
+
best_score = score
|
416 |
+
best_span_index = span_index
|
417 |
+
|
418 |
+
return cur_span_index == best_span_index
|
419 |
+
|
420 |
+
|
421 |
+
# Per-feature model output: `unique_id` ties the prediction back to its
# InputFeatures entry; `start_logits`/`end_logits` are the per-token scores.
RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "start_logits", "end_logits"])
|
423 |
+
|
424 |
+
|
425 |
+
def get_answers(examples, features, results, args):
    """Aggregate per-feature logits into final answer strings per question.

    Args:
        examples: SquadExample list; `feature.example_index` indexes into it.
        features: InputFeatures from convert_examples_to_features; one example
            may yield several features (sliding-window doc spans).
        results: RawResult tuples holding the model's start/end logits.
        args: namespace providing n_best_size, max_answer_length,
            version_2_with_negative, null_score_diff_threshold,
            do_lower_case and verbose_logging.

    Returns:
        (answers, nbest_answers): the best answer text per qas_id, and the
        n-best candidate dicts (text/probability/logits) per qas_id.
    """
    # One example can map to several features, so accumulate per qas_id.
    predictions = collections.defaultdict(list)
    Prediction = collections.namedtuple('Prediction', ['text', 'start_logit', 'end_logit'])

    if args.version_2_with_negative:
        # Per question: lowest-scoring "no answer" candidate as
        # (score, start_logit, end_logit); position-0 logits stand in for null.
        null_vals = collections.defaultdict(lambda: (float("inf"), 0, 0))
    for ex, feat, result in match_results(examples, features, results):
        start_indices = _get_best_indices(result.start_logits, args.n_best_size)
        end_indices = _get_best_indices(result.end_logits, args.n_best_size)
        prelim_predictions = get_valid_prelim_predictions(start_indices, end_indices, feat, result, args)
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit),
            reverse=True)
        if args.version_2_with_negative:
            score = result.start_logits[0] + result.end_logits[0]
            if score < null_vals[ex.qas_id][0]:
                null_vals[ex.qas_id] = (score, result.start_logits[0], result.end_logits[0])

        curr_predictions = []
        seen_predictions = []
        for pred in prelim_predictions:
            if len(curr_predictions) == args.n_best_size:
                break
            if pred.start_index > 0:  # this is a non-null prediction
                final_text = get_answer_text(ex, feat, pred, args)
                if final_text in seen_predictions:
                    continue
            else:
                final_text = ""

            seen_predictions.append(final_text)
            curr_predictions.append(Prediction(final_text, pred.start_logit, pred.end_logit))
        predictions[ex.qas_id] += curr_predictions

    # Add an empty prediction per question, using that question's OWN null
    # logits.  (BUGFIX: this previously indexed null_vals with the
    # loop-leaked `ex`, so every question received the last example's
    # null score instead of its own.)
    if args.version_2_with_negative:
        for qas_id in predictions.keys():
            predictions[qas_id].append(Prediction('',
                                                  null_vals[qas_id][1],
                                                  null_vals[qas_id][2]))

    nbest_answers = collections.defaultdict(list)
    answers = {}
    for qas_id, preds in predictions.items():
        nbest = sorted(
            preds,
            key=lambda x: (x.start_logit + x.end_logit),
            reverse=True)[:args.n_best_size]

        # In very rare edge cases we could only have a single null prediction,
        # so create a nonce entry to avoid failure downstream.
        if not nbest:
            nbest.append(Prediction(text="empty", start_logit=0.0, end_logit=0.0))

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry and entry.text:
                best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_answers[qas_id].append(output)
        if args.version_2_with_negative:
            if best_non_null_entry is None:
                # Every candidate was empty: predict "no answer" rather than
                # dereferencing None below.
                answers[qas_id] = ""
            else:
                score_diff = null_vals[qas_id][0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit
                if score_diff > args.null_score_diff_threshold:
                    answers[qas_id] = ""
                else:
                    answers[qas_id] = best_non_null_entry.text
        else:
            answers[qas_id] = nbest_answers[qas_id][0]['text']

    return answers, nbest_answers
|
505 |
+
|
506 |
+
def get_answer_text(example, feature, pred, args):
    """Map a span prediction back to text taken from the original document."""
    # WordPiece tokens of the predicted span and the matching original words.
    wp_tokens = feature.tokens[pred.start_index:pred.end_index + 1]
    doc_start = feature.token_to_orig_map[pred.start_index]
    doc_end = feature.token_to_orig_map[pred.end_index]
    doc_words = example.doc_tokens[doc_start:doc_end + 1]

    # Undo WordPiece splitting ("play ##ing" -> "playing") and collapse any
    # leftover whitespace.
    detok = " ".join(wp_tokens)
    detok = detok.replace(" ##", "").replace("##", "")
    detok = " ".join(detok.strip().split())

    original = " ".join(doc_words)

    # Align the detokenized prediction against the raw document text.
    return get_final_text(detok, original, args.do_lower_case, args.verbose_logging)
|
524 |
+
|
525 |
+
def get_valid_prelim_predictions(start_indices, end_indices, feature, result, args):
    """Cross start/end candidates and keep only structurally valid spans.

    A span is valid when both ends fall inside the feature, both map back to
    original document tokens, the start token comes from the doc span with
    maximum context, the ends are well ordered, and the span does not exceed
    `args.max_answer_length` tokens.
    """
    _PrelimPrediction = collections.namedtuple(
        "PrelimPrediction",
        ["start_index", "end_index", "start_logit", "end_logit"])

    valid = []
    n_tokens = len(feature.tokens)
    for s_idx in start_indices:
        for e_idx in end_indices:
            # Both ends must lie within the feature's token range.
            if s_idx >= n_tokens or e_idx >= n_tokens:
                continue
            # Both ends must map back to the original document (i.e. not
            # point at question or special tokens).
            if s_idx not in feature.token_to_orig_map:
                continue
            if e_idx not in feature.token_to_orig_map:
                continue
            # The start token must belong to the doc span with max context.
            if not feature.token_is_max_context.get(s_idx, False):
                continue
            # Well-ordered and no longer than the configured maximum.
            if e_idx < s_idx:
                continue
            if (e_idx - s_idx + 1) > args.max_answer_length:
                continue
            valid.append(
                _PrelimPrediction(
                    start_index=s_idx,
                    end_index=e_idx,
                    start_logit=result.start_logits[s_idx],
                    end_logit=result.end_logits[e_idx]))
    return valid
|
555 |
+
|
556 |
+
def match_results(examples, features, results):
    """Yield (example, feature, result) triples whose unique_ids line up."""
    feature_ids = {f.unique_id for f in features}
    result_ids = {r.unique_id for r in results}
    shared = feature_ids & result_ids

    # Keep only entries present on both sides, aligned by unique_id order.
    matched_features = sorted((f for f in features if f.unique_id in shared),
                              key=lambda x: x.unique_id)
    matched_results = sorted((r for r in results if r.unique_id in shared),
                             key=lambda x: x.unique_id)

    # Original code assumes strict ordering of examples. TODO: rewrite this.
    for feature, result in zip(matched_features, matched_results):
        yield examples[feature.example_index], feature, result
|
567 |
+
|
568 |
+
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text.

    Args:
        pred_text: detokenized WordPiece prediction (whitespace-normalized).
        orig_text: span of the raw document corresponding to the prediction.
        do_lower_case: whether the BasicTokenizer should lower-case its input.
        verbose_logging: if True, log details when the alignment fails.

    Returns:
        The best-effort sub-span of `orig_text`, or `orig_text` itself when
        the character-alignment heuristic fails.
    """

    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.

    def _strip_spaces(text):
        # Remove spaces and record, for each kept character, its index in the
        # original string: ns_to_s_map[stripped_index] = original_index.
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.

    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)

    tok_text = " ".join(tokenizer.tokenize(orig_text))

    # Locate the prediction inside the re-tokenized original text.
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            logger.info(
                "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1

    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)

    # The space-stripped texts must have equal length for a one-to-one
    # character alignment to hold; otherwise give up.
    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
                        orig_ns_text, tok_ns_text)
        return orig_text

    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i

    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]

    if orig_start_position is None:
        if verbose_logging:
            logger.info("Couldn't map start position")
        return orig_text

    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]

    if orig_end_position is None:
        if verbose_logging:
            logger.info("Couldn't map end position")
        return orig_text

    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
|
663 |
+
|
664 |
+
|
665 |
+
def _get_best_indices(logits, n_best_size):
|
666 |
+
"""Get the n-best logits from a list."""
|
667 |
+
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
|
668 |
+
|
669 |
+
best_indices = []
|
670 |
+
for i in range(len(index_and_score)):
|
671 |
+
if i >= n_best_size:
|
672 |
+
break
|
673 |
+
best_indices.append(index_and_score[i][0])
|
674 |
+
return best_indices
|
675 |
+
|
676 |
+
|
677 |
+
def _compute_softmax(scores):
|
678 |
+
"""Compute softmax probability over raw logits."""
|
679 |
+
if not scores:
|
680 |
+
return []
|
681 |
+
|
682 |
+
max_score = None
|
683 |
+
for score in scores:
|
684 |
+
if max_score is None or score > max_score:
|
685 |
+
max_score = score
|
686 |
+
|
687 |
+
exp_scores = []
|
688 |
+
total_sum = 0.0
|
689 |
+
for score in scores:
|
690 |
+
x = math.exp(score - max_score)
|
691 |
+
exp_scores.append(x)
|
692 |
+
total_sum += x
|
693 |
+
|
694 |
+
probs = []
|
695 |
+
for score in exp_scores:
|
696 |
+
probs.append(score / total_sum)
|
697 |
+
return probs
|
698 |
+
|
699 |
+
|
700 |
+
|
701 |
+
from apex.multi_tensor_apply import multi_tensor_applier
|
702 |
+
class GradientClipper:
    """Clips the global gradient norm of an iterable of parameters in place.

    Uses NVIDIA apex's fused multi-tensor kernels; requires the CUDA
    extensions (`amp_C`) to be built, and allocates its overflow buffer on
    the current CUDA device.
    """

    def __init__(self, max_grad_norm):
        # Target maximum global L2 norm for the gradients.
        self.max_norm = max_grad_norm
        if multi_tensor_applier.available:
            import amp_C
            # Overflow flag buffer required by the multi-tensor kernels.
            self._overflow_buf = torch.cuda.IntTensor([0])
            self.multi_tensor_l2norm = amp_C.multi_tensor_l2norm
            self.multi_tensor_scale = amp_C.multi_tensor_scale
        else:
            raise RuntimeError('Gradient clipping requires cuda extensions')

    def step(self, parameters):
        """Scale all gradients so their global L2 norm is <= max_norm."""
        grads = [p.grad for p in parameters if p.grad is not None]
        total_norm, _ = multi_tensor_applier(self.multi_tensor_l2norm,
                                             self._overflow_buf, [grads], False)
        total_norm = total_norm.item()
        # Skip clipping on any non-finite norm (fp16 overflow can yield inf
        # or NaN).  The original only checked == +inf, letting NaN/-inf fall
        # through to the scaling math.
        if not math.isfinite(total_norm):
            return
        clip_coef = self.max_norm / (total_norm + 1e-6)
        if clip_coef < 1:
            multi_tensor_applier(self.multi_tensor_scale,
                                 self._overflow_buf, [grads, grads], clip_coef)
|
724 |
+
|
725 |
+
|
726 |
+
def main():
|
727 |
+
parser = argparse.ArgumentParser()
|
728 |
+
|
729 |
+
## Required parameters
|
730 |
+
parser.add_argument("--bert_model", default=None, type=str, required=True,
|
731 |
+
help="Bert pre-trained model selected in the list: bert-base-uncased, "
|
732 |
+
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
|
733 |
+
"bert-base-multilingual-cased, bert-base-chinese.")
|
734 |
+
parser.add_argument("--output_dir", default=None, type=str, required=True,
|
735 |
+
help="The output directory where the model checkpoints and predictions will be written.")
|
736 |
+
parser.add_argument("--init_checkpoint",
|
737 |
+
default=None,
|
738 |
+
type=str,
|
739 |
+
required=True,
|
740 |
+
help="The checkpoint file from pretraining")
|
741 |
+
|
742 |
+
## Other parameters
|
743 |
+
parser.add_argument("--train_file", default=None, type=str, help="SQuAD json for training. E.g., train-v1.1.json")
|
744 |
+
parser.add_argument("--predict_file", default=None, type=str,
|
745 |
+
help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
|
746 |
+
parser.add_argument("--max_seq_length", default=384, type=int,
|
747 |
+
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
|
748 |
+
"longer than this will be truncated, and sequences shorter than this will be padded.")
|
749 |
+
parser.add_argument("--doc_stride", default=128, type=int,
|
750 |
+
help="When splitting up a long document into chunks, how much stride to take between chunks.")
|
751 |
+
parser.add_argument("--max_query_length", default=64, type=int,
|
752 |
+
help="The maximum number of tokens for the question. Questions longer than this will "
|
753 |
+
"be truncated to this length.")
|
754 |
+
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
|
755 |
+
parser.add_argument("--do_predict", action='store_true', help="Whether to run eval on the dev set.")
|
756 |
+
parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
|
757 |
+
parser.add_argument("--predict_batch_size", default=8, type=int, help="Total batch size for predictions.")
|
758 |
+
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
|
759 |
+
parser.add_argument("--num_train_epochs", default=3.0, type=float,
|
760 |
+
help="Total number of training epochs to perform.")
|
761 |
+
parser.add_argument("--max_steps", default=-1.0, type=float,
|
762 |
+
help="Total number of training steps to perform.")
|
763 |
+
parser.add_argument("--warmup_proportion", default=0.1, type=float,
|
764 |
+
help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% "
|
765 |
+
"of training.")
|
766 |
+
parser.add_argument("--n_best_size", default=20, type=int,
|
767 |
+
help="The total number of n-best predictions to generate in the nbest_predictions.json "
|
768 |
+
"output file.")
|
769 |
+
parser.add_argument("--max_answer_length", default=30, type=int,
|
770 |
+
help="The maximum length of an answer that can be generated. This is needed because the start "
|
771 |
+
"and end predictions are not conditioned on one another.")
|
772 |
+
parser.add_argument("--verbose_logging", action='store_true',
|
773 |
+
help="If true, all of the warnings related to data processing will be printed. "
|
774 |
+
"A number of warnings are expected for a normal SQuAD evaluation.")
|
775 |
+
parser.add_argument("--no_cuda",
|
776 |
+
action='store_true',
|
777 |
+
help="Whether not to use CUDA when available")
|
778 |
+
parser.add_argument('--seed',
|
779 |
+
type=int,
|
780 |
+
default=42,
|
781 |
+
help="random seed for initialization")
|
782 |
+
parser.add_argument('--gradient_accumulation_steps',
|
783 |
+
type=int,
|
784 |
+
default=1,
|
785 |
+
help="Number of updates steps to accumulate before performing a backward/update pass.")
|
786 |
+
parser.add_argument("--do_lower_case",
|
787 |
+
action='store_true',
|
788 |
+
help="Whether to lower case the input text. True for uncased models, False for cased models.")
|
789 |
+
parser.add_argument("--local_rank",
|
790 |
+
type=int,
|
791 |
+
default=os.getenv('LOCAL_RANK', -1),
|
792 |
+
help="local_rank for distributed training on gpus")
|
793 |
+
parser.add_argument('--fp16',
|
794 |
+
default=False,
|
795 |
+
action='store_true',
|
796 |
+
help="Mixed precision training")
|
797 |
+
parser.add_argument('--amp',
|
798 |
+
default=False,
|
799 |
+
action='store_true',
|
800 |
+
help="Mixed precision training")
|
801 |
+
parser.add_argument('--loss_scale',
|
802 |
+
type=float, default=0,
|
803 |
+
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
|
804 |
+
"0 (default value): dynamic loss scaling.\n"
|
805 |
+
"Positive power of 2: static loss scaling value.\n")
|
806 |
+
parser.add_argument('--version_2_with_negative',
|
807 |
+
action='store_true',
|
808 |
+
help='If true, the SQuAD examples contain some that do not have an answer.')
|
809 |
+
parser.add_argument('--null_score_diff_threshold',
|
810 |
+
type=float, default=0.0,
|
811 |
+
help="If null_score - best_non_null is greater than the threshold predict null.")
|
812 |
+
parser.add_argument('--vocab_file',
|
813 |
+
type=str, default=None, required=True,
|
814 |
+
help="Vocabulary mapping/file BERT was pretrainined on")
|
815 |
+
parser.add_argument("--config_file",
|
816 |
+
default=None,
|
817 |
+
type=str,
|
818 |
+
required=True,
|
819 |
+
help="The BERT model config")
|
820 |
+
parser.add_argument('--log_freq',
|
821 |
+
type=int, default=50,
|
822 |
+
help='frequency of logging loss.')
|
823 |
+
parser.add_argument('--json-summary', type=str, default="results/dllogger.json",
|
824 |
+
help='If provided, the json summary will be written to'
|
825 |
+
'the specified file.')
|
826 |
+
parser.add_argument("--eval_script",
|
827 |
+
help="Script to evaluate squad predictions",
|
828 |
+
default="evaluate.py",
|
829 |
+
type=str)
|
830 |
+
parser.add_argument("--do_eval",
|
831 |
+
action='store_true',
|
832 |
+
help="Whether to use evaluate accuracy of predictions")
|
833 |
+
parser.add_argument("--use_env",
|
834 |
+
action='store_true',
|
835 |
+
help="Whether to read local rank from ENVVAR")
|
836 |
+
parser.add_argument('--skip_checkpoint',
|
837 |
+
default=False,
|
838 |
+
action='store_true',
|
839 |
+
help="Whether to save checkpoints")
|
840 |
+
parser.add_argument('--disable-progress-bar',
|
841 |
+
default=False,
|
842 |
+
action='store_true',
|
843 |
+
help='Disable tqdm progress bar')
|
844 |
+
parser.add_argument("--skip_cache",
|
845 |
+
default=False,
|
846 |
+
action='store_true',
|
847 |
+
help="Whether to cache train features")
|
848 |
+
parser.add_argument("--cache_dir",
|
849 |
+
default=None,
|
850 |
+
type=str,
|
851 |
+
help="Location to cache train feaures. Will default to the dataset directory")
|
852 |
+
|
853 |
+
args = parser.parse_args()
|
854 |
+
args.fp16 = args.fp16 or args.amp
|
855 |
+
|
856 |
+
if args.local_rank == -1 or args.no_cuda:
|
857 |
+
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
|
858 |
+
n_gpu = torch.cuda.device_count()
|
859 |
+
else:
|
860 |
+
torch.cuda.set_device(args.local_rank)
|
861 |
+
device = torch.device("cuda", args.local_rank)
|
862 |
+
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
|
863 |
+
torch.distributed.init_process_group(backend='nccl', init_method='env://')
|
864 |
+
n_gpu = 1
|
865 |
+
|
866 |
+
if is_main_process():
|
867 |
+
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
|
868 |
+
filename=args.json_summary),
|
869 |
+
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)])
|
870 |
+
else:
|
871 |
+
dllogger.init(backends=[])
|
872 |
+
|
873 |
+
print("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
|
874 |
+
device, n_gpu, bool(args.local_rank != -1), args.fp16))
|
875 |
+
|
876 |
+
dllogger.log(step="PARAMETER", data={"Config": [str(args)]})
|
877 |
+
|
878 |
+
if args.gradient_accumulation_steps < 1:
|
879 |
+
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
|
880 |
+
args.gradient_accumulation_steps))
|
881 |
+
|
882 |
+
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
|
883 |
+
|
884 |
+
random.seed(args.seed)
|
885 |
+
np.random.seed(args.seed)
|
886 |
+
torch.manual_seed(args.seed)
|
887 |
+
dllogger.log(step="PARAMETER", data={"SEED": args.seed})
|
888 |
+
|
889 |
+
if n_gpu > 0:
|
890 |
+
torch.cuda.manual_seed_all(args.seed)
|
891 |
+
|
892 |
+
if not args.do_train and not args.do_predict:
|
893 |
+
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
|
894 |
+
|
895 |
+
if args.do_train:
|
896 |
+
if not args.train_file:
|
897 |
+
raise ValueError(
|
898 |
+
"If `do_train` is True, then `train_file` must be specified.")
|
899 |
+
if args.do_predict:
|
900 |
+
if not args.predict_file:
|
901 |
+
raise ValueError(
|
902 |
+
"If `do_predict` is True, then `predict_file` must be specified.")
|
903 |
+
|
904 |
+
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and os.listdir(args.output_dir)!=['logfile.txt']:
|
905 |
+
print("WARNING: Output directory {} already exists and is not empty.".format(args.output_dir), os.listdir(args.output_dir))
|
906 |
+
if not os.path.exists(args.output_dir) and is_main_process():
|
907 |
+
os.makedirs(args.output_dir)
|
908 |
+
|
909 |
+
tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512) # for bert large
|
910 |
+
# tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
|
911 |
+
|
912 |
+
train_examples = None
|
913 |
+
num_train_optimization_steps = None
|
914 |
+
if args.do_train:
|
915 |
+
train_examples = read_squad_examples(
|
916 |
+
input_file=args.train_file, is_training=True, version_2_with_negative=args.version_2_with_negative)
|
917 |
+
num_train_optimization_steps = int(
|
918 |
+
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
|
919 |
+
if args.local_rank != -1:
|
920 |
+
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
|
921 |
+
|
922 |
+
# Prepare model
|
923 |
+
config = modeling.BertConfig.from_json_file(args.config_file)
|
924 |
+
# Padding for divisibility by 8
|
925 |
+
if config.vocab_size % 8 != 0:
|
926 |
+
config.vocab_size += 8 - (config.vocab_size % 8)
|
927 |
+
|
928 |
+
modeling.ACT2FN["bias_gelu"] = modeling.bias_gelu_training
|
929 |
+
model = modeling.BertForQuestionAnswering(config)
|
930 |
+
# model = modeling.BertForQuestionAnswering.from_pretrained(args.bert_model,
|
931 |
+
# cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)))
|
932 |
+
dllogger.log(step="PARAMETER", data={"loading_checkpoint": True})
|
933 |
+
model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu')["model"], strict=False)
|
934 |
+
dllogger.log(step="PARAMETER", data={"loaded_checkpoint": True})
|
935 |
+
model.to(device)
|
936 |
+
num_weights = sum([p.numel() for p in model.parameters() if p.requires_grad])
|
937 |
+
dllogger.log(step="PARAMETER", data={"model_weights_num":num_weights})
|
938 |
+
|
939 |
+
# Prepare optimizer
|
940 |
+
param_optimizer = list(model.named_parameters())
|
941 |
+
|
942 |
+
# hack to remove pooler, which is not used
|
943 |
+
# thus it produce None grad that break apex
|
944 |
+
param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
|
945 |
+
|
946 |
+
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
|
947 |
+
optimizer_grouped_parameters = [
|
948 |
+
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
|
949 |
+
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
|
950 |
+
]
|
951 |
+
if args.do_train:
|
952 |
+
if args.fp16:
|
953 |
+
try:
|
954 |
+
from apex.optimizers import FusedAdam
|
955 |
+
except ImportError:
|
956 |
+
raise ImportError(
|
957 |
+
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
|
958 |
+
optimizer = FusedAdam(optimizer_grouped_parameters,
|
959 |
+
lr=args.learning_rate,
|
960 |
+
bias_correction=False)
|
961 |
+
|
962 |
+
if args.loss_scale == 0:
|
963 |
+
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", keep_batchnorm_fp32=False,
|
964 |
+
loss_scale="dynamic")
|
965 |
+
else:
|
966 |
+
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", keep_batchnorm_fp32=False, loss_scale=args.loss_scale)
|
967 |
+
if args.do_train:
|
968 |
+
scheduler = LinearWarmUpScheduler(optimizer, warmup=args.warmup_proportion, total_steps=num_train_optimization_steps)
|
969 |
+
|
970 |
+
else:
|
971 |
+
optimizer = BertAdam(optimizer_grouped_parameters,
|
972 |
+
lr=args.learning_rate,
|
973 |
+
warmup=args.warmup_proportion,
|
974 |
+
t_total=num_train_optimization_steps)
|
975 |
+
|
976 |
+
if args.local_rank != -1:
|
977 |
+
try:
|
978 |
+
from apex.parallel import DistributedDataParallel as DDP
|
979 |
+
except ImportError:
|
980 |
+
raise ImportError(
|
981 |
+
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
|
982 |
+
|
983 |
+
model = DDP(model)
|
984 |
+
elif n_gpu > 1:
|
985 |
+
model = torch.nn.DataParallel(model)
|
986 |
+
|
987 |
+
global_step = 0
|
988 |
+
if args.do_train:
|
989 |
+
|
990 |
+
if args.cache_dir is None:
|
991 |
+
cached_train_features_file = args.train_file + '_{0}_{1}_{2}_{3}'.format(
|
992 |
+
list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride),
|
993 |
+
str(args.max_query_length))
|
994 |
+
else:
|
995 |
+
cached_train_features_file = args.cache_dir.strip('/') + '/' + args.train_file.split('/')[-1] + '_{0}_{1}_{2}_{3}'.format(
|
996 |
+
list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride),
|
997 |
+
str(args.max_query_length))
|
998 |
+
|
999 |
+
train_features = None
|
1000 |
+
try:
|
1001 |
+
with open(cached_train_features_file, "rb") as reader:
|
1002 |
+
train_features = pickle.load(reader)
|
1003 |
+
except:
|
1004 |
+
train_features = convert_examples_to_features(
|
1005 |
+
examples=train_examples,
|
1006 |
+
tokenizer=tokenizer,
|
1007 |
+
max_seq_length=args.max_seq_length,
|
1008 |
+
doc_stride=args.doc_stride,
|
1009 |
+
max_query_length=args.max_query_length,
|
1010 |
+
is_training=True)
|
1011 |
+
|
1012 |
+
if not args.skip_cache and is_main_process():
|
1013 |
+
dllogger.log(step="PARAMETER", data={"Cached_train features_file": cached_train_features_file})
|
1014 |
+
with open(cached_train_features_file, "wb") as writer:
|
1015 |
+
pickle.dump(train_features, writer)
|
1016 |
+
|
1017 |
+
dllogger.log(step="PARAMETER", data={"train_start": True})
|
1018 |
+
dllogger.log(step="PARAMETER", data={"training_samples": len(train_examples)})
|
1019 |
+
dllogger.log(step="PARAMETER", data={"training_features": len(train_features)})
|
1020 |
+
dllogger.log(step="PARAMETER", data={"train_batch_size":args.train_batch_size})
|
1021 |
+
dllogger.log(step="PARAMETER", data={"steps":num_train_optimization_steps})
|
1022 |
+
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
|
1023 |
+
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
|
1024 |
+
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
|
1025 |
+
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
|
1026 |
+
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
|
1027 |
+
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
|
1028 |
+
all_start_positions, all_end_positions)
|
1029 |
+
if args.local_rank == -1:
|
1030 |
+
train_sampler = RandomSampler(train_data)
|
1031 |
+
else:
|
1032 |
+
train_sampler = DistributedSampler(train_data)
|
1033 |
+
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size * n_gpu)
|
1034 |
+
|
1035 |
+
model.train()
|
1036 |
+
gradClipper = GradientClipper(max_grad_norm=1.0)
|
1037 |
+
final_loss = None
|
1038 |
+
train_start = time.time()
|
1039 |
+
for epoch in range(int(args.num_train_epochs)):
|
1040 |
+
train_iter = tqdm(train_dataloader, desc="Iteration", disable=args.disable_progress_bar) if is_main_process() else train_dataloader
|
1041 |
+
for step, batch in enumerate(train_iter):
|
1042 |
+
# Terminate early for benchmarking
|
1043 |
+
|
1044 |
+
if args.max_steps > 0 and global_step > args.max_steps:
|
1045 |
+
break
|
1046 |
+
|
1047 |
+
if n_gpu == 1:
|
1048 |
+
batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self
|
1049 |
+
input_ids, input_mask, segment_ids, start_positions, end_positions = batch
|
1050 |
+
start_logits, end_logits = model(input_ids, segment_ids, input_mask)
|
1051 |
+
# If we are on multi-GPU, split add a dimension
|
1052 |
+
if len(start_positions.size()) > 1:
|
1053 |
+
start_positions = start_positions.squeeze(-1)
|
1054 |
+
if len(end_positions.size()) > 1:
|
1055 |
+
end_positions = end_positions.squeeze(-1)
|
1056 |
+
# sometimes the start/end positions are outside our model inputs, we ignore these terms
|
1057 |
+
ignored_index = start_logits.size(1)
|
1058 |
+
start_positions.clamp_(0, ignored_index)
|
1059 |
+
end_positions.clamp_(0, ignored_index)
|
1060 |
+
|
1061 |
+
loss_fct = torch.nn.CrossEntropyLoss(ignore_index=ignored_index)
|
1062 |
+
start_loss = loss_fct(start_logits, start_positions)
|
1063 |
+
end_loss = loss_fct(end_logits, end_positions)
|
1064 |
+
loss = (start_loss + end_loss) / 2
|
1065 |
+
if n_gpu > 1:
|
1066 |
+
loss = loss.mean() # mean() to average on multi-gpu.
|
1067 |
+
if args.gradient_accumulation_steps > 1:
|
1068 |
+
loss = loss / args.gradient_accumulation_steps
|
1069 |
+
if args.fp16:
|
1070 |
+
with amp.scale_loss(loss, optimizer) as scaled_loss:
|
1071 |
+
scaled_loss.backward()
|
1072 |
+
else:
|
1073 |
+
loss.backward()
|
1074 |
+
|
1075 |
+
# gradient clipping
|
1076 |
+
gradClipper.step(amp.master_params(optimizer))
|
1077 |
+
|
1078 |
+
if (step + 1) % args.gradient_accumulation_steps == 0:
|
1079 |
+
if args.fp16 :
|
1080 |
+
# modify learning rate with special warm up for BERT which FusedAdam doesn't do
|
1081 |
+
scheduler.step()
|
1082 |
+
optimizer.step()
|
1083 |
+
optimizer.zero_grad()
|
1084 |
+
global_step += 1
|
1085 |
+
|
1086 |
+
final_loss = loss.item()
|
1087 |
+
if step % args.log_freq == 0:
|
1088 |
+
dllogger.log(step=(epoch, global_step,), data={"step_loss": final_loss,
|
1089 |
+
"learning_rate": optimizer.param_groups[0]['lr']})
|
1090 |
+
|
1091 |
+
time_to_train = time.time() - train_start
|
1092 |
+
|
1093 |
+
if args.do_train and is_main_process() and not args.skip_checkpoint:
|
1094 |
+
# Save a trained model and the associated configuration
|
1095 |
+
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
|
1096 |
+
output_model_file = os.path.join(args.output_dir, modeling.WEIGHTS_NAME)
|
1097 |
+
torch.save({"model":model_to_save.state_dict()}, output_model_file)
|
1098 |
+
output_config_file = os.path.join(args.output_dir, modeling.CONFIG_NAME)
|
1099 |
+
with open(output_config_file, 'w') as f:
|
1100 |
+
f.write(model_to_save.config.to_json_string())
|
1101 |
+
|
1102 |
+
if args.do_predict and (args.local_rank == -1 or is_main_process()):
|
1103 |
+
|
1104 |
+
if not args.do_train and args.fp16:
|
1105 |
+
model.half()
|
1106 |
+
|
1107 |
+
eval_examples = read_squad_examples(
|
1108 |
+
input_file=args.predict_file, is_training=False, version_2_with_negative=args.version_2_with_negative)
|
1109 |
+
eval_features = convert_examples_to_features(
|
1110 |
+
examples=eval_examples,
|
1111 |
+
tokenizer=tokenizer,
|
1112 |
+
max_seq_length=args.max_seq_length,
|
1113 |
+
doc_stride=args.doc_stride,
|
1114 |
+
max_query_length=args.max_query_length,
|
1115 |
+
is_training=False)
|
1116 |
+
|
1117 |
+
dllogger.log(step="PARAMETER", data={"infer_start": True})
|
1118 |
+
dllogger.log(step="PARAMETER", data={"eval_samples": len(eval_examples)})
|
1119 |
+
dllogger.log(step="PARAMETER", data={"eval_features": len(eval_features)})
|
1120 |
+
dllogger.log(step="PARAMETER", data={"predict_batch_size": args.predict_batch_size})
|
1121 |
+
|
1122 |
+
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
|
1123 |
+
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
|
1124 |
+
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
|
1125 |
+
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
|
1126 |
+
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
|
1127 |
+
# Run prediction for full data
|
1128 |
+
eval_sampler = SequentialSampler(eval_data)
|
1129 |
+
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)
|
1130 |
+
|
1131 |
+
infer_start = time.time()
|
1132 |
+
model.eval()
|
1133 |
+
all_results = []
|
1134 |
+
dllogger.log(step="PARAMETER", data={"eval_start": True})
|
1135 |
+
for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating", disable=args.disable_progress_bar):
|
1136 |
+
if len(all_results) % 1000 == 0:
|
1137 |
+
dllogger.log(step="PARAMETER", data={"sample_number": len(all_results)})
|
1138 |
+
input_ids = input_ids.to(device)
|
1139 |
+
input_mask = input_mask.to(device)
|
1140 |
+
segment_ids = segment_ids.to(device)
|
1141 |
+
with torch.no_grad():
|
1142 |
+
batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
|
1143 |
+
for i, example_index in enumerate(example_indices):
|
1144 |
+
start_logits = batch_start_logits[i].detach().cpu().tolist()
|
1145 |
+
end_logits = batch_end_logits[i].detach().cpu().tolist()
|
1146 |
+
eval_feature = eval_features[example_index.item()]
|
1147 |
+
unique_id = int(eval_feature.unique_id)
|
1148 |
+
all_results.append(RawResult(unique_id=unique_id,
|
1149 |
+
start_logits=start_logits,
|
1150 |
+
end_logits=end_logits))
|
1151 |
+
|
1152 |
+
time_to_infer = time.time() - infer_start
|
1153 |
+
output_prediction_file = os.path.join(args.output_dir, "predictions.json")
|
1154 |
+
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json")
|
1155 |
+
|
1156 |
+
answers, nbest_answers = get_answers(eval_examples, eval_features, all_results, args)
|
1157 |
+
with open(output_prediction_file, "w") as f:
|
1158 |
+
f.write(json.dumps(answers, indent=4) + "\n")
|
1159 |
+
with open(output_nbest_file, "w") as f:
|
1160 |
+
f.write(json.dumps(nbest_answers, indent=4) + "\n")
|
1161 |
+
|
1162 |
+
# output_null_log_odds_file = os.path.join(args.output_dir, "null_odds.json")
|
1163 |
+
# write_predictions(eval_examples, eval_features, all_results,
|
1164 |
+
# args.n_best_size, args.max_answer_length,
|
1165 |
+
# args.do_lower_case, output_prediction_file,
|
1166 |
+
# output_nbest_file, output_null_log_odds_file, args.verbose_logging,
|
1167 |
+
# args.version_2_with_negative, args.null_score_diff_threshold)
|
1168 |
+
|
1169 |
+
if args.do_eval and is_main_process():
|
1170 |
+
import sys
|
1171 |
+
import subprocess
|
1172 |
+
eval_out = subprocess.check_output([sys.executable, args.eval_script,
|
1173 |
+
args.predict_file, args.output_dir + "/predictions.json"])
|
1174 |
+
scores = str(eval_out).strip()
|
1175 |
+
exact_match = float(scores.split(":")[1].split(",")[0])
|
1176 |
+
f1 = float(scores.split(":")[2].split("}")[0])
|
1177 |
+
|
1178 |
+
if args.do_train:
|
1179 |
+
gpu_count = n_gpu
|
1180 |
+
if torch.distributed.is_initialized():
|
1181 |
+
gpu_count = torch.distributed.get_world_size()
|
1182 |
+
|
1183 |
+
if args.max_steps == -1:
|
1184 |
+
dllogger.log(step=tuple(), data={"e2e_train_time": time_to_train,
|
1185 |
+
"training_sequences_per_second": len(train_features) * args.num_train_epochs / time_to_train,
|
1186 |
+
"final_loss": final_loss})
|
1187 |
+
else:
|
1188 |
+
dllogger.log(step=tuple(), data={"e2e_train_time": time_to_train,
|
1189 |
+
"training_sequences_per_second": args.train_batch_size * args.gradient_accumulation_steps \
|
1190 |
+
* args.max_steps * gpu_count / time_to_train,
|
1191 |
+
"final_loss": final_loss})
|
1192 |
+
if args.do_predict and is_main_process():
|
1193 |
+
dllogger.log(step=tuple(), data={"e2e_inference_time": time_to_infer,
|
1194 |
+
"inference_sequences_per_second": len(eval_features) / time_to_infer})
|
1195 |
+
if args.do_eval and is_main_process():
|
1196 |
+
dllogger.log(step=tuple(), data={"exact_match": exact_match, "F1": f1})
|
1197 |
+
|
1198 |
+
if __name__ == "__main__":
    # Entry point: run fine-tuning / prediction / evaluation as selected by
    # the CLI flags, then flush dllogger so any buffered metric records are
    # written out before the process exits.
    main()
    dllogger.flush()
|
docker/intel_code/llama13b/Model-References/PyTorch/examples/gpu_migration/nlp/bert/run_swag.py
ADDED
@@ -0,0 +1,570 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
|
3 |
+
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
"""BERT finetuning runner."""
|
17 |
+
|
18 |
+
import argparse
|
19 |
+
import csv
|
20 |
+
import logging
|
21 |
+
import os
|
22 |
+
import random
|
23 |
+
import sys
|
24 |
+
from io import open
|
25 |
+
|
26 |
+
import numpy as np
|
27 |
+
import torch
|
28 |
+
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
|
29 |
+
TensorDataset)
|
30 |
+
from torch.utils.data.distributed import DistributedSampler
|
31 |
+
from tqdm import tqdm, trange
|
32 |
+
|
33 |
+
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
|
34 |
+
from modeling import BertForMultipleChoice, BertConfig, WEIGHTS_NAME, CONFIG_NAME
|
35 |
+
from optimization import BertAdam, warmup_linear
|
36 |
+
from tokenization import BertTokenizer
|
37 |
+
|
38 |
+
torch._C._jit_set_profiling_mode(False)
|
39 |
+
torch._C._jit_set_profiling_executor(False)
|
40 |
+
|
41 |
+
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
|
42 |
+
datefmt = '%m/%d/%Y %H:%M:%S',
|
43 |
+
level = logging.INFO)
|
44 |
+
logger = logging.getLogger(__name__)
|
45 |
+
|
46 |
+
|
47 |
+
class SwagExample(object):
    """A single training/test example for the SWAG dataset."""

    def __init__(self,
                 swag_id,
                 context_sentence,
                 start_ending,
                 ending_0,
                 ending_1,
                 ending_2,
                 ending_3,
                 label=None):
        # The four candidate endings are collected into a single list so
        # downstream code can iterate over them uniformly.
        self.swag_id = swag_id
        self.context_sentence = context_sentence
        self.start_ending = start_ending
        self.endings = [ending_0, ending_1, ending_2, ending_3]
        self.label = label

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Assemble the same "key: value" fragments as a comma-separated line;
        # the label is only shown when one was provided (i.e. training data).
        parts = [
            "swag_id: {}".format(self.swag_id),
            "context_sentence: {}".format(self.context_sentence),
            "start_ending: {}".format(self.start_ending),
        ]
        parts.extend(
            "ending_{}: {}".format(i, ending)
            for i, ending in enumerate(self.endings)
        )
        if self.label is not None:
            parts.append("label: {}".format(self.label))
        return ", ".join(parts)
|
87 |
+
|
88 |
+
|
89 |
+
class InputFeatures(object):
    """Tokenized model inputs for all four answer choices of one example."""

    def __init__(self,
                 example_id,
                 choices_features,
                 label):
        self.example_id = example_id
        # Each entry of `choices_features` is a (tokens, input_ids,
        # input_mask, segment_ids) tuple; the raw tokens are dropped and the
        # three tensor-ready fields are kept as a dict per choice.
        self.choices_features = []
        for _, input_ids, input_mask, segment_ids in choices_features:
            self.choices_features.append({
                'input_ids': input_ids,
                'input_mask': input_mask,
                'segment_ids': segment_ids,
            })
        self.label = label
|
106 |
+
|
107 |
+
|
108 |
+
def read_swag_examples(input_file, is_training):
    """Read a SWAG csv file and build a list of SwagExample objects.

    When ``is_training`` is True the file must carry a trailing ``label``
    column, which is parsed as an int; otherwise labels are left as None.
    """
    with open(input_file, 'r', encoding='utf-8') as f:
        rows = []
        for row in csv.reader(f):
            if sys.version_info[0] == 2:
                # Legacy Python 2 path: decode raw byte cells to unicode.
                row = [unicode(cell, 'utf-8') for cell in row]
            rows.append(row)

    if is_training and rows[0][-1] != 'label':
        raise ValueError(
            "For training, the input file must contain a label column."
        )

    # Skip the header row and map the fixed SWAG column layout onto fields.
    examples = []
    for row in rows[1:]:
        examples.append(SwagExample(
            swag_id=row[2],
            context_sentence=row[4],
            # In the swag dataset the common beginning of each choice is
            # stored in "sent2".
            start_ending=row[5],
            ending_0=row[7],
            ending_1=row[8],
            ending_2=row[9],
            ending_3=row[10],
            label=int(row[11]) if is_training else None,
        ))
    return examples
|
138 |
+
|
139 |
+
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 is_training):
    """Loads a data file into a list of `InputBatch`s."""
    # SWAG is a multiple-choice task. Following the GPT-style formatting
    # suggested by @jacobdevlin-google in
    # https://github.com/google-research/bert/issues/38, every one of the
    # four endings becomes its own "[CLS] context [SEP] choice [SEP]"
    # sequence; the model emits one score per sequence and a softmax over
    # the four scores selects the answer.
    features = []
    for example_index, example in enumerate(examples):
        context_tokens = tokenizer.tokenize(example.context_sentence)
        start_ending_tokens = tokenizer.tokenize(example.start_ending)

        choices_features = []
        for ending_index, ending in enumerate(example.endings):
            # Work on a copy of the context so truncation for one choice
            # cannot leak into the next one.
            context_tokens_choice = list(context_tokens)
            ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
            # Shrink both sides in place so the total fits; "- 3" accounts
            # for [CLS] and the two [SEP] markers.
            _truncate_seq_pair(context_tokens_choice, ending_tokens,
                               max_seq_length - 3)

            tokens = (["[CLS]"] + context_tokens_choice + ["[SEP]"]
                      + ending_tokens + ["[SEP]"])
            # Segment 0 covers [CLS] + context + first [SEP]; segment 1
            # covers the ending + final [SEP].
            segment_ids = ([0] * (len(context_tokens_choice) + 2)
                           + [1] * (len(ending_tokens) + 1))

            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)

            # Zero-pad up to the sequence length.
            pad = [0] * (max_seq_length - len(input_ids))
            input_ids = input_ids + pad
            input_mask = input_mask + pad
            segment_ids = segment_ids + pad

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length

            choices_features.append(
                (tokens, input_ids, input_mask, segment_ids))

        label = example.label
        if example_index < 5:
            # Dump the first few converted examples for manual inspection.
            logger.info("*** Example ***")
            logger.info("swag_id: {}".format(example.swag_id))
            for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features):
                logger.info("choice: {}".format(choice_idx))
                logger.info("tokens: {}".format(' '.join(tokens)))
                logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
                logger.info("input_mask: {}".format(' '.join(map(str, input_mask))))
                logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids))))
            if is_training:
                logger.info("label: {}".format(label))

        features.append(InputFeatures(
            example_id=example.swag_id,
            choices_features=choices_features,
            label=label,
        ))

    return features
|
216 |
+
|
217 |
+
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
|
218 |
+
"""Truncates a sequence pair in place to the maximum length."""
|
219 |
+
|
220 |
+
# This is a simple heuristic which will always truncate the longer sequence
|
221 |
+
# one token at a time. This makes more sense than truncating an equal percent
|
222 |
+
# of tokens from each, since if one sequence is very short then each token
|
223 |
+
# that's truncated likely contains more information than a longer sequence.
|
224 |
+
while True:
|
225 |
+
total_length = len(tokens_a) + len(tokens_b)
|
226 |
+
if total_length <= max_length:
|
227 |
+
break
|
228 |
+
if len(tokens_a) > len(tokens_b):
|
229 |
+
tokens_a.pop()
|
230 |
+
else:
|
231 |
+
tokens_b.pop()
|
232 |
+
|
233 |
+
def accuracy(out, labels):
    """Count rows of `out` whose argmax matches the corresponding label."""
    predictions = np.argmax(out, axis=1)
    # Boolean comparison summed -> number of correct predictions (not a rate).
    return (predictions == labels).sum()
|
236 |
+
|
237 |
+
def select_field(features, field):
|
238 |
+
return [
|
239 |
+
[
|
240 |
+
choice[field]
|
241 |
+
for choice in feature.choices_features
|
242 |
+
]
|
243 |
+
for feature in features
|
244 |
+
]
|
245 |
+
|
246 |
+
def main():
|
247 |
+
parser = argparse.ArgumentParser()
|
248 |
+
|
249 |
+
## Required parameters
|
250 |
+
parser.add_argument("--data_dir",
|
251 |
+
default=None,
|
252 |
+
type=str,
|
253 |
+
required=True,
|
254 |
+
help="The input data dir. Should contain the .csv files (or other data files) for the task.")
|
255 |
+
parser.add_argument("--bert_model", default=None, type=str, required=True,
|
256 |
+
help="Bert pre-trained model selected in the list: bert-base-uncased, "
|
257 |
+
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
|
258 |
+
"bert-base-multilingual-cased, bert-base-chinese.")
|
259 |
+
parser.add_argument("--output_dir",
|
260 |
+
default=None,
|
261 |
+
type=str,
|
262 |
+
required=True,
|
263 |
+
help="The output directory where the model checkpoints will be written.")
|
264 |
+
parser.add_argument("--init_checkpoint",
|
265 |
+
default=None,
|
266 |
+
type=str,
|
267 |
+
required=True,
|
268 |
+
help="The checkpoint file from pretraining")
|
269 |
+
|
270 |
+
## Other parameters
|
271 |
+
parser.add_argument("--max_seq_length",
|
272 |
+
default=128,
|
273 |
+
type=int,
|
274 |
+
help="The maximum total input sequence length after WordPiece tokenization. \n"
|
275 |
+
"Sequences longer than this will be truncated, and sequences shorter \n"
|
276 |
+
"than this will be padded.")
|
277 |
+
parser.add_argument("--do_train",
|
278 |
+
action='store_true',
|
279 |
+
help="Whether to run training.")
|
280 |
+
parser.add_argument("--do_eval",
|
281 |
+
action='store_true',
|
282 |
+
help="Whether to run eval on the dev set.")
|
283 |
+
parser.add_argument("--do_lower_case",
|
284 |
+
action='store_true',
|
285 |
+
help="Set this flag if you are using an uncased model.")
|
286 |
+
parser.add_argument("--train_batch_size",
|
287 |
+
default=32,
|
288 |
+
type=int,
|
289 |
+
help="Total batch size for training.")
|
290 |
+
parser.add_argument("--eval_batch_size",
|
291 |
+
default=8,
|
292 |
+
type=int,
|
293 |
+
help="Total batch size for eval.")
|
294 |
+
parser.add_argument("--learning_rate",
|
295 |
+
default=5e-5,
|
296 |
+
type=float,
|
297 |
+
help="The initial learning rate for Adam.")
|
298 |
+
parser.add_argument("--num_train_epochs",
|
299 |
+
default=3.0,
|
300 |
+
type=float,
|
301 |
+
help="Total number of training epochs to perform.")
|
302 |
+
parser.add_argument("--max_steps", default=-1.0, type=float,
|
303 |
+
help="Total number of training steps to perform.")
|
304 |
+
parser.add_argument("--warmup_proportion",
|
305 |
+
default=0.1,
|
306 |
+
type=float,
|
307 |
+
help="Proportion of training to perform linear learning rate warmup for. "
|
308 |
+
"E.g., 0.1 = 10%% of training.")
|
309 |
+
parser.add_argument("--no_cuda",
|
310 |
+
action='store_true',
|
311 |
+
help="Whether not to use CUDA when available")
|
312 |
+
parser.add_argument("--local_rank",
|
313 |
+
type=int,
|
314 |
+
default=-1,
|
315 |
+
help="local_rank for distributed training on gpus")
|
316 |
+
parser.add_argument('--seed',
|
317 |
+
type=int,
|
318 |
+
default=42,
|
319 |
+
help="random seed for initialization")
|
320 |
+
parser.add_argument('--gradient_accumulation_steps',
|
321 |
+
type=int,
|
322 |
+
default=1,
|
323 |
+
help="Number of updates steps to accumulate before performing a backward/update pass.")
|
324 |
+
parser.add_argument('--fp16',
|
325 |
+
default=False,
|
326 |
+
action='store_true',
|
327 |
+
help="Mixed precision training")
|
328 |
+
parser.add_argument('--amp',
|
329 |
+
default=False,
|
330 |
+
action='store_true',
|
331 |
+
help="Mixed precision training")
|
332 |
+
parser.add_argument('--loss_scale',
|
333 |
+
type=float, default=0,
|
334 |
+
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
|
335 |
+
"0 (default value): dynamic loss scaling.\n"
|
336 |
+
"Positive power of 2: static loss scaling value.\n")
|
337 |
+
|
338 |
+
args = parser.parse_args()
|
339 |
+
args.fp16 = args.fp16 or args.amp
|
340 |
+
|
341 |
+
if args.local_rank == -1 or args.no_cuda:
|
342 |
+
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
|
343 |
+
n_gpu = torch.cuda.device_count()
|
344 |
+
else:
|
345 |
+
torch.cuda.set_device(args.local_rank)
|
346 |
+
device = torch.device("cuda", args.local_rank)
|
347 |
+
n_gpu = 1
|
348 |
+
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
|
349 |
+
torch.distributed.init_process_group(backend='nccl')
|
350 |
+
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
|
351 |
+
device, n_gpu, bool(args.local_rank != -1), args.fp16))
|
352 |
+
|
353 |
+
if args.gradient_accumulation_steps < 1:
|
354 |
+
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
|
355 |
+
args.gradient_accumulation_steps))
|
356 |
+
|
357 |
+
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
|
358 |
+
|
359 |
+
random.seed(args.seed)
|
360 |
+
np.random.seed(args.seed)
|
361 |
+
torch.manual_seed(args.seed)
|
362 |
+
if n_gpu > 0:
|
363 |
+
torch.cuda.manual_seed_all(args.seed)
|
364 |
+
|
365 |
+
if not args.do_train and not args.do_eval:
|
366 |
+
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
|
367 |
+
|
368 |
+
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
|
369 |
+
print("WARNING: Output directory ({}) already exists and is not empty.".format(args.output_dir))
|
370 |
+
if not os.path.exists(args.output_dir):
|
371 |
+
os.makedirs(args.output_dir)
|
372 |
+
|
373 |
+
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
|
374 |
+
|
375 |
+
train_examples = None
|
376 |
+
num_train_optimization_steps = None
|
377 |
+
if args.do_train:
|
378 |
+
train_examples = read_swag_examples(os.path.join(args.data_dir, 'train.csv'), is_training = True)
|
379 |
+
num_train_optimization_steps = int(
|
380 |
+
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
|
381 |
+
if args.local_rank != -1:
|
382 |
+
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
|
383 |
+
|
384 |
+
# Prepare model
|
385 |
+
model = BertForMultipleChoice.from_pretrained(args.bert_model,
|
386 |
+
cache_dir=os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(args.local_rank)),
|
387 |
+
num_choices=4)
|
388 |
+
model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'), strict=False)
|
389 |
+
|
390 |
+
if args.fp16:
|
391 |
+
model.half()
|
392 |
+
model.to(device)
|
393 |
+
if args.local_rank != -1:
|
394 |
+
try:
|
395 |
+
from apex.parallel import DistributedDataParallel as DDP
|
396 |
+
except ImportError:
|
397 |
+
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
|
398 |
+
|
399 |
+
model = DDP(model)
|
400 |
+
elif n_gpu > 1:
|
401 |
+
model = torch.nn.DataParallel(model)
|
402 |
+
|
403 |
+
# Prepare optimizer
|
404 |
+
param_optimizer = list(model.named_parameters())
|
405 |
+
|
406 |
+
# hack to remove pooler, which is not used
|
407 |
+
# thus it produce None grad that break apex
|
408 |
+
param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
|
409 |
+
|
410 |
+
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
|
411 |
+
optimizer_grouped_parameters = [
|
412 |
+
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
|
413 |
+
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
|
414 |
+
]
|
415 |
+
if args.fp16:
|
416 |
+
try:
|
417 |
+
from apex.contrib.optimizers import FP16_Optimizer
|
418 |
+
from apex.optimizers import FusedAdam
|
419 |
+
except ImportError:
|
420 |
+
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
|
421 |
+
|
422 |
+
optimizer = FusedAdam(optimizer_grouped_parameters,
|
423 |
+
lr=args.learning_rate,
|
424 |
+
bias_correction=False,
|
425 |
+
max_grad_norm=1.0)
|
426 |
+
if args.loss_scale == 0:
|
427 |
+
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
|
428 |
+
else:
|
429 |
+
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
|
430 |
+
else:
|
431 |
+
optimizer = BertAdam(optimizer_grouped_parameters,
|
432 |
+
lr=args.learning_rate,
|
433 |
+
warmup=args.warmup_proportion,
|
434 |
+
t_total=num_train_optimization_steps)
|
435 |
+
|
436 |
+
global_step = 0
|
437 |
+
if args.do_train:
|
438 |
+
train_features = convert_examples_to_features(
|
439 |
+
train_examples, tokenizer, args.max_seq_length, True)
|
440 |
+
logger.info("***** Running training *****")
|
441 |
+
logger.info(" Num examples = %d", len(train_examples))
|
442 |
+
logger.info(" Batch size = %d", args.train_batch_size)
|
443 |
+
logger.info(" Num steps = %d", num_train_optimization_steps)
|
444 |
+
all_input_ids = torch.tensor(select_field(train_features, 'input_ids'), dtype=torch.long)
|
445 |
+
all_input_mask = torch.tensor(select_field(train_features, 'input_mask'), dtype=torch.long)
|
446 |
+
all_segment_ids = torch.tensor(select_field(train_features, 'segment_ids'), dtype=torch.long)
|
447 |
+
all_label = torch.tensor([f.label for f in train_features], dtype=torch.long)
|
448 |
+
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
|
449 |
+
if args.local_rank == -1:
|
450 |
+
train_sampler = RandomSampler(train_data)
|
451 |
+
else:
|
452 |
+
train_sampler = DistributedSampler(train_data)
|
453 |
+
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
|
454 |
+
|
455 |
+
model.train()
|
456 |
+
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
|
457 |
+
tr_loss = 0
|
458 |
+
nb_tr_examples, nb_tr_steps = 0, 0
|
459 |
+
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
|
460 |
+
# Terminate early for benchmarking
|
461 |
+
if args.max_steps > 0 and global_step > args.max_steps:
|
462 |
+
break
|
463 |
+
|
464 |
+
batch = tuple(t.to(device) for t in batch)
|
465 |
+
input_ids, input_mask, segment_ids, label_ids = batch
|
466 |
+
loss = model(input_ids, segment_ids, input_mask, label_ids)
|
467 |
+
if n_gpu > 1:
|
468 |
+
loss = loss.mean() # mean() to average on multi-gpu.
|
469 |
+
if args.fp16 and args.loss_scale != 1.0:
|
470 |
+
# rescale loss for fp16 training
|
471 |
+
# see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
|
472 |
+
loss = loss * args.loss_scale
|
473 |
+
if args.gradient_accumulation_steps > 1:
|
474 |
+
loss = loss / args.gradient_accumulation_steps
|
475 |
+
tr_loss += loss.item()
|
476 |
+
nb_tr_examples += input_ids.size(0)
|
477 |
+
nb_tr_steps += 1
|
478 |
+
|
479 |
+
if args.fp16:
|
480 |
+
optimizer.backward(loss)
|
481 |
+
else:
|
482 |
+
loss.backward()
|
483 |
+
if (step + 1) % args.gradient_accumulation_steps == 0:
|
484 |
+
if args.fp16:
|
485 |
+
# modify learning rate with special warm up BERT uses
|
486 |
+
# if args.fp16 is False, BertAdam is used that handles this automatically
|
487 |
+
lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
|
488 |
+
for param_group in optimizer.param_groups:
|
489 |
+
param_group['lr'] = lr_this_step
|
490 |
+
optimizer.step()
|
491 |
+
optimizer.zero_grad()
|
492 |
+
global_step += 1
|
493 |
+
|
494 |
+
|
495 |
+
if args.do_train:
|
496 |
+
# Save a trained model and the associated configuration
|
497 |
+
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
|
498 |
+
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
|
499 |
+
torch.save(model_to_save.state_dict(), output_model_file)
|
500 |
+
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
|
501 |
+
with open(output_config_file, 'w') as f:
|
502 |
+
f.write(model_to_save.config.to_json_string())
|
503 |
+
|
504 |
+
# Load a trained model and config that you have fine-tuned
|
505 |
+
config = BertConfig(output_config_file)
|
506 |
+
model = BertForMultipleChoice(config, num_choices=4)
|
507 |
+
model.load_state_dict(torch.load(output_model_file))
|
508 |
+
else:
|
509 |
+
model = BertForMultipleChoice.from_pretrained(args.bert_model, num_choices=4)
|
510 |
+
model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu'), strict=False)
|
511 |
+
model.to(device)
|
512 |
+
|
513 |
+
|
514 |
+
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
|
515 |
+
eval_examples = read_swag_examples(os.path.join(args.data_dir, 'val.csv'), is_training = True)
|
516 |
+
eval_features = convert_examples_to_features(
|
517 |
+
eval_examples, tokenizer, args.max_seq_length, True)
|
518 |
+
logger.info("***** Running evaluation *****")
|
519 |
+
logger.info(" Num examples = %d", len(eval_examples))
|
520 |
+
logger.info(" Batch size = %d", args.eval_batch_size)
|
521 |
+
all_input_ids = torch.tensor(select_field(eval_features, 'input_ids'), dtype=torch.long)
|
522 |
+
all_input_mask = torch.tensor(select_field(eval_features, 'input_mask'), dtype=torch.long)
|
523 |
+
all_segment_ids = torch.tensor(select_field(eval_features, 'segment_ids'), dtype=torch.long)
|
524 |
+
all_label = torch.tensor([f.label for f in eval_features], dtype=torch.long)
|
525 |
+
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label)
|
526 |
+
# Run prediction for full data
|
527 |
+
eval_sampler = SequentialSampler(eval_data)
|
528 |
+
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
|
529 |
+
|
530 |
+
model.eval()
|
531 |
+
eval_loss, eval_accuracy = 0, 0
|
532 |
+
nb_eval_steps, nb_eval_examples = 0, 0
|
533 |
+
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
|
534 |
+
input_ids = input_ids.to(device)
|
535 |
+
input_mask = input_mask.to(device)
|
536 |
+
segment_ids = segment_ids.to(device)
|
537 |
+
label_ids = label_ids.to(device)
|
538 |
+
|
539 |
+
with torch.no_grad():
|
540 |
+
tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
|
541 |
+
logits = model(input_ids, segment_ids, input_mask)
|
542 |
+
|
543 |
+
logits = logits.detach().cpu().numpy()
|
544 |
+
label_ids = label_ids.to('cpu').numpy()
|
545 |
+
tmp_eval_accuracy = accuracy(logits, label_ids)
|
546 |
+
|
547 |
+
eval_loss += tmp_eval_loss.mean().item()
|
548 |
+
eval_accuracy += tmp_eval_accuracy
|
549 |
+
|
550 |
+
nb_eval_examples += input_ids.size(0)
|
551 |
+
nb_eval_steps += 1
|
552 |
+
|
553 |
+
eval_loss = eval_loss / nb_eval_steps
|
554 |
+
eval_accuracy = eval_accuracy / nb_eval_examples
|
555 |
+
|
556 |
+
result = {'eval_loss': eval_loss,
|
557 |
+
'eval_accuracy': eval_accuracy,
|
558 |
+
'global_step': global_step,
|
559 |
+
'loss': tr_loss/nb_tr_steps}
|
560 |
+
|
561 |
+
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
|
562 |
+
with open(output_eval_file, "w") as writer:
|
563 |
+
logger.info("***** Eval results *****")
|
564 |
+
for key in sorted(result.keys()):
|
565 |
+
logger.info(" %s = %s", key, str(result[key]))
|
566 |
+
writer.write("%s = %s\n" % (key, str(result[key])))
|
567 |
+
|
568 |
+
|
569 |
+
# Entry point: only run the fine-tuning/evaluation driver when this file is
# executed as a script, not when it is imported as a module.
if __name__ == "__main__":
    main()