diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/CODEOWNERS b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/CODEOWNERS new file mode 100644 index 0000000000000000000000000000000000000000..60a921d7f6d586692195e89dba62a54f7d8ec23d --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/CODEOWNERS @@ -0,0 +1 @@ +* @jeffra @samyam @tjruwase @ShadenSmith @conglongli @awan-10 @cli99 @eltonzheng @minjiaz @RezaYazdaniAminabadi @duli2012 @mrwyattii @yaozhewei @arashb @xiaoxiawu-microsoft @guanhuawang diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/LICENSE b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9b75fdf39f9459943a92945209097b05e623622d --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/LICENSE @@ -0,0 +1,265 @@ +The following applies to all files unless otherwise noted: +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of NVIDIA CORPORATION nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-- + +This repository also contains code from Hugging Face Inc., Google Research, +Facebook (from their Fairseq project), and Philip Popien. Files from these +organizations have notices at the top of each file. Below are licenses +used in those files, as indicated. + + +------------- LICENSE FOR huggingface and Google Research code -------------- + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------- LICENSE FOR Facebook Fairseq code -------------- + +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/MANIFEST.in b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..f44791183e894ad5f6bb72cf4498db5c8566d498 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/MANIFEST.in @@ -0,0 +1,2 @@ +include megatron/data/Makefile +include megatron/data/helpers.cpp diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/SECURITY.md b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/SECURITY.md new file mode 100644 index 0000000000000000000000000000000000000000..869fdfe2b246991a053fab9cfec1bed3ab532ab1 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. 
Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/README.md b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1f0aa31d96f2126b7ddc201385c266bca2f122cc --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/README.md @@ -0,0 +1,5 @@ +# Run the scripts below to setup dataset + +bash download_books.sh + +bash download_vocab.sh diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_books.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_books.sh new file mode 100644 index 0000000000000000000000000000000000000000..cb93c2b21328886ec4b425fdcf788011d913fa57 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_books.sh @@ -0,0 +1,2 @@ +wget https://the-eye.eu/public/AI/pile_neox/data/BookCorpusDataset_text_document.bin +wget https://the-eye.eu/public/AI/pile_neox/data/BookCorpusDataset_text_document.idx \ No newline at end of file diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_ckpt.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_ckpt.sh new file mode 100644 index 0000000000000000000000000000000000000000..ac10274b187057ccda7284a84c55cc63f9d247f2 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_ckpt.sh @@ -0,0 +1,8 @@ +mkdir -p checkpoints/gpt2_345m + +cd checkpoints/gpt2_345m +wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_lm_345m_v0.0.zip +unzip megatron_lm_345m_v0.0.zip +rm megatron_lm_345m_v0.0.zip +cd ../.. 
+ diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_vocab.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_vocab.sh new file mode 100644 index 0000000000000000000000000000000000000000..0b7637104baaa0f1d413d03143b20f17b0a1ad40 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/dataset/download_vocab.sh @@ -0,0 +1,2 @@ +wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json +wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt \ No newline at end of file diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/README.md b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d4c56b219ed48c875185e2f23be3eec892bdc062 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/README.md @@ -0,0 +1,15 @@ +## Recipes and Scripts + +### Azure + +We strongly recommend to start with AzureML recipe in the ```azureml``` folder. + +If you have a custom infrastructure (e.g. HPC clusters) or Azure VM and VMSS based environments, please refer to the bash scripts in the ```azure``` folder. + +### MoE + +Please see the ```MoE``` folder for different training recipes and scripts for Mixture-of-expert based models. + +### Curriculum Learning + +Curriculum learning recipes are in the ```curriculum_learning``` folder. Please refer to the detailed tutorials linked inside. diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/create_embeddings.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/create_embeddings.sh new file mode 100644 index 0000000000000000000000000000000000000000..59a5839f7e273e15a76e77765c38807ac59b38b9 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/create_embeddings.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Compute embeddings for each entry of a given dataset (e.g. 
Wikipedia) + +RANK=0 +WORLD_SIZE=1 + +# Wikipedia data can be downloaded from the following link: +# https://github.com/facebookresearch/DPR/blob/master/data/download_data.py +EVIDENCE_DATA_DIR= +EMBEDDING_PATH= +CHECKPOINT_PATH= + +python tools/create_doc_index.py \ + --num-layers 12 \ + --hidden-size 768 \ + --num-attention-heads 12 \ + --tensor-model-parallel-size 1 \ + --micro-batch-size 128 \ + --checkpoint-activations \ + --seq-length 512 \ + --retriever-seq-length 256 \ + --max-position-embeddings 512 \ + --load ${CHECKPOINT_PATH} \ + --evidence-data-path ${EVIDENCE_DATA_DIR} \ + --embedding-path ${EMBEDDING_PATH} \ + --indexer-log-interval 1000 \ + --indexer-batch-size 128 \ + --vocab-file bert-vocab.txt \ + --num-workers 2 \ + --fp16 + diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/evaluate_ict_zeroshot_nq.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/evaluate_ict_zeroshot_nq.sh new file mode 100644 index 0000000000000000000000000000000000000000..e1ce45a9342f2a1a3a818018abe19c0810c40e19 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/evaluate_ict_zeroshot_nq.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +# Evaluate natural question test data given Wikipedia embeddings and pretrained +# ICT model + +# Datasets can be downloaded from the following link: +# https://github.com/facebookresearch/DPR/blob/master/data/download_data.py + +EVIDENCE_DATA_DIR= +EMBEDDING_PATH= +CHECKPOINT_PATH= + +QA_FILE= + +python tasks/main.py \ + --task ICT-ZEROSHOT-NQ \ + --tokenizer-type BertWordPieceLowerCase \ + --num-layers 12 \ + --hidden-size 768 \ + --num-attention-heads 12 \ + --tensor-model-parallel-size 1 \ + --micro-batch-size 128 \ + --checkpoint-activations \ + --seq-length 512 \ + --max-position-embeddings 512 \ + --load ${CHECKPOINT_PATH} \ + --evidence-data-path ${EVIDENCE_DATA_DIR} \ + --embedding-path ${EMBEDDING_PATH} \ + --retriever-seq-length 256 \ + --vocab-file bert-vocab.txt\ + --qa-data-test ${QA_FILE} \ + --num-workers 2 \ + --faiss-use-gpu \ + --retriever-report-topk-accuracies 1 5 20 100 \ + --fp16 + diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/finetune_mnli_distributed.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/finetune_mnli_distributed.sh new file mode 100644 index 0000000000000000000000000000000000000000..213eb1fa116fd464ed2aea373879c802da837b6f --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/finetune_mnli_distributed.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +WORLD_SIZE=8 + +DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +TRAIN_DATA="data/glue_data/MNLI/train.tsv" +VALID_DATA="data/glue_data/MNLI/dev_matched.tsv \ + data/glue_data/MNLI/dev_mismatched.tsv" +PRETRAINED_CHECKPOINT=checkpoints/bert_345m +VOCAB_FILE=bert-vocab.txt +CHECKPOINT_PATH=checkpoints/bert_345m_mnli + +python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \ + --task MNLI \ + --seed 1234 \ + --train-data $TRAIN_DATA \ + --valid-data $VALID_DATA \ + --tokenizer-type BertWordPieceLowerCase \ + --vocab-file $VOCAB_FILE \ + --epochs 5 \ + --pretrained-checkpoint $PRETRAINED_CHECKPOINT \ + --tensor-model-parallel-size 1 \ + --num-layers 24 \ + --hidden-size 1024 \ + --num-attention-heads 16 \ + --micro-batch-size 8 \ + --checkpoint-activations \ + --lr 5.0e-5 \ + 
--lr-decay-style linear \ + --lr-warmup-fraction 0.065 \ + --seq-length 512 \ + --max-position-embeddings 512 \ + --save-interval 500000 \ + --save $CHECKPOINT_PATH \ + --log-interval 10 \ + --eval-interval 100 \ + --eval-iters 50 \ + --weight-decay 1.0e-1 \ + --fp16 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/finetune_race_distributed.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/finetune_race_distributed.sh new file mode 100644 index 0000000000000000000000000000000000000000..5ac642ee3248ae1dc73e2536d2fe275b354c52b7 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/finetune_race_distributed.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +WORLD_SIZE=8 + +DISTRIBUTED_ARGS="--nproc_per_node $WORLD_SIZE \ + --nnodes 1 \ + --node_rank 0 \ + --master_addr localhost \ + --master_port 6000" + +TRAIN_DATA="data/RACE/train/middle" +VALID_DATA="data/RACE/dev/middle \ + data/RACE/dev/high" +VOCAB_FILE=bert-vocab.txt +PRETRAINED_CHECKPOINT=checkpoints/bert_345m +CHECKPOINT_PATH=checkpoints/bert_345m_race + +python -m torch.distributed.launch $DISTRIBUTED_ARGS ./tasks/main.py \ + --task RACE \ + --seed 1234 \ + --train-data $TRAIN_DATA \ + --valid-data $VALID_DATA \ + --tokenizer-type BertWordPieceLowerCase \ + --vocab-file $VOCAB_FILE \ + --epochs 3 \ + --pretrained-checkpoint $PRETRAINED_CHECKPOINT \ + --tensor-model-parallel-size 1 \ + --num-layers 24 \ + --hidden-size 1024 \ + --num-attention-heads 16 \ + --micro-batch-size 4 \ + --checkpoint-activations \ + --lr 1.0e-5 \ + --lr-decay-style linear \ + --lr-warmup-fraction 0.06 \ + --seq-length 512 \ + --max-position-embeddings 512 \ + --save-interval 100000 \ + --save $CHECKPOINT_PATH \ + --log-interval 10 \ + --eval-interval 100 \ + --eval-iters 50 \ + --weight-decay 1.0e-1 \ + --clip-grad 1.0 \ + --hidden-dropout 0.1 \ + --attention-dropout 0.1 \ + --fp16 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_bert.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_bert.sh new file mode 100644 index 0000000000000000000000000000000000000000..9c744ee451442b18e0538b84224ea1df06bda7a6 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_bert.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +RANK=0 +WORLD_SIZE=1 +DATA_PATH=_text_sentence +CHECKPOINT_PATH= + +python pretrain_bert.py \ + --num-layers 24 \ + --hidden-size 1024 \ + --num-attention-heads 16 \ + --micro-batch-size 4 \ + --global-batch-size 8 \ + --seq-length 512 \ + --max-position-embeddings 512 \ + --train-iters 2000000 \ + --lr-decay-iters 990000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --vocab-file bert-vocab.txt \ + --data-impl mmap \ + --split 949,50,1 \ + --lr 0.0001 \ + --min-lr 0.00001 \ + --lr-decay-style linear \ + --lr-warmup-fraction .01 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --log-interval 100 \ + --save-interval 10000 \ + --eval-interval 1000 \ + --eval-iters 10 \ + --fp16 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_t5_distributed.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_t5_distributed.sh new file mode 100644 index 0000000000000000000000000000000000000000..778b4ad2a36aa649b78c0625f1848381f6fa2a7f --- /dev/null +++ 
b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_t5_distributed.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NNODES=1 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +DATA_PATH= +VOCAB_FILE= +CHECKPOINT_PATH= + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" + +python -m torch.distributed.launch $DISTRIBUTED_ARGS \ + pretrain_t5.py \ + --num-layers 12 \ + --hidden-size 768 \ + --num-attention-heads 12 \ + --kv-channels 64 \ + --ffn-hidden-size 3072 \ + --encoder-seq-length 512 \ + --decoder-seq-length 128 \ + --micro-batch-size 16 \ + --global-batch-size 2048 \ + --max-position-embeddings 512 \ + --train-iters 1000000 \ + --lr-decay-iters 1000000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --vocab-file $VOCAB_FILE \ + --data-impl mmap \ + --split 949,50,1 \ + --lr 0.0001 \ + --min-lr 0.00001 \ + --lr-decay-style linear \ + --lr-warmup-fraction .01 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --log-interval 100 \ + --save-interval 10000 \ + --eval-interval 1000 \ + --eval-iters 10 \ + --fp16 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_t5_distributed_with_mp.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_t5_distributed_with_mp.sh new file mode 100644 index 0000000000000000000000000000000000000000..9be70393d86e499ea2d1ab6ebaae1e59415a4f3b --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/examples/pretrain_t5_distributed_with_mp.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +GPUS_PER_NODE=8 +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NNODES=1 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +DATA_PATH= +CHECKPOINT_PATH= + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" + +python -m torch.distributed.launch $DISTRIBUTED_ARGS \ + pretrain_t5.py \ + --tensor-model-parallel-size 2 \ + --num-layers 12 \ + --hidden-size 768 \ + --num-attention-heads 12 \ + --kv-channels 64 \ + --ffn-hidden-size 3072 \ + --encoder-seq-length 512 \ + --decoder-seq-length 128 \ + --micro-batch-size 16 \ + --global-batch-size 2048 \ + --seq-length 512 \ + --max-position-embeddings 512 \ + --train-iters 1000000 \ + --lr-decay-iters 1000000 \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --vocab-file t5-vocab.txt \ + --data-impl mmap \ + --split 949,50,1 \ + --lr 0.0001 \ + --min-lr 0.00001 \ + --lr-decay-style linear \ + --lr-warmup-fraction .01 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --log-interval 100 \ + --save-interval 10000 \ + --eval-interval 1000 \ + --eval-iters 10 \ + --fp16 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_bert.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_bert.py new file mode 100644 index 0000000000000000000000000000000000000000..dd057a7ffdbb64a2e92f50aff173ca73c7867421 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_bert.py @@ -0,0 +1,159 @@ +# coding=utf-8 +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company. +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Pretrain BERT""" + +from functools import partial + +import torch +import torch.nn.functional as F + +from megatron import get_args +from megatron import print_rank_0 +from megatron import get_timers +from megatron import mpu +from megatron.data.dataset_utils import build_train_valid_test_datasets +from megatron.model import BertModel +from megatron.training import pretrain +from megatron.utils import average_losses_across_data_parallel_group + + +def model_provider(pre_process=True, post_process=True): + """Build the model.""" + + print_rank_0('building BERT model ...') + + args = get_args() + num_tokentypes = 2 if args.bert_binary_head else 0 + model = BertModel( + num_tokentypes=num_tokentypes, + add_binary_head=args.bert_binary_head, + parallel_output=True, + pre_process=pre_process, + post_process=post_process) + + return model + + +def get_batch(data_iterator): + """Build the batch.""" + + # Items and their type. + keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask'] + datatype = torch.int64 if get_args.device.type=="cuda" else torch.int32 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + # TODO (SW-62395): Implement proper Long -> Int casting + for key, val in data.items(): + data[key] = val.to(datatype) + else: + data = None + data_b = mpu.broadcast_data(keys, data, datatype) + + # Unpack. + if (datatype == torch.int64): + tokens = data_b['text'].long() + types = data_b['types'].long() + sentence_order = data_b['is_random'].long() + loss_mask = data_b['loss_mask'].float() + lm_labels = data_b['labels'].long() + padding_mask = data_b['padding_mask'].long() + else: + tokens = data_b['text'].int() + types = data_b['types'].int() + sentence_order = data_b['is_random'].int() + loss_mask = data_b['loss_mask'].float() + lm_labels = data_b['labels'].int() + padding_mask = data_b['padding_mask'].int() + + return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask + + +def loss_func(loss_mask, sentence_order, output_tensor): + lm_loss_, sop_logits = output_tensor + + lm_loss_ = lm_loss_.float() + loss_mask = loss_mask.float() + lm_loss = torch.sum( + lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() + + if sop_logits is not None: + sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), + sentence_order.view(-1), + ignore_index=-1) + sop_loss = sop_loss.float() + loss = lm_loss + sop_loss + averaged_losses = average_losses_across_data_parallel_group( + [lm_loss, sop_loss]) + return loss, {'lm loss': averaged_losses[0], + 'sop loss': averaged_losses[1]} + + else: + loss = lm_loss + averaged_losses = average_losses_across_data_parallel_group( + [lm_loss]) + return loss, {'lm loss': averaged_losses[0]} + + +def forward_step(data_iterator, model): + """Forward step.""" + args = get_args() + timers = get_timers() + + # Get the batch. 
+ timers('batch-generator').start() + tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = get_batch( + data_iterator) + timers('batch-generator').stop() + + if not args.bert_binary_head: + types = None + + # Forward pass through the model. + output_tensor = model(tokens, padding_mask, tokentype_ids=types, + lm_labels=lm_labels) + + return output_tensor, partial(loss_func, loss_mask, sentence_order) + + +def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid, and test datasets.""" + args = get_args() + + print_rank_0('> building train, validation, and test datasets ' + 'for BERT ...') + train_ds, valid_ds, test_ds = build_train_valid_test_datasets( + data_prefix=args.data_path, + data_impl=args.data_impl, + splits_string=args.split, + train_valid_test_num_samples=train_val_test_num_samples, + max_seq_length=args.seq_length, + masked_lm_prob=args.mask_prob, + short_seq_prob=args.short_seq_prob, + seed=args.seed, + skip_warmup=(not args.mmap_warmup), + binary_head=args.bert_binary_head) + print_rank_0("> finished creating BERT datasets ...") + + return train_ds, valid_ds, test_ds + + +if __name__ == "__main__": + + pretrain(train_valid_test_datasets_provider, model_provider, forward_step, + args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'}) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_gpt.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_gpt.py new file mode 100644 index 0000000000000000000000000000000000000000..52b55e255d5510fcc531590a5776dabb52c0d87f --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_gpt.py @@ -0,0 +1,335 @@ +# coding=utf-8 +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company. +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Pretrain GPT""" + +import torch +from functools import partial +from megatron import get_args +from megatron import print_rank_0 +from megatron import get_timers +from megatron import get_tokenizer +from megatron import mpu +from megatron.data.gpt_dataset import build_train_valid_test_datasets +from megatron.model import GPTModel, GPTModelPipe +from megatron.training import pretrain +from megatron.utils import get_ltor_masks_and_position_ids +from megatron.utils import average_losses_across_data_parallel_group +from megatron.global_vars import get_current_device +from megatron.enums import PositionEmbeddingType +import deepspeed +from deepspeed.runtime.utils import see_memory_usage +import os +import subprocess + +from torch import nn +import torch.nn.functional as F + +def model_provider(pre_process=True, post_process=True, parallel_output=True): + """Build the model.""" + + print_rank_0('building GPT model ...') + see_memory_usage(f"Before Building Model", force=True) + + args = get_args() + with deepspeed.zero.Init(data_parallel_group=mpu.get_data_parallel_group(), + remote_device=None if args.remote_device == 'none' else args.remote_device, + config_dict_or_path=args.deepspeed_config, + enabled=args.zero_stage == 3, + mpu=mpu): + current_device = get_current_device() + if args.deepspeed and not args.no_pipeline_parallel: + + # verify --deepspeed_activation_checkpointing + # mandatory! otherwise the model uses fork() mapping to Megatron's RNGStatesTrackerSingleton + # while GPTModelPipe uses DS checkpoint activations that uses DS's RNGStatesTracker + if args.checkpoint_activations and args.checkpoint_activations_granularity == "full": + assert args.deepspeed_activation_checkpointing, \ + "Flag --deepspeed_activation_checkpointing is mandatory when using GPTModelPipe" \ + " with checkpoint activations granularity full." + + model = GPTModelPipe( + num_tokentypes=0, + parallel_output=parallel_output, + ) + # This is a hack to give us a reference to get_batch_pipe from within training.py + # We need to call model.set_batch_fn after deepspeed.initialize + model._megatron_batch_fn = get_batch_pipe + + # Predompute the attention mask and store it in args. This avoids having to + # pipeline it as an activation during training. The mask is constant, and thus + # we can reuse it. + attention_mask = torch.tril(torch.ones( + (1, args.seq_length, args.seq_length), device=current_device)).view( + 1, 1, args.seq_length, args.seq_length) + + # Convert attention mask to binary: + attention_mask = (attention_mask < 0.5) + if args.fp16: + attention_mask = attention_mask.half() + elif args.bf16: + attention_mask = attention_mask.bfloat16() + + if args.mask_tensor_adding: + args.attn_mask = attention_mask * -10000.0 + else: + args.attn_mask = attention_mask.to(torch.bool) + + else: + assert args.position_embedding_type != PositionEmbeddingType.alibi, \ + "GPTModel doesn't yet support ALiBi positional encoding" + model = GPTModel( + num_tokentypes=0, + parallel_output=parallel_output, + pre_process=pre_process, + post_process=post_process + ).to(current_device) + see_memory_usage(f"After Building Model", force=True) + return model + + +def get_batch(data_iterator): + """Generate a batch""" + args = get_args() + tokenizer = get_tokenizer() + + # Items and their type. + keys = ['text'] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = mpu.broadcast_data(keys, data, datatype) + + # Unpack. 
+ tokens_ = data_b['text'].long() + if not args.use_seq_len_plus_one_tokens: + labels = torch.roll(tokens_, shifts=-1, dims=1) + labels[:, -1] = -1 + tokens = tokens_ + else: + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + + # Get the masks and postition ids. + attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + tokens, + tokenizer.eod, + args.reset_position_ids, + args.reset_attention_mask, + args.eod_mask_loss, + labels = labels, + dummy_sample= None,) + + tokens[tokens == -1] = 0 + labels[labels == -1] = 0 + + return tokens, labels, loss_mask, attention_mask, position_ids + + +def get_batch_pipe(data): + """Modification of `get_batch` to work on `next(data_iterator)` instead of `data_iterator`""" + args = get_args() + tokenizer = get_tokenizer() + + # Items and their type. + keys = ['text'] + datatype = torch.int64 + + # Broadcast data. + data_b = mpu.broadcast_data(keys, data, datatype) + + # Unpack. + tokens_ = data_b['text'].long() + if not args.use_seq_len_plus_one_tokens: + labels = torch.roll(tokens_, shifts=-1, dims=1) + labels[:, -1] = -1 + tokens = tokens_ + else: + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + + # Get the masks and postition ids. + attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + tokens, + tokenizer.eod, + args.reset_position_ids, + args.reset_attention_mask, + args.eod_mask_loss, + labels = labels, + dummy_sample = None, + ) + tokens[tokens == -1] = 0 + labels[labels == -1] = 0 + + + if args.curriculum_learning and args.curriculum_seqlen < tokens.size()[1]: + # seqlen-based curriculum learning + # tokens, position_ids, labels, loss_mask have size [batch size, seqlen] + tokens = tokens[:, :args.curriculum_seqlen].contiguous() + position_ids = position_ids[:, :args.curriculum_seqlen].contiguous() + if labels is not None: + labels = labels[:, :args.curriculum_seqlen].contiguous() + loss_mask = loss_mask[:, :args.curriculum_seqlen].contiguous() + + return (tokens, position_ids, attention_mask), (labels, loss_mask) + + +def loss_func(loss_mask, moe_loss, mos_loss, output_tensor): + args = get_args() + losses = output_tensor.float() + loss_mask = loss_mask.view(-1).float() + loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum() + + # Reduce loss for logging. 
+ averaged_loss = average_losses_across_data_parallel_group([loss]) + if args.mos or args.kd: + # assert max(args.num_experts) >= 1 + loss = loss + moe_loss + mos_loss + if args.mos: + return loss, {'total loss': loss, 'lm loss': averaged_loss[0], 'moe loss': moe_loss, 'mos loss': mos_loss} + elif args.kd: + return loss, {'total loss': loss, 'lm loss': averaged_loss[0], 'moe loss': moe_loss, 'kd loss': mos_loss} + print_rank_0('>>> total loss: {}, lm loss {}, kd loss {}'.format(loss, averaged_loss[0], mos_loss)) + else: + if max(args.num_experts) <= 1: + return loss, {'lm loss': averaged_loss[0]} + else: + loss = loss + moe_loss + return loss, {'lm loss': averaged_loss[0], 'moe loss': moe_loss} + +def calculate_mos_loss(args, stu_output, teacher_model, tokens, position_ids, attention_mask): + mos_loss = 0 + alpha = args.kd_alpha_ce + beta = args.kd_beta_ce + kd_temp = args.kd_temp + + if teacher_model: + with torch.no_grad(): + if args.curriculum_learning and args.curriculum_seqlen < args.seq_length: + assert args.curriculum_seqlen is not None + curriculum_seqlen = args.curriculum_seqlen + tokens = tokens[:, :curriculum_seqlen].contiguous() + position_ids = position_ids[:, :curriculum_seqlen].contiguous() + attention_mask = attention_mask[:, :, :curriculum_seqlen, :curriculum_seqlen].contiguous() + # No need to truncate labels as we do not need it for the teacher logits + tea_output, *tea_other_losses = teacher_model(tokens, position_ids, attention_mask) + assert stu_output.size() == tea_output.size(), 'teacher and student output should match in size. Student: {}, Teacher: {}, CL seq length {}'.format(stu_output.size(), tea_output.size(), args.curriculum_seqlen) + + student_logits = F.log_softmax(stu_output / kd_temp, dim=2) + tea_logits = F.softmax(tea_output / kd_temp, dim=2) # The target logits is expected to be probabilities. If we use log_softmax, then we need to set target_log to true when initializing the KLDivLoss. + + mos_loss = kd_temp * kd_temp * nn.KLDivLoss(reduction='batchmean')(student_logits, tea_logits) + + mos_loss = mos_loss.div(args.seq_length) * beta + return mos_loss + +def forward_step(data_iterator, model, teacher_model=None): + """Forward step.""" + args = get_args() + timers = get_timers() + + # Get the batch. + timers('batch-generator').start() + tokens, labels, loss_mask, attention_mask, position_ids = get_batch( + data_iterator) + timers('batch-generator').stop() + + if args.mos or args.kd: + # The forward func can return either the loss or the logits, depending on whether passing in the labels or not. 
+ stu_output, *other_losses = model(tokens, position_ids, attention_mask) + if args.curriculum_learning and args.curriculum_seqlen < args.seq_length: + assert args.curriculum_seqlen is not None + labels = labels[:, :args.curriculum_seqlen].contiguous() + output_tensor = mpu.vocab_parallel_cross_entropy(stu_output.contiguous().float(), labels) + else: + output_tensor, *other_losses = model(tokens, position_ids, attention_mask, + labels=labels) + if args.curriculum_learning and args.curriculum_seqlen < args.seq_length: + loss_mask = loss_mask[:, :args.curriculum_seqlen].contiguous() + + moe_losses = [] + for moe_loss in other_losses: + if moe_loss is not None: + moe_losses.append(moe_loss) + moe_loss = sum(moe_losses) * args.moe_loss_coeff + + mos_loss = 0 + if args.mos or args.kd: + assert model.training + mos_loss = calculate_mos_loss(args, stu_output, teacher_model, tokens, position_ids, attention_mask) + + # Output_tensor stores the standard loss, loos_func calculates the total loss. + return output_tensor, partial(loss_func, loss_mask, moe_loss, mos_loss) + + +def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid, and test datasets.""" + args = get_args() + + print_rank_0('> building train, validation, and test datasets ' + 'for GPT ...') + train_ds, valid_ds, test_ds = build_train_valid_test_datasets( + data_prefix=args.data_path, + train_data_prefix=args.train_data_path, + valid_data_prefix=args.valid_data_path, + test_data_prefix=args.test_data_path, + data_impl=args.data_impl, + splits_string=args.split, + train_valid_test_num_samples=train_val_test_num_samples, + seq_length=args.seq_length, + seed=args.seed, + skip_warmup=(not args.mmap_warmup), + use_seq_len_plus_one_tokens=args.use_seq_len_plus_one_tokens) + print_rank_0("> finished creating GPT datasets ...") + + return train_ds, valid_ds, test_ds + + +def command_exists(cmd): + result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True) + return result.wait() == 0 + + +def git_ds_info(): + from deepspeed.env_report import main as ds_report + ds_report() + + # Write out version/git info + git_hash_cmd = "git rev-parse --short HEAD" + git_branch_cmd = "git rev-parse --abbrev-ref HEAD" + if command_exists('git'): + try: + result = subprocess.check_output(git_hash_cmd, shell=True) + git_hash = result.decode('utf-8').strip() + result = subprocess.check_output(git_branch_cmd, shell=True) + git_branch = result.decode('utf-8').strip() + except subprocess.CalledProcessError: + git_hash = "unknown" + git_branch = "unknown" + else: + git_hash = "unknown" + git_branch = "unknown" + print(f'**** Git info for Megatron: git_hash={git_hash} git_branch={git_branch} ****') + + +if __name__ == "__main__": + git_ds_info() + pretrain(train_valid_test_datasets_provider, model_provider, forward_step, + args_defaults={'tokenizer_type': 'GPT2BPETokenizer'}) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_ict.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_ict.py new file mode 100644 index 0000000000000000000000000000000000000000..1438b3d57826038e45b65661c5d376e9c88c7097 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_ict.py @@ -0,0 +1,167 @@ +# coding=utf-8 +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Pretrain BERT for Inverse Cloze Task""" +import math + +import torch +import torch.distributed as dist +import torch.nn.functional as F + +from megatron import get_args +from megatron import print_rank_0 +from megatron import get_timers +from megatron import mpu +from megatron.data.biencoder_dataset_utils import get_ict_batch +from megatron.data.dataset_utils import build_train_valid_test_datasets +from megatron.model.biencoder_model import biencoder_model_provider +from megatron.training import pretrain +from megatron.utils import average_losses_across_data_parallel_group + + +def pretrain_ict_model_provider(): + args = get_args() + model = biencoder_model_provider( + only_context_model=False, + only_query_model=False, + biencoder_shared_query_context_model=\ + args.biencoder_shared_query_context_model) + return model + +def get_group_world_size_rank(): + + group = mpu.get_data_parallel_group() + rank = torch.distributed.get_rank(group=group) + world_size = torch.distributed.get_world_size(group=group) + + return group, rank, world_size + + +class AllgatherFromDataParallelRegion(torch.autograd.Function): + + @staticmethod + def forward(ctx, input_): + assert input_.dim() == 2 + group, rank, world_size = get_group_world_size_rank() + + tensor_list = [torch.empty_like(input_) for _ in range(world_size)] + tensor_list[rank] = input_ + torch.distributed.all_gather(tensor_list, input_, group=group) + + output = torch.cat(tensor_list, dim=0).contiguous() + + return output + + + @staticmethod + def backward(ctx, grad_output): + group, rank, world_size = get_group_world_size_rank() + + assert grad_output.shape[0] % world_size == 0 + dim_size = grad_output.shape[0] // world_size + output_list = torch.split(grad_output, dim_size, dim=0) + + # get chunk from this rank + output = output_list[rank].contiguous() + return output + +def forward_step(data_iterator, model, input_tensor): + """Forward step.""" + args = get_args() + timers = get_timers() + + # Get the batch. + timers('batch-generator').start() + query_tokens, query_mask, \ + context_tokens, context_mask, context_indices = get_ict_batch(data_iterator) + timers('batch-generator').stop() + + # Query and Context Types + query_types = torch.cuda.LongTensor(*query_tokens.shape).fill_(0) + context_types = torch.cuda.LongTensor(*context_tokens.shape).fill_(0) + + # Forward model. 
+ query_logits, context_logits = model(query_tokens, query_mask, + query_types, context_tokens, + context_mask, context_types) + + micro_batch_size = query_logits.shape[0] + # recall we assert that tensor_model_parallel_size == 1 + assert mpu.get_tensor_model_parallel_world_size() == 1, \ + "Model parallel size > 1 not supported for ICT" + + global_batch_size = dist.get_world_size() * micro_batch_size + all_query_logits = AllgatherFromDataParallelRegion.apply(query_logits) + all_context_logits = AllgatherFromDataParallelRegion.apply(context_logits) + + # scores are inner products between query and context embeddings + retrieval_scores = torch.matmul(all_query_logits, + torch.transpose(all_context_logits, 0, 1)) + # scaling the retriever scores + if args.retriever_score_scaling: + retrieval_scores = retrieval_scores / math.sqrt(args.hidden_size) + + softmax_scores = F.log_softmax(retrieval_scores, dim=1) + sorted_vals, sorted_indices = torch.topk(softmax_scores, + k=softmax_scores.shape[1], sorted=True) + + def topk_accuracy(k): + return torch.cuda.FloatTensor([sum([int(i in sorted_indices[i, :k]) \ + for i in range(global_batch_size)]) / global_batch_size]) + + topk_accs = [topk_accuracy(int(k)) for k in args.retriever_report_topk_accuracies] + + labels = torch.arange(global_batch_size).long().cuda() + loss = F.nll_loss(softmax_scores, labels, reduction='mean') + reduced_losses = average_losses_across_data_parallel_group([loss, *topk_accs]) + + # Scale the retrieval loss + loss = loss * mpu.get_data_parallel_world_size() + + # create stats_dict with retrieval loss and all specified top-k accuracies + topk_acc_dict = {'top{}_acc'.format(k): v * 100 for k, v in \ + zip(args.retriever_report_topk_accuracies, reduced_losses[1:])} + stats_dict = dict(loss=reduced_losses[0], **topk_acc_dict) + return loss, stats_dict + + +def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid and test datasets.""" + args = get_args() + print_rank_0('> building train, validation, and test datasets ' + 'for BERT ICT...') + + train_ds, valid_ds, test_ds = build_train_valid_test_datasets( + data_prefix=args.data_path, + data_impl=args.data_impl, + splits_string=args.split, + train_valid_test_num_samples=train_val_test_num_samples, + max_seq_length=args.seq_length, + masked_lm_prob=args.mask_prob, + short_seq_prob=args.short_seq_prob, + seed=args.seed, + skip_warmup=(not args.mmap_warmup), + binary_head=False, + dataset_type='ict') + print_rank_0("> finished creating BERT ICT datasets ...") + + return train_ds, valid_ds, test_ds + + +if __name__ == "__main__": + pretrain(train_valid_test_datasets_provider, + pretrain_ict_model_provider, + forward_step, + args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'}) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_t5.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_t5.py new file mode 100644 index 0000000000000000000000000000000000000000..34f80616238ed5a26927485a04b5d0c30474f53c --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_t5.py @@ -0,0 +1,134 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Pretrain T5""" + +from functools import partial + +import torch + +from megatron import ( + get_args, + get_timers, + mpu, + print_rank_0 +) +from megatron.data.dataset_utils import build_train_valid_test_datasets +from megatron.model import T5Model +from megatron.training import pretrain +from megatron.utils import average_losses_across_data_parallel_group + + +def model_provider(pre_process=True, post_process=True): + """Build the model.""" + assert pre_process and post_process, "T5 doesn't yet support pipelining" + + print_rank_0('building T5 model ...') + model = T5Model(num_tokentypes=0, + parallel_output=True) + return model + + +def get_batch(data_iterator): + """Build the batch.""" + + keys = ['text_enc', 'text_dec', 'labels', 'loss_mask', + 'enc_mask', 'dec_mask', 'enc_dec_mask'] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = mpu.broadcast_data(keys, data, datatype) + + # Unpack. + tokens_enc = data_b['text_enc'].long() + tokens_dec = data_b['text_dec'].long() + labels = data_b['labels'].long() + loss_mask = data_b['loss_mask'].float() + + enc_mask = (data_b['enc_mask'] < 0.5) + dec_mask = (data_b['dec_mask'] < 0.5) + enc_dec_mask = (data_b['enc_dec_mask'] < 0.5) + + return tokens_enc, tokens_dec, loss_mask, labels, \ + enc_mask, dec_mask, enc_dec_mask + + +def loss_func(loss_mask, output_tensor): + lm_loss_, _ = output_tensor + + lm_loss_ = lm_loss_.float() + lm_loss = torch.sum( + lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() + + loss = lm_loss + averaged_losses = average_losses_across_data_parallel_group([lm_loss]) + + return loss, {'lm loss': averaged_losses[0]} + + +def forward_step(data_iterator, model): + """Forward step.""" + args = get_args() + timers = get_timers() + + # Get the batch. 
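+    # get_batch() broadcasts the sample from the first tensor-parallel rank and
+    # returns encoder/decoder token ids, labels, the loss mask, and the three
+    # boolean attention masks (encoder self-, decoder self-, and cross-attention).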
+ timers('batch generator').start() + tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask \ + = get_batch(data_iterator) + timers('batch generator').stop() + + # Forward model lm_labels + output_tensor = model(tokens_enc, + tokens_dec, + enc_mask, + dec_mask, + enc_dec_mask, + tokentype_ids=None, + lm_labels=lm_labels) + + return output_tensor, partial(loss_func, loss_mask) + + +def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid, and test datasets.""" + args = get_args() + + print_rank_0('> building train, validation, and test datasets ' + 'for T5 ...') + train_ds, valid_ds, test_ds = build_train_valid_test_datasets( + data_prefix=args.data_path, + data_impl=args.data_impl, + splits_string=args.split, + train_valid_test_num_samples=train_val_test_num_samples, + max_seq_length=args.encoder_seq_length, + max_seq_length_dec=args.decoder_seq_length, + masked_lm_prob=args.mask_prob, + short_seq_prob=args.short_seq_prob, + seed=args.seed, + skip_warmup=(not args.mmap_warmup), + dataset_type='t5') + print_rank_0("> finished creating T5 datasets ...") + + return train_ds, valid_ds, test_ds + + +if __name__ == "__main__": + + pretrain(train_valid_test_datasets_provider, model_provider, forward_step, + args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'}) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_vit.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_vit.py new file mode 100644 index 0000000000000000000000000000000000000000..16ec10439a09146f6e2936778265222bd23fe911 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/pretrain_vit.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Pretrain VIT""" + +import torch +import torch.nn.functional as F +from megatron import get_args, get_timers, mpu, print_rank_0 +from megatron.data.vit_dataset import build_train_valid_datasets +from megatron.model.vit_model import VitModel +from megatron.training import pretrain +from megatron.utils import average_losses_across_data_parallel_group + +def model_provider(): + """Build the model.""" + + print_rank_0("building VIT model ...") + args = get_args() + + model = VitModel(num_classes=args.num_classes) + return model + +def get_batch(data_iterator): + """Build the batch.""" + data = next(data_iterator) + + # only data parallelism; no need for broadcast + images = data[0].cuda() + labels = data[1].cuda() + + return images, labels + +def forward_step(data_iterator, model, input_tensor): + """Forward step.""" + timers = get_timers() + assert input_tensor is None + + # Get the batch. + timers("batch-generator").start() + ( + images, + labels, + ) = get_batch(data_iterator) + timers("batch-generator").stop() + + # Forward model. 
lm_labels + logits = model(images).contiguous().float() + loss = F.cross_entropy(logits, labels) + + outputs = torch.argmax(logits, -1) + correct = (outputs == labels).float() + accuracy = torch.mean(correct) + + averaged_loss = average_losses_across_data_parallel_group([loss, accuracy]) + + return loss, {"loss": averaged_loss[0], "accuracy": averaged_loss[1]} + + +def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid, and test datasets.""" + args = get_args() + + print_rank_0( + "> building train, validation, and test datasets " "for VIT ..." + ) + train_ds, valid_ds = build_train_valid_datasets(data_path=args.data_path) + print_rank_0("> finished creating VIT datasets ...") + + return train_ds, valid_ds, None + + +if __name__ == "__main__": + + pretrain( + train_valid_test_datasets_provider, + model_provider, + forward_step, + args_defaults={'dataloader_type': 'cyclic'} + ) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/requirements.txt b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..44a1d9e30083847bd611fa62abfd63e3b468f1b0 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/requirements.txt @@ -0,0 +1,10 @@ +git+https://github.com/mlcommons/logging.git@2.1.0-rc1 +git+https://github.com/NVIDIA/mlperf-common.git +pybind11 +torch +six +regex +numpy +sentencepiece +torchmetrics>=0.8.0 +einops \ No newline at end of file diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/run_gpt.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/run_gpt.sh new file mode 100644 index 0000000000000000000000000000000000000000..94a2831747a4d405f690dfd38fe2046eaba8b9c2 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/run_gpt.sh @@ -0,0 +1,433 @@ +#!/bin/bash +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company. 
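+#
+# Launches GPT-3 pretraining for the MLPerf benchmark through the DeepSpeed
+# launcher on Habana Gaudi (HPU) devices.
+# Example invocation (paths and sizes are illustrative only):
+#   ./run_gpt.sh --data-dir /data/preprocessed_c4_spm \
+#                --num-nodes 8 --devices-per-node 8 \
+#                --global-batch-size 2048 --output-dir ./out
+# Flags not supplied on the command line fall back to the defaults defined below.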
+ +set -ex +function parse_args() +{ + while true; do + case "$1" in + --data-dir ) + DATA_DIR="$2" + shift 2 ;; + --num-nodes ) + NUM_NODES="$2" + shift 2 ;; + --devices-per-node ) + DEVICES_PER_NODE="$2" + shift 2 ;; + --data-parallel-size ) + DP="$2" + shift 2 ;; + --tensor-model-parallel-size ) + TP="$2" + shift 2 ;; + --pipeline-model-parallel-size ) + PP="$2" + shift 2 ;; + --num-layers ) + NUM_LAYERS="$2" + shift 2 ;; + --hidden-size ) + HIDDEN_SIZE="$2" + shift 2 ;; + --num-attention-heads ) + NUM_ATTENTION_HEADS="$2" + shift 2 ;; + --seq-length ) + SEQ_LENGTH="$2" + shift 2 ;; + --dropout ) + DROPOUT="$2" + shift 2 ;; + --micro-batch-size ) + MICRO_BATCH="$2" + shift 2 ;; + --eval-micro-batch-size ) + EVAL_MICRO_BATCH="$2" + shift 2 ;; + --global-batch-size ) + GLOBAL_BATCH="$2" + shift 2 ;; + --train-samples ) + TRAIN_SAMPLES="$2" + shift 2 ;; + --lr ) + LR="$2" + shift 2 ;; + --min-lr ) + MIN_LR="$2" + shift 2 ;; + --lr-decay-samples ) + LR_DECAY_SAMPLES="$2" + shift 2 ;; + --lr-warmup-samples ) + LR_WARMUP_SAMPLES="$2" + shift 2 ;; + --seed ) + SEED="$2" + shift 2 ;; + --eval-iters ) + EVAL_ITERS="$2" + shift 2 ;; + --eval-interval ) + EVAL_INTERVAL="$2" + shift 2 ;; + --exit-interval ) + EXIT_INTERVAL="$2" + shift 2 ;; + --output-dir ) + OUTPUT_DIR="$2" + shift 2 ;; + --start-from-ckpt ) + START_FROM_CKPT="$2" + shift 2 ;; + --universal-ckpt-path ) + UNIVERSAL_CKPT_PATH="$2" + shift 2 ;; + --save-checkpoints ) + SAVE_CKPT="$2" + shift 2 ;; + --save-checkpoints-dir ) + SAVE_CKPT_DIR="$2" + shift 2 ;; + --save-interval ) + SAVE_INTERVAL="$2" + shift 2 ;; + --log-interval ) + LOG_INTERVAL="$2" + shift 2 ;; + --tensorboard-dir ) + TENSORBOARD_DIR="$2" + shift 2 ;; + --kill-switch-file ) + KILL_SWITCH_FILE="$2" + shift 2 ;; + --hosts ) + HOSTS="$2" + shift 2 ;; + --hostsfile ) + HOSTSFILE="$2" + shift 2 ;; + --mllog-output-path ) + MLLOG_FILE="$2" + shift 2 ;; + --eval-loss-exit-value ) + EVAL_LOSS_EXIT_VALUE="$2" + shift 2 ;; + --profile ) + PROFILE_FLAG="--profile $2" + shift 2 ;; + --profile-steps ) + PROFILE_STEPS_FLAG="--profile-steps $2" + shift 2 ;; + -te | --use-fp8-transformer-engine ) + TRANSFORMER_ENGINE_FLAG="--use-hpu-fp8-transformer-engine" + shift 1 ;; + -fsdpa | --use-fused-sdpa ) + USE_FUSED_SDPA="--use-fused-sdpa $2" + shift 2 ;; + -fsdpa-recompute | --use-fused-sdpa-with-recompute ) + USE_FUSED_SDPA_WITH_RECOMPUTE_ARG="$2" + shift 2 ;; + --fp8-measure-interval ) + FP8_MEASURE_INTERVAL="$2" + shift 2 ;; + --use-hpu-graphs ) + HPU_GRAPHS_FLAG="--use-hpu-graphs $2" + shift 2 ;; + --cache-fp8-weight-fwd ) + HPU_GRAPHS_FLAG="--cache-fp8-weight-fwd $2" + shift 2 ;; + --ext-train-iters ) + EXTERNAL_TRAINING_ITERATIONS="$2" + shift 2 ;; + -sp | --sequence-parallel ) + SEQUENCE_PARALLEL="$2" + shift 2 ;; + --device-warmup ) + DEVICE_WARMUP=$2 + shift 2 ;; + --device-warmup-dataset-path ) + WARMUP_DATASET_PATH=$2 + shift 2 ;; + --device-warmup-iterations ) + WARMUP_ITERATIONS=$2 + shift 2 ;; + -- ) + shift + break ;; + * ) + if [[ -n "$1" ]]; then + echo "error: invalid parameter: $1" + exit -1 + fi + break ;; + esac + done + +} + +function generate_hostsfile() +{ + HOSTS_PATH=$1 + HOSTSFILE_PATH=$2 + local num_nodes=${3:-8} + + rm -rf $HOSTSFILE_PATH + touch $HOSTSFILE_PATH + + while IFS= read -r ip; do + echo "$ip slots=$num_nodes" >> $HOSTSFILE_PATH + done < "$HOSTS_PATH" + + echo "hostsfile: " + cat $HOSTSFILE_PATH +} + + +# Default values for arguments, that can be overridden from cmd by parse_args func or env variable 
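+# The defaults below describe the 175B GPT-3 reference configuration
+# (96 layers, hidden size 12288, 96 attention heads, sequence length 2048)
+# laid out across 64 devices as DP=1 x TP=8 x PP=8.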
+DATA_DIR="/mnt/weka/data/mlperf_datasets/gpt-3/c4_mlperf_19_12_2022/preprocessed_c4_spm" +NUM_NODES=8 +DEVICES_PER_NODE=8 +DP=1 +TP=8 +PP=8 +NUM_LAYERS=96 +HIDDEN_SIZE=12288 +NUM_ATTENTION_HEADS=96 +SEQ_LENGTH=2048 +DROPOUT=0.0 +MICRO_BATCH=2 +EVAL_MICRO_BATCH=8 +GLOBAL_BATCH=2048 +CLIP_GRAD=1.0 +ZERO_STAGE=0 +TRAIN_SAMPLES=84500000 +LR=2.0e-5 +MIN_LR=2.0e-6 +LR_DECAY_SAMPLES=166809600 +LR_WARMUP_SAMPLES=407040 +SEED=${RANDOM} +EVAL_ITERS=-1 +EVAL_INTERVAL=12 +EXIT_INTERVAL=500 +START_FROM_CKPT=true +SAVE_CKPT=true +SAVE_INTERVAL=500 +LOG_INTERVAL=1 +UNIVERSAL_CKPT_PATH="/mnt/weka/data/pytorch/gpt3/gpt3_spmd1x64x24_tpuv4-3072_v84_20221101_universal4000" +OUTPUT_DIR=${OUTPUT_DIR:-"/tmp"} +HOSTS="" +HOSTSFILE="/root/shared/hostsfile" +MLLOG_FILE="/tmp/result_0.txt" +EVAL_LOSS_EXIT_VALUE=2.69 +TRANSFORMER_ENGINE_FLAG="" +USE_FUSED_SDPA="--use-fused-sdpa true" +USE_FUSED_SDPA_WITH_RECOMPUTE_ARG="false" +FP8_MEASURE_INTERVAL=16 +CACHE_FP8_WEIGHT_FWD_FLAG="--cache-fp8-weight-fwd true" +HPU_GRAPHS_FLAG="--use-hpu-graphs false" +ACCUMULATE_GRADS_VIA_HOOKS="true" +EXTERNAL_TRAINING_ITERATIONS=4000 +EXTERNAL_GBS=1536 +SEQUENCE_PARALLEL=true +DEVICE_WARMUP=true +WARMUP_DATASET_PATH="/mnt/weka/data/mlperf_datasets/gpt-3/synthetic_dataset/warmup_dataset" +WARMUP_ITERATIONS=5 +CACHE_FP8_WEIGHT_FLAG="--cache-fp8-weight" + +parse_args "$@" + +if [ -f "$HOSTS" ]; then + generate_hostsfile $HOSTS $HOSTSFILE 8 +fi + +# data and model dir paths +DATA_PATH_6=$DATA_DIR/c4_en_6_c4_spm_text_document +DATA_PATH_7=$DATA_DIR/c4_en_7_c4_spm_text_document +VALID_DATA_PATH=$DATA_DIR/c4_en_validation_c4_spm_text_document +MODEL_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +# allow to override /proc file system in case it is mounted with different name on docker container +PROC_FS=${PROC_FS:-"/proc"} + +# output log path +if [ -z "$OUTPUT_DIR" ]; then + RUNTIME=`date +"%Y%m%d_%H%M"` + OUTPUT_DIR=out/gpt3/ds_z${ZERO_STAGE}_nl${NUM_LAYERS}_hs${HIDDEN_SIZE}_gb${GLOBAL_BATCH}_mb${MICRO_BATCH}_D${DP}_T${TP}_P${PP}_${RUNTIME} +fi +if [ -z "$TENSORBOARD_DIR" ]; then + TENSORBOARD_DIR=$OUTPUT_DIR/tensorboard +fi + +# saving checkpoint args +if [ $SAVE_CKPT = true ] || [ $SAVE_CKPT = 1 ]; then + if [ -z "$SAVE_CKPT_DIR" ]; then + SAVE_CKPT_DIR=$OUTPUT_DIR/checkpoints + fi + SAVE_CKPT_ARGS=" --save $SAVE_CKPT_DIR --save-interval $SAVE_INTERVAL " +fi + +if [ "$DEVICE_WARMUP" == "true" ]; then + DEVICE_WARMUP_ARG=" --device-warmup --warmup-dataset-path $WARMUP_DATASET_PATH --device-warmup-iterations $WARMUP_ITERATIONS" +fi + +# handle kill switch argument +if [ -n "$KILL_SWITCH_FILE" ]; then + KILL_SWITCH_ARG="--kill-switch-path $KILL_SWITCH_FILE" +fi + +# Checkpoint loading configure +LOAD_CHECKPOINT_ARGS="" +if [ $START_FROM_CKPT = true ] || [ $START_FROM_CKPT = 1 ]; then + CHECKPOINTS_BACKUP="$OUTPUT_DIR/../../checkpoints" + if [ "$(ls -A $CHECKPOINTS_BACKUP 2>/dev/null)" ]; then + LOAD_CHECKPOINT_ARGS=" --load $CHECKPOINTS_BACKUP " + else + LOAD_CHECKPOINT_ARGS=" --load $UNIVERSAL_CKPT_PATH --universal-checkpoint --no-load-rng " + fi +fi + +# Sequence parallelism +SEQUENCE_PARALLEL_ARG="--sequence-parallel" +PARTITIONED_MODE="false" +if [ $SEQUENCE_PARALLEL = false ]; then + SEQUENCE_PARALLEL_ARG="" + PARTITIONED_MODE="true" +fi + +# Activation checkpointing or recompute +if [[ $USE_FUSED_SDPA_WITH_RECOMPUTE_ARG == "false" ]]; then + ACTIVATION_CHECKPOINTING="--checkpoint-activations \ + --checkpoint-activations-granularity=selective " +else + ACTIVATION_CHECKPOINTING="" +fi + +mkdir -p ${OUTPUT_DIR} 
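+# With the default sizes, DeepSpeed derives a gradient-accumulation depth of
+# GLOBAL_BATCH / (MICRO_BATCH * DP) = 2048 / (2 * 1) = 1024 micro-steps from the
+# batch settings written into the JSON config below.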
+# create DS config +DS_CONFIG=${OUTPUT_DIR}/ds_config.json +cat << EOT > $DS_CONFIG +{ + "train_batch_size" : $GLOBAL_BATCH, + "train_micro_batch_size_per_gpu": $MICRO_BATCH, + "steps_per_print": $LOG_INTERVAL, + + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "gradient_clipping": $CLIP_GRAD, + "bf16": { + "enabled": true, + "accumulate_grads_via_hooks": $ACCUMULATE_GRADS_VIA_HOOKS + }, + + "wall_clock_breakdown" : false, + + "pipeline": { + "pipe_partitioned": $PARTITIONED_MODE, + "grad_partitioned": $PARTITIONED_MODE + } +} +EOT + +echo "*******************************************************" +echo "Deepspeed config:" +cat $DS_CONFIG +echo "*******************************************************" + +# DeepSpeed args +ds_args="" +ds_args=" --deepspeed ${ds_args}" +ds_args=" --deepspeed_config=$DS_CONFIG ${ds_args}" +ds_args=" --zero-stage=$ZERO_STAGE ${ds_args}" +ds_args=" --deepspeed-activation-checkpointing ${ds_args}" + +CMD="sync && \ + if [ \"\$LOCAL_RANK\" -eq \"0\" ]; then echo 3 > $PROC_FS/sys/vm/drop_caches ; fi && \ + python -u $MODEL_DIR/pretrain_gpt.py \ + --use_hpu \ + --distributed-backend=hccl \ + --tensor-model-parallel-size $TP \ + --pipeline-model-parallel-size $PP \ + --optimizer fusedadamw \ + --num-layers $NUM_LAYERS \ + --hidden-size $HIDDEN_SIZE \ + --num-attention-heads $NUM_ATTENTION_HEADS \ + --seq-length $SEQ_LENGTH \ + --loss-scale 1 \ + --max-position-embeddings $SEQ_LENGTH \ + --micro-batch-size $MICRO_BATCH \ + --eval-micro-batch-size $EVAL_MICRO_BATCH \ + --global-batch-size $GLOBAL_BATCH \ + --lr $LR \ + --min-lr $MIN_LR \ + --lr-decay-style cosine \ + --train-samples $TRAIN_SAMPLES \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --log-interval $LOG_INTERVAL \ + --train-data-path 0.5 $DATA_PATH_6 0.5 $DATA_PATH_7 \ + --valid-data-path 1.0 $VALID_DATA_PATH \ + --eval-iters $EVAL_ITERS \ + --eval-interval $EVAL_INTERVAL \ + --vocab-file $DATA_DIR/vocab.json \ + --merge-file $DATA_DIR/merges.txt \ + --split 100,0,0 \ + --clip-grad $CLIP_GRAD \ + --attention-dropout $DROPOUT \ + --hidden-dropout $DROPOUT \ + --no-query-key-layer-scaling \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --weight-decay 0.1 \ + --init-method-std 0.006 \ + --seed $SEED \ + --bf16 \ + $ACTIVATION_CHECKPOINTING \ + --tensorboard-dir $TENSORBOARD_DIR \ + --log-validation-ppl-to-tensorboard \ + --no-bias-gelu-fusion \ + --no-masked-softmax-fusion \ + --no-bias-dropout-fusion \ + --mask-tensor-adding \ + --fix-position-emb-redundant-alloc \ + --no-scaled-init \ + --no-seq-len-plus-one-tokens \ + --apply-layernorm-weight-plus-one \ + --do-layernorm-bias-weight-decay \ + --exit-interval $EXIT_INTERVAL \ + --DDP-impl local \ + --mllog-output-path $MLLOG_FILE \ + --eval-loss-exit-value $EVAL_LOSS_EXIT_VALUE \ + --ext-lr-steps $(($EXTERNAL_TRAINING_ITERATIONS*$EXTERNAL_GBS)) \ + $LOAD_CHECKPOINT_ARGS \ + $SAVE_CKPT_ARGS \ + $KILL_SWITCH_ARG \ + $TRANSFORMER_ENGINE_FLAG \ + $USE_FUSED_SDPA \ + $DEVICE_WARMUP_ARG \ + --hpu-fp8-measure-interval $FP8_MEASURE_INTERVAL \ + $CACHE_FP8_WEIGHT_FWD_FLAG \ + $HPU_GRAPHS_FLAG \ + $CACHE_FP8_WEIGHT_FLAG \ + $PROFILE_FLAG \ + $PROFILE_STEPS_FLAG \ + $SEQUENCE_PARALLEL_ARG \ + $ds_args" + + +# configure multinode +if [ "$NUM_NODES" -ne "1" -a -f "$HOSTSFILE" ] +then + MULTINODE_CMD="--force_multi \ + --hostfile=$HOSTSFILE \ + --master_addr $(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p) " +fi + +# run gpt3 +deepspeed --num_nodes ${NUM_NODES} \ + --num_gpus ${DEVICES_PER_NODE} \ + --no_local_rank \ + 
--no_python \ + $MULTINODE_CMD \ + /usr/bin/bash -c "$CMD" diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/setup.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..668a6b6ccead4824d48929f80698a024efbd2484 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/setup.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Setup for pip package.""" + +import os +import sys +import setuptools + +if sys.version_info < (3,): + raise Exception("Python 2 is not supported by Megatron.") + +from megatron.package_info import ( + __description__, + __contact_names__, + __url__, + __download_url__, + __keywords__, + __license__, + __package_name__, + __version__, +) + +with open("README.md", "r") as fh: + long_description = fh.read() + +############################################################################### +# Dependency Loading # +# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # + + +def req_file(filename): + with open(filename) as f: + content = f.readlines() + return [x.strip() for x in content] + + +install_requires = req_file("requirements.txt") + +setuptools.setup( + name=__package_name__, + # Versions should comply with PEP440. For a discussion on single-sourcing + # the version across setup.py and the project code, see + # https://packaging.python.org/en/latest/single_source_version.html + version=__version__, + description=__description__, + long_description=long_description, + long_description_content_type="text/markdown", + # The project's main homepage. + url=__url__, + author=__contact_names__, + maintainer=__contact_names__, + # The licence under which the project is released + license=__license__, + classifiers=[ + 'Intended Audience :: Developers', + 'Intended Audience :: Science/Research', + 'Intended Audience :: Information Technology', + # Indicate what your project relates to + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Software Development :: Libraries :: Python Modules', + # Supported python versions + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + # Additional Setting + 'Environment :: Console', + 'Natural Language :: English', + 'Operating System :: OS Independent', + ], + python_requires='>=3.6', + packages=setuptools.find_packages(), + install_requires=install_requires, + # Add in any packaged data. + include_package_data=True, + zip_safe=False, + # PyPI package information. 
+ keywords=__keywords__ +) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/__init__.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/convert_paxml_optimizer.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/convert_paxml_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..367a56bd67e609127a6460653a13c40df935a5b1 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/convert_paxml_optimizer.py @@ -0,0 +1,426 @@ +# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################### +# Copyright (C) 2023 Habana Labs, Ltd. an Intel Company +############################################################################### +"""Script to load layer(s) of the LLM checkpoint using TensorStore. +More details about TensorStore, please visit +https://github.com/google/tensorstore . +""" + +import argparse +import tensorstore as ts +import glob +import json +import torch +import os +import sys +import multiprocessing +from datetime import datetime + +def get_numpy_array(filename): + spec = {'driver': 'zarr', 'metadata_key': '.zarray', 'kvstore': {}} + spec['kvstore'] = { + 'driver': 'file', + 'path': filename, + } + + t = ts.open(ts.Spec(spec), open=True).result() + t_v = t.read().result() + return t_v + +def get_torch_tensor(filename, dtype): + array = get_numpy_array(filename) + array_torch = torch.from_numpy(array) + array_torch = array_torch.to(dtype) + return array_torch + +def get_layer_info(output_dir, lyr_num, nv_name): + lyr_dir = os.path.join(output_dir, F"layer_{str(lyr_num)}") + lyr_name = "language_model.encoder.layers."+str(lyr_num)+"."+nv_name + return lyr_dir, lyr_name + +def store_tensor(save_tensor, lyr_dir, lyr_name, params_dict): + optim_state = {} + optim_state["state"] = {} + optim_state["state"]["exp_avg"] = save_tensor["m"] + optim_state["state"]["exp_avg_sq"] = save_tensor["v"] + optim_state["fp32_from_fp16_params"] = save_tensor["w"] + if params_dict is not None: + optim_state["param_groups"] = params_dict + torch.save(optim_state, os.path.join(lyr_dir, lyr_name + ".pt")) + +def copy_layers(args, nv_name, g_name, prefix, params_dict): + + array_torch = {} + g_name_path = os.path.join(args.google_ckpts, prefix + ".m." + g_name) + array_torch["m"] = get_torch_tensor(g_name_path, args.dtype) + g_name_path = os.path.join(args.google_ckpts, prefix + ".v." + g_name) + array_torch["v"] = get_torch_tensor(g_name_path, args.dtype) + g_name_path = os.path.join(args.google_ckpts, "mdl_vars." 
+ g_name) + array_torch["w"] = get_torch_tensor(g_name_path, args.dtype) + + print(F"G Name: {g_name}, shape: {array_torch['m'].shape}", flush=True) + save_tensor = {} + if nv_name == "language_model.embedding.position_embeddings.weight": + start_idx = 0 + end_idx = 2048 + for key in list(array_torch.keys()): + save_tensor[key] = array_torch[key][start_idx: end_idx, :].contiguous().detach().clone() + print(F"NV Name: {nv_name}, shape: {save_tensor['m'].shape}", flush=True) + store_tensor(save_tensor, args.output_dir, nv_name, params_dict) + elif nv_name == "language_model.embedding.word_embeddings.weight": + for key in list(array_torch.keys()): + save_tensor[key] = array_torch[key].transpose(0, 1).contiguous().detach().clone() + print(F"NV Name: {nv_name}, shape: {save_tensor['m'].shape}", flush=True) + store_tensor(save_tensor, args.output_dir, nv_name, params_dict) + store_tensor(save_tensor, args.output_dir, "word_embeddings.weight", params_dict) + else: + for key in list(array_torch.keys()): + save_tensor[key] = array_torch[key].detach().clone() + print(F"NV Name: {nv_name}, shape: {save_tensor['m'].shape}", flush=True) + store_tensor(save_tensor, args.output_dir, nv_name, params_dict) + del save_tensor + del array_torch + +def split_encoder_layers(args, nv_name, g_name, prefix, params_dict): + array_torch = {} + g_name_path = os.path.join(args.google_ckpts, prefix + ".m." + g_name) + array_torch["m"] = get_torch_tensor(g_name_path, args.dtype) + g_name_path = os.path.join(args.google_ckpts, prefix + ".v." + g_name) + array_torch["v"] = get_torch_tensor(g_name_path, args.dtype) + g_name_path = os.path.join(args.google_ckpts, "mdl_vars." + g_name) + array_torch["w"] = get_torch_tensor(g_name_path, args.dtype) + print(F"G Name: {g_name}, shape: {array_torch['m'].shape}", flush=True) + save_tensor = {} + if ( + nv_name == "mlp.dense_4h_to_h.bias" + or nv_name == "post_attention_layernorm.bias" + or nv_name == "post_attention_layernorm.weight" + or nv_name == "input_layernorm.bias" + or nv_name == "input_layernorm.weight" + or nv_name == "self_attention.dense.bias" + or nv_name == "mlp.dense_h_to_4h.bias" + or nv_name == "self_attention.dense.weight" + ): + print(F"1st Check: {nv_name}") + for lyr_num in range(args.num_layers): + print("layer_num=",lyr_num) + lyr_dir, lyr_name = get_layer_info(args.output_dir, lyr_num, nv_name) + for key in list(array_torch.keys()): + save_tensor[key] = array_torch[key][lyr_num].contiguous().detach().clone() + if lyr_num == (args.num_layers // 2): + print(F"NV Name: {nv_name}, shape: {save_tensor['m'].shape}", flush=True) + store_tensor(save_tensor, lyr_dir, lyr_name, params_dict) + save_tensor = {} + + elif ( + nv_name == "mlp.dense_h_to_4h.weight" + or nv_name == "mlp.dense_4h_to_h.weight" + ): + print(F"2nd Check: {nv_name}") + for lyr_num in range(args.num_layers): + print("layer_num=",lyr_num) + lyr_dir, lyr_name = get_layer_info(args.output_dir, lyr_num, nv_name) + for key in list(array_torch.keys()): + save_tensor[key] = array_torch[key][lyr_num].transpose(0, 1).contiguous().detach().clone() + #save_tensor = save_tensor.transpose(0, 1).clone() + if lyr_num == (args.num_layers // 2): + print(F"NV Name: {nv_name}, shape: {save_tensor['v'].shape}", flush=True) + store_tensor(save_tensor, lyr_dir, lyr_name, params_dict) + save_tensor = {} + elif nv_name == "self_attention.query_key_value.weight": + print(F"3nd Check: {nv_name}") + # nv shape [4608, 12288] => 4608 = 12 (heads) * 3 (qkv) * 128 (hidden_size / heads) + # google shape [96, 3, 12288, 96, 128] 
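+        # The permute(2, 0, 3, 1) below reorders each layer's tensor from
+        # [3 (qkv), hidden, heads, head_dim] to [heads, 3 (qkv), head_dim, hidden],
+        # i.e. Megatron's combined query_key_value layout, so that a later
+        # tensor-parallel split along dim 0 keeps whole attention heads together.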
+ for lyr_num in range(args.num_layers): + print("layer_num=",lyr_num) + lyr_dir, lyr_name = get_layer_info(args.output_dir, lyr_num, nv_name) + for key in list(array_torch.keys()): + save_tensor[key] = array_torch[key][lyr_num].permute(2, 0, 3, 1).contiguous().detach().clone() + #save_tensor = save_tensor.permute(2, 0, 3, 1).contiguous().clone() + if lyr_num == (args.num_layers // 2): + print(F"NV Name: {nv_name}, shape: {save_tensor['w'].shape}", flush=True) + store_tensor(save_tensor, lyr_dir, lyr_name, params_dict) + save_tensor = {} + elif nv_name == "self_attention.query_key_value.bias": + print(F"4rd Check: {nv_name}") + # nv shape [4608] => 4608 = 12 (heads) * 3 (qkv) * 128 (hidden_size / heads) + # google shape [96, 3, 96, 128] + for lyr_num in range(args.num_layers): + print("layer_num=",lyr_num) + lyr_dir, lyr_name = get_layer_info(args.output_dir, lyr_num, nv_name) + for key in list(array_torch.keys()): + save_tensor[key] = array_torch[key][lyr_num].permute(1, 0, 2).contiguous().detach().clone() + #save_tensor = save_tensor.permute(1, 0, 2).contiguous().clone() + if lyr_num == (args.num_layers // 2): + print(F"NV Name: {nv_name}, shape: {save_tensor['m'].shape}", flush=True) + store_tensor(save_tensor, lyr_dir, lyr_name, params_dict) + save_tensor = {} + else: + print(F"Not a valid layer name: {nv_name}", flush=True) + sys.exit() + del array_torch + + +def arrange_google_ckpts(args, prefix1, prefix2): + + output_dir = args.output_dir + num_layers = args.num_layers + + params_dict = None + if args.params_file is not None: + with open(args.params_file, 'r') as f: + params_dict = json.load(f) + else: + print(F"For Megatron-LM Optimizer to get the right optimizer params, provide params_file json", flush=True) + + if args.dtype == "bf16": + args.dtype = torch.bfloat16 + else: + args.dtype = torch.float + + for lyr_num in range(num_layers): + pp_id_dir = os.path.join(output_dir, f"layer_{str(lyr_num)}") + os.makedirs(pp_id_dir, exist_ok=True) + + #layers that are not part of encoder blocks. 
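+    # Conversion runs in two passes: copy_layers() handles the shared tensors
+    # (word/position embeddings and the final layernorm), while
+    # split_encoder_layers() slices each stacked per-layer PaxML tensor into one
+    # file per transformer layer. Both passes fan out over a multiprocessing pool.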
+ torch.multiprocessing.set_start_method("spawn") + torch.multiprocessing.set_sharing_strategy("file_system") + + + nv_g_names_pairs = [ + ("language_model.embedding.word_embeddings.weight", "params.lm.softmax.logits_ffn.linear.w"), + ("language_model.embedding.position_embeddings.weight", "params.lm.position_emb.emb_var"), + ("language_model.encoder.final_layernorm.weight", "params.lm.final_ln.scale"), + ("language_model.encoder.final_layernorm.bias", "params.lm.final_ln.bias"), + ] + pool = multiprocessing.Pool(args.pool) + pool.starmap( + copy_layers, + [ + ( + args, + nv_name, + g_name, + prefix1, + params_dict, + ) + for (nv_name, g_name) in nv_g_names_pairs + ], + ) + pool.close() + pool.join() + + + + nv_g_names_pairs1 = [ + ("mlp.dense_4h_to_h.bias", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer2.bias.b"), + ] + + pool = multiprocessing.Pool(args.pool) + pool.starmap( + split_encoder_layers, + [ + ( + args, + nv_name, + g_name, + prefix2, + params_dict, + ) + for (nv_name, g_name) in nv_g_names_pairs1 + ], + ) + pool.close() + pool.join() + + nv_g_names_pairs2 = [ + ("post_attention_layernorm.bias", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.layer_norm.bias"), + ("post_attention_layernorm.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.layer_norm.scale"), + ("input_layernorm.bias", "params.lm.transformer.repeat.sub.x_layers_0.layer_norm.bias"), + ("input_layernorm.weight", "params.lm.transformer.repeat.sub.x_layers_0.layer_norm.scale"), + ("self_attention.dense.bias", "params.lm.transformer.repeat.sub.x_layers_0.self_attention.post.b"), + ] + + pool = multiprocessing.Pool(args.pool) + pool.starmap( + split_encoder_layers, + [ + ( + args, + nv_name, + g_name, + prefix2, + params_dict, + ) + for (nv_name, g_name) in nv_g_names_pairs2 + ], + ) + pool.close() + pool.join() + + nv_g_names_pairs3 = [ + ("mlp.dense_h_to_4h.bias", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer1.bias.b"), + ] + + pool = multiprocessing.Pool(args.pool) + pool.starmap( + split_encoder_layers, + [ + ( + args, + nv_name, + g_name, + prefix2, + params_dict, + ) + for (nv_name, g_name) in nv_g_names_pairs3 + ], + ) + pool.close() + pool.join() + + nv_g_names_pairs4 = [ + ("mlp.dense_h_to_4h.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer1.linear.w"), + ] + + pool = multiprocessing.Pool(args.pool) + pool.starmap( + split_encoder_layers, + [ + ( + args, + nv_name, + g_name, + prefix2, + params_dict, + ) + for (nv_name, g_name) in nv_g_names_pairs4 + ], + ) + pool.close() + pool.join() + + nv_g_names_pairs5 = [ + ("mlp.dense_4h_to_h.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer2.linear.w"), + ("self_attention.dense.weight", "params.lm.transformer.repeat.sub.x_layers_0.self_attention.post.w"), + ("self_attention.query_key_value.weight", + "params.lm.transformer.repeat.sub.x_layers_0.self_attention.combined_qkv.w"), + ("self_attention.query_key_value.bias", + "params.lm.transformer.repeat.sub.x_layers_0.self_attention.combined_qkv.b"), + ] + + pool = multiprocessing.Pool(args.pool) + pool.starmap( + split_encoder_layers, + [ + ( + args, + nv_name, + g_name, + prefix2, + params_dict, + ) + for (nv_name, g_name) in nv_g_names_pairs5 + ], + ) + pool.close() + pool.join() + + exit(0) + + nv_g_names_pairs = [ + ("mlp.dense_4h_to_h.bias", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer2.bias.b"), + ("post_attention_layernorm.bias", 
"params.lm.transformer.repeat.sub.x_layers_0.ff_layer.layer_norm.bias"), + ("post_attention_layernorm.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.layer_norm.scale"), + ("input_layernorm.bias", "params.lm.transformer.repeat.sub.x_layers_0.layer_norm.bias"), + ("input_layernorm.weight", "params.lm.transformer.repeat.sub.x_layers_0.layer_norm.scale"), + ("self_attention.dense.bias", "params.lm.transformer.repeat.sub.x_layers_0.self_attention.post.b"), + ("mlp.dense_h_to_4h.bias", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer1.bias.b"), + ("mlp.dense_h_to_4h.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer1.linear.w"), + ("mlp.dense_4h_to_h.weight", "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer2.linear.w"), + ("self_attention.dense.weight", "params.lm.transformer.repeat.sub.x_layers_0.self_attention.post.w"), + ("self_attention.query_key_value.weight", + "params.lm.transformer.repeat.sub.x_layers_0.self_attention.combined_qkv.w"), + ("self_attention.query_key_value.bias", + "params.lm.transformer.repeat.sub.x_layers_0.self_attention.combined_qkv.b"), + ] + + pool = multiprocessing.Pool(args.pool) + pool.starmap( + split_encoder_layers, + [ + ( + args, + nv_name, + g_name, + prefix2, + params_dict, + ) + for (nv_name, g_name) in nv_g_names_pairs + ], + ) + pool.close() + pool.join() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument( + '--google_ckpts', "-gckpt", + type=str, + default='/workspace/data/checkpoint_00001300', + help='Google Checkpoint directory') + parser.add_argument( + '--output_dir', "-o", + type=str, + default='google_to_torch_output', + help='Output directory') + parser.add_argument( + '--dtype', "-dt", + type=str, + default="float", + help='datatype') + parser.add_argument( + '--num_layers', "-nl", + type=int, + default=96, + help='number of encoder layers') + parser.add_argument( + '--params_file', "-pl", + type=str, + default=None, + help='Json File for Param Groups') + parser.add_argument( + '--pool', "-p", + type=int, + default=4, + help='parallel processes') + + args = parser.parse_args() + print("\n=============== Argument ===============") + for key in vars(args): + print(f"{key}: {vars(args)[key]}") + print("========================================") + + param1 = "opt_states_0.no_prefix_2" #Assij + param2 = "opt_states_0.p#96#i-1_2" + + + start_time = datetime.now() + arrange_google_ckpts(args, param1, param2) + stop_time = datetime.now() + run_time = stop_time - start_time + print(f"[INFO] Spend {run_time} (h:m:s) to convert the model") \ No newline at end of file diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_checkpoint.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..decd98c359097ea8d84fa4c56fc1c54282469858 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_checkpoint.py @@ -0,0 +1,196 @@ +import os +from typing import Dict +import torch + +ZERO_FILE_PREFIX = 'zero_pp_rank_' +LAYER_FILE_PREFIX = 'layer_' +MP_RANK_FILE_PREFIX = 'mp_rank_' +EMBEDDING_LAYER_INDEX = 0 +FINAL_LAYER_NORM_INDEX = -1 +ARGS_KEY = 'args' +ITERATION_KEY = 'iteration' +SEQUENTIAL_LAYERS = [ + 'input_layernorm.weight', 'input_layernorm.bias', + 
'self_attention.dense.bias', + 'post_attention_layernorm.weight', 'post_attention_layernorm.bias', + 'mlp.dense_4h_to_h.bias', + 'position_embeddings.weight' +] + +LAYER_CONCAT_DIM = { + 'self_attention.dense.weight': 1, + 'mlp.dense_4h_to_h.weight': 1 +} + +class DeepSpeedCheckpoint(object): + def __init__(self, dir, tp_degree=None, pp_degree=None, no_pp=False): + self.dir = dir + self.no_pp = no_pp + self.file_list = self._get_files(dir) + self.zero_files = self._get_files_with_prefix(self.file_list, ZERO_FILE_PREFIX) + self.layer_files = self._get_files_with_prefix(self.file_list, LAYER_FILE_PREFIX) + self.mp_rank_files = self._get_files_with_prefix(self.file_list, MP_RANK_FILE_PREFIX) + self.layer_keys = self._get_layer_keys() + self.layer_count = len(self.layer_keys) + if not self.no_pp: + self.original_tp_degree = len(self._get_files_with_prefix(self.layer_files, f'{LAYER_FILE_PREFIX}01')) + self.original_pp_degree = len(self.mp_rank_files) // self.original_tp_degree + else: + self.original_tp_degree = len(self.mp_rank_files) + self.original_pp_degree = 1 + self.dp_degree = len(self.zero_files) // (self.original_pp_degree * self.original_tp_degree) + self.tp_degree = self.original_tp_degree if tp_degree is None else tp_degree + self.pp_degree = self.original_pp_degree if pp_degree is None else pp_degree + self.global_state = {} + + self._sanity_check() + self.pp_to_transformer_map = self._build_pp_transformer_map() + self.transformer_file_map = self._build_transformer_file_map() + if not self.no_pp: + self.tp_to_embedding_map = self._build_tp_other_layer_map(EMBEDDING_LAYER_INDEX) + self.tp_to_final_norm_map = self._build_tp_other_layer_map(FINAL_LAYER_NORM_INDEX) + self._build_global_state() + + + + def show_tp_embedding_map(self): + self._dump_mapping(self.tp_to_embedding_map, 'tp_to_embedding_layers') + + def show_tp_final_norm_map(self): + self._dump_mapping(self.tp_to_final_norm_map, 'tp_to_final_norm_layers') + + def show_pp_tranformer_map(self): + self._dump_mapping(self.pp_to_transformer_map, 'pp_to_tranformer_layers') + + def show_transformer_file_map(self): + self._dump_mapping(self.transformer_file_map, 'rank_to_tranformer_files') + + def _build_global_state(self): + sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu')) + self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0) + self.global_state[ARGS_KEY] = sd.get(ARGS_KEY, None) + + def get_iteration(self): + if not ITERATION_KEY in self.global_state: + sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu')) + self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0) + + return self.global_state[ITERATION_KEY] + + def get_embedding_state(self, tp_index: int) -> Dict: + assert tp_index in self.tp_to_embedding_map.keys() + sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in self.tp_to_embedding_map[tp_index]] + sd = self._merge_state_dicts(sd_list) + return sd + + def get_args(self): + if not ARGS_KEY in self.global_state: + sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu')) + self.global_state[ARGS_KEY] = sd.get(ARGS_KEY, None) + + return self.global_state[ARGS_KEY] + + + def get_transformer_state(self, tp_index: int, pp_index: int) -> list: + assert tp_index < self.tp_degree + assert pp_index < self.pp_degree + t_list = [] + for fname_list in self.transformer_file_map[(tp_index, pp_index)]: + sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in fname_list] + sd = self._merge_state_dicts(sd_list) + 
t_list.append(sd) + return t_list + + def get_final_norm_state(self, tp_index:int) -> Dict: + assert tp_index in self.tp_to_final_norm_map.keys() + sd = torch.load(self.tp_to_final_norm_map[tp_index][0], map_location=torch.device('cpu')) + return sd + + def _build_tp_other_layer_map(self, layer_index:int): + assert layer_index < len(self.layer_files) + layer_files = self._get_files_with_prefix(self.layer_files, self.layer_keys[layer_index]) + layer_file_partitions = self._partition_data(layer_files, self.tp_degree) + data_map = {i:flist for i, flist in enumerate(layer_file_partitions)} + return data_map + + def _build_pp_transformer_map(self): + data_map = {} + transformer_layers = self.layer_keys[1:-1] + layers_per_pp = len(transformer_layers) // self.pp_degree + data_map = {i:transformer_layers[i*layers_per_pp:(i+1)*layers_per_pp] for i in range(0, self.pp_degree)} + return data_map + + def _dump_mapping(self, data_map, map_tag = None): + if map_tag is not None: + print(f'Dump mapping: {map_tag}') + for k, v in data_map.items(): + print(f'{k} = {v}') + + def _build_transformer_file_map(self): + transformer_layer_keys = self.layer_keys[1:-1] + file_map = {} + layers_per_pp = len(transformer_layer_keys) // self.pp_degree + for key_index, layer_key in enumerate(transformer_layer_keys): + pp_index = key_index // layers_per_pp + layer_files = self._get_files_with_prefix(self.layer_files, layer_key) + layer_file_partitions = self._partition_data(layer_files, self.tp_degree) + for tp_index in range(self.tp_degree): + map_key = (tp_index, pp_index) + if not map_key in file_map.keys(): + file_map[map_key] = [] + file_map[map_key].append(layer_file_partitions[tp_index]) + + return file_map + + def _sanity_check(self): + assert len(self.mp_rank_files) % self.tp_degree == 0 + assert len(self.zero_files) % (self.pp_degree * self.tp_degree) == 0 + if not self.no_pp: + assert len(self.layer_keys) > 2 + assert (len(self.layer_keys) - 2) % self.pp_degree == 0 + + def _get_files_with_prefix(self, all_files, prefix): + file_list = [] + for file_path in all_files: + _, fname = os.path.split(file_path) + if fname.startswith(prefix): + file_list.append(file_path) + + return sorted(file_list) + + def validate_files(self): + for file in self.file_list: + if not os.path.isfile(file): + print(f'Error: {file} is not existent') + + def _get_files(self, dir): + file_list = [] + for root, dirs, files in os.walk(dir): + for file in files: + file_list.append(os.path.join(root, file)) + return file_list + + def _get_layer_keys(self): + key_set = set() + key_len = len(LAYER_FILE_PREFIX) + 2 + for file_path in self.layer_files: + _, fname = os.path.split(file_path) + key_set.add(fname[:key_len]) + return sorted(list(key_set)) + + def _partition_data(self, data_list, num_partitions): + num_elems = len(data_list) + assert num_elems % num_partitions == 0 + partition_size = num_elems // num_partitions + partitions_list = [data_list[i:i+partition_size] for i in range(0, num_elems, partition_size)] + return partitions_list + + def _merge_state_dicts(self, sd_list): + merged_sd = {} + for key in sd_list[0].keys(): + if not key in SEQUENTIAL_LAYERS: + cat_dim = LAYER_CONCAT_DIM.get(key, 0) + merged_sd[key] = torch.cat([sd[key] for sd in sd_list], dim=cat_dim) + else: + merged_sd[key] = sd_list[0][key] + return merged_sd diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_to_megatron.py 
b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_to_megatron.py new file mode 100644 index 0000000000000000000000000000000000000000..017036af470c9be4a5ad74478d2c631642432fcb --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_to_megatron.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python + +import argparse +import os +import torch +from collections import OrderedDict +from .deepspeed_checkpoint import ARGS_KEY, DeepSpeedCheckpoint + +MODEL_KEY = 'model' +ARGS_KEY = 'args' +LANGUGAGE_MODEL_KEY = 'language_model' +EMBEDDING_KEY = 'embedding' +ENCODER_KEY = 'encoder' +WORD_EMBEDDINGS_FOR_HEAD_KEY = 'word_embeddings_for_head' +WORD_EMBEDDINGS_KEY = 'word_embeddings' +FINAL_LAYER_NORM_KEY ='final_layernorm' +CHECKPOINT_VERSION_KEY = 'checkpoint_version' +CHECKPOINT_VERSION_VALUE = 3.0 +ITERATION_KEY = 'iteration' + +def parse_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument('--input_folder', default=None, type=str, help='Input DeepSpeed Checkpoint folder') + parser.add_argument('--output_folder', default=None, type=str, help='Output Megatron checkpoint folder') + parser.add_argument('--target_tp', default=1, type=int, help='Target TP degree') + parser.add_argument('--target_pp', default=1, type=int, help='Target PP degree') + parser.add_argument('--for_release', action='store_true', help='Convert for release purpose, reset some (progress) counters.') + args = parser.parse_args() + print(f'args = {args}') + return args + + +def _convert_ds_transformer_state(sd_list): + new_sd = OrderedDict() + for i, sd in enumerate(sd_list): + for key, value in sd.items(): + new_key = f'layers.{i}.{key}' + new_sd[new_key] = value + + return new_sd + +def _create_checkpoint_paths(base_folder, iteration, tp_degree, pp_degree): + path_list = [] + iter_folder = f'iter_{iteration:07d}' + for i in range(0, tp_degree): + path_list.append([]) + for j in range(0, pp_degree): + rank_folder = f'mp_rank_{i:02d}' if pp_degree == 1 else f'mp_rank_{i:02d}_{j:03d}' + ckpt_path = os.path.join(rank_folder, 'model_optim_rng.pt') + path_list[i].append(os.path.join(base_folder, iter_folder, ckpt_path)) + + return path_list + + +def _create_megatron_dict(): + language_model_dict = { + EMBEDDING_KEY: {}, + ENCODER_KEY: {} + } + megatron_dict = { + MODEL_KEY: {LANGUGAGE_MODEL_KEY: language_model_dict}, + CHECKPOINT_VERSION_KEY: CHECKPOINT_VERSION_VALUE + } + return megatron_dict + + +def _save_checkpoint(file_path, chkpt_sd): + dir, _ = os.path.split(file_path) + os.makedirs(dir, exist_ok=True) + torch.save(chkpt_sd, file_path) + + +def _renest_sd(sd): + new_sd = OrderedDict() + for key, value in sd.items(): + a, b = key.split('.') + new_sd[a] = {b: value} + return new_sd + + +def _create_rank_checkpoint(ds_checkpoint, checkpoint_path, tp_index, pp_index, for_release=False): + meg_encoder_sd = OrderedDict() + meg_embedding_sd = OrderedDict() + meg_embedding_for_head_sd = OrderedDict() + + transformer_sd = ds_checkpoint.get_transformer_state(tp_index, pp_index) + meg_encoder_sd.update(_convert_ds_transformer_state(transformer_sd)) + + if pp_index in [0, ds_checkpoint.pp_degree - 1]: + embedding_sd = ds_checkpoint.get_embedding_state(tp_index) + nested_embedding_sd = _renest_sd(embedding_sd) + if pp_index == 0: + meg_embedding_sd.update(nested_embedding_sd) + + if pp_index == ds_checkpoint.pp_degree -1: + for key, value in embedding_sd.items(): + if key.startswith(WORD_EMBEDDINGS_KEY): + fields = 
key.split('.') + new_fields = fields[1:] + new_key = '.'.join(new_fields) + meg_embedding_for_head_sd[new_key] = value + + final_norm_sd = ds_checkpoint.get_final_norm_state(tp_index) + new_final_norm_sd = {f'{FINAL_LAYER_NORM_KEY}.{key}': value for key, value in final_norm_sd.items()} + meg_encoder_sd.update(new_final_norm_sd) + + checkpoint_sd = _create_megatron_dict() + + iteration = ds_checkpoint.get_iteration() + checkpoint_sd[ITERATION_KEY] = iteration + if pp_index == 0: + checkpoint_sd[MODEL_KEY][LANGUGAGE_MODEL_KEY][EMBEDDING_KEY] = meg_embedding_sd + checkpoint_sd[MODEL_KEY][LANGUGAGE_MODEL_KEY][ENCODER_KEY] = meg_encoder_sd + if pp_index == ds_checkpoint.pp_degree -1: + checkpoint_sd[MODEL_KEY][WORD_EMBEDDINGS_FOR_HEAD_KEY] = meg_embedding_for_head_sd + + checkpoint_sd[ARGS_KEY] = ds_checkpoint.get_args() + # Adjust specific fields + checkpoint_sd[ARGS_KEY].tensor_model_parallel_size = ds_checkpoint.tp_degree + checkpoint_sd[ARGS_KEY].pipeline_model_parallel_size = ds_checkpoint.pp_degree + if for_release: + checkpoint_sd[ARGS_KEY].consumed_train_samples = 0 + checkpoint_sd[ARGS_KEY].consumed_valid_samples = 0 + + return checkpoint_sd + + +def _create_latest_file(base_folder, iteration): + file_path = os.path.join(base_folder, 'latest_checkpointed_iteration.txt') + os.makedirs(base_folder, exist_ok=True) + with open(file_path, 'w') as f: + f.write(str(iteration)) + +def main(): + print(f'Convert DeepSpeed Checkpoint to Megatron Checkpoint') + + args = parse_arguments() + print(f'Converting DeepSpeed checkpoint in {args.input_folder} to Megatron checkpoint in {args.output_folder}') + + ds_checkpoint = DeepSpeedCheckpoint(args.input_folder, args.target_tp, args.target_pp) + iteration = ds_checkpoint.get_iteration() + _create_latest_file(args.output_folder, iteration) + checkpoint_paths = _create_checkpoint_paths(args.output_folder, iteration, ds_checkpoint.tp_degree, ds_checkpoint.pp_degree) + for i in range(0, ds_checkpoint.tp_degree): + for j in range(0, ds_checkpoint.pp_degree): + sd = _create_rank_checkpoint(ds_checkpoint, i, j, args.for_release) + _save_checkpoint(checkpoint_paths[i][j], sd) + +if __name__ == "__main__": + main() diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_to_transformers.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_to_transformers.py new file mode 100644 index 0000000000000000000000000000000000000000..69375642c2b4edaccc51de22b8f108060e0bdc25 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/deepspeed_to_transformers.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python + +import os +import torch +import json + +from deepspeed_checkpoint import DeepSpeedCheckpoint +from deepspeed_to_megatron import _create_rank_checkpoint, parse_arguments + +# the import was tested to work with this version +# https://github.com/huggingface/transformers/commit/0af901e83 if it diverges we may consider +# copying that version here instead +from transformers.models.megatron_gpt2.convert_megatron_gpt2_checkpoint import convert_megatron_checkpoint +from transformers import GPT2Config + +def main(): + + # this first part comes mainly from deepspeed_to_megatron.main + args = parse_arguments() + print(f'Converting DeepSpeed checkpoint in {args.input_folder} to HF Transformers checkpoint in {args.output_folder}') + + ds_checkpoint = DeepSpeedCheckpoint(args.input_folder, args.target_tp, 
args.target_pp) + iteration = ds_checkpoint.get_iteration() + input_state_dict = _create_rank_checkpoint(ds_checkpoint, 0, 0, args.for_release) + + # the 2nd part comes from transformers.models.megatron_gpt2.convert_megatron_gpt2_checkpoint.main + # Spell out all parameters in case the defaults change. + config = GPT2Config( + vocab_size=50257, + n_positions=1024, + n_ctx=1024, + n_embd=1024, + n_layer=24, + n_head=16, + n_inner=4096, + activation_function="gelu", # used to be "gelu_new" in earlier versions + resid_pdrop=0.1, + embd_pdrop=0.1, + attn_pdrop=0.1, + layer_norm_epsilon=1e-5, + initializer_range=0.02, + summary_type="cls_index", + summary_use_proj=True, + summary_activation=None, + summary_proj_to_labels=True, + summary_first_dropout=0.1, + scale_attn_weights=True, + gradient_checkpointing=False, + use_cache=True, + bos_token_id=50256, + eos_token_id=50256, + ) + + # Convert. + print("Converting to HF Checkpoint") + output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config) + + basename = args.output_folder + os.makedirs(basename, exist_ok=True) + + # Print the structure of converted state dict. + #if args.print_checkpoint_structure: + # recursive_print(None, output_state_dict) + + # Store the config to file. + output_config_file = os.path.join(basename, "config.json") + output_config = config.to_dict() + output_config["architectures"] = ["GPT2LMHeadModel"] + output_config["model_type"] = "gpt2" + print(f'Saving config to "{output_config_file}"') + with open(output_config_file, "w") as f: + json.dump(output_config, f) + + # Store the state_dict to file. + output_checkpoint_file = os.path.join(basename, "pytorch_model.bin") + print(f'Saving checkpoint to "{output_checkpoint_file}"') + torch.save(output_state_dict, output_checkpoint_file) + + print("Now add tokenizer files and upload to the hub") + + +if __name__ == "__main__": + main() diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/ds_to_universal.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/ds_to_universal.py new file mode 100644 index 0000000000000000000000000000000000000000..e0467c91de7510e7d7a540d656f627fffaee777b --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/ds_to_universal.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python + +from collections import OrderedDict +from functools import partial +import argparse +import glob +import itertools +import multiprocessing +import os +import re +import shutil +import torch +import tqdm + +from deepspeed.checkpoint import DeepSpeedCheckpoint + +MODEL_KEY = 'model' +ARGS_KEY = 'args' +LANGUAGE_MODEL_KEY = 'language_model' +EMBEDDING_KEY = 'embedding' +ENCODER_KEY = 'encoder' +WORD_EMBEDDINGS_FOR_HEAD_KEY = 'word_embeddings_for_head' +WORD_EMBEDDINGS_KEY = 'word_embeddings' +FINAL_LAYER_NORM_KEY = 'final_layernorm' +CHECKPOINT_VERSION_KEY = 'checkpoint_version' +CHECKPOINT_VERSION_VALUE = 3.0 +ITERATION_KEY = 'iteration' +ORIGINAL_VOCAB_SIZE = 'original_vocab_size' + + +def parse_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--input_folder', + type=str, + help='Input DeepSpeed Checkpoint folder') + parser.add_argument( + '--output_folder', + type=str, + help='Output Megatron checkpoint folder') + parser.add_argument( + '--num_extract_workers', + default=4, + type=int, + help='How many parallel processes to extract zero shards') + parser.add_argument( + 
'--num_merge_workers', + default=2, + type=int, + help='How many parallel processes to merge tp slices ' + '(more memory intensive, use much fewer than --num_extract_workers))') + + args = parser.parse_args() + print(f'args = {args}') + return args + + +def _convert_ds_transformer_state(sd_list): + new_sd = OrderedDict() + for i, sd in enumerate(sd_list): + for key, value in sd.items(): + new_key = f'layers.{i}.{key}' + new_sd[new_key] = value + + return new_sd + + +def _create_megatron_dict(): + language_model_dict = {EMBEDDING_KEY: {}, ENCODER_KEY: {}} + megatron_dict = { + MODEL_KEY: { + LANGUAGE_MODEL_KEY: language_model_dict + }, + CHECKPOINT_VERSION_KEY: CHECKPOINT_VERSION_VALUE + } + return megatron_dict + + +def _save_checkpoint(file_path, chkpt_sd): + ckp_dir, _ = os.path.split(file_path) + os.makedirs(ckp_dir, exist_ok=True) + torch.save(chkpt_sd, file_path) + + +def extract_zero_shards(out_path, ds_checkpoint, indices_3d): + pp_index, tp_index, dp_index = indices_3d + sd = ds_checkpoint.get_zero_checkpoint_state( + pp_index=pp_index, + tp_index=tp_index, + dp_index=dp_index) + + optim_sd = sd["optimizer_state_dict"] + param_slice_mappings = optim_sd["param_slice_mappings"] + + # dict + state_groups = optim_sd["base_optimizer_state"]["state"] + + # list + fp32_groups = optim_sd["single_partition_of_fp32_groups"] + param_groups_cnt = len(state_groups) + + for param_group_id in range(param_groups_cnt): + flat_state = dict( + exp_avg=state_groups[param_group_id]["exp_avg"], + exp_avg_sq=state_groups[param_group_id]["exp_avg_sq"], + fp32=fp32_groups[param_group_id], + ) + + for name, fragment_mapping in param_slice_mappings[param_group_id].items(): + if "tied_modules.embed" in name and pp_index > 0: + # Skip word_embeddings.weight that is replicated in first and last pp stages + # Skip position_embeddings.weight that is only used in first pp stage + continue + + for state_key in flat_state.keys(): + dump_param_fragment(out_path, tp_index, dp_index, state_key, + flat_state[state_key], name, + fragment_mapping.start, + fragment_mapping.numel) + + +def dump_param_fragment(out_path, tp_index, dp_index, state_name, + state_flat_tensor, param_name, offset, numel): + param_base_path = os.path.join(out_path, param_name, str(tp_index)) + os.makedirs(param_base_path, exist_ok=True) + + counter = f"{dp_index:0>2d}" + path = os.path.join(param_base_path, f"{state_name}.{counter}") + + # clone to force tensor storage to ignore views + t = state_flat_tensor.narrow(0, offset, numel).clone() + _save_checkpoint(path, t) + + +def _merge_zero_shards(param_base_path, state, tp_degree, slice_shape): + slices = [] + for tp_index in range(tp_degree): + prefix_path = os.path.join(param_base_path, str(tp_index), f"{state}") + paths = sorted(list(glob.glob(f"{prefix_path}.*"))) + shards = [torch.load(p) for p in paths] + param_slice = torch.cat(shards, dim=0).reshape(slice_shape) + slices.append(param_slice) + + return slices + + +def _strip_vocab_padding(ds_checkpoint, padded_vocab_tensor): + checkpoint_info = ds_checkpoint.get_checkpoint_info() + return padded_vocab_tensor.narrow(0, 0, checkpoint_info[ORIGINAL_VOCAB_SIZE]) + + +WEIGHTS_TO_AVERAGE_PATTERNS = [ + r"tied_modules.embed.word_embeddings.norm.weight", + r"tied_modules.embed.word_embeddings.norm.bias", + r"tied_modules.embed.position_embeddings.weight", + r"\d+.input_layernorm.weight", + r"\d+.input_layernorm.bias", + r"\d+.post_attention_layernorm.weight", + r"\d+.post_attention_layernorm.bias", + r"\d+.self_attention.dense.bias", + 
r"\d+.attention.dense.bias", + r"\d+.mlp.dense_4h_to_h.bias", + r"\d+.weight", + r"\d+.bias", +] + +WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [ + "dense_4h_to_h.weight", + "self_attention.dense.weight", + "attention.dense.weight", +] + + +def _get_vocab_divisibility_padding_tensor(ds_checkpoint, padded_vocab_tensor): + checkpoint_info = ds_checkpoint.get_checkpoint_info() + if checkpoint_info and padded_vocab_tensor.shape[0] > checkpoint_info[ORIGINAL_VOCAB_SIZE]: + return padded_vocab_tensor[-1] + else: + return torch.zeros(padded_vocab_tensor.shape[1]) + + +def _all_same_tensor(arr): + assert len(arr) > 0 + if len(arr) == 1: + return True + res = all([x.eq(arr[0]).all().item() for x in arr[1:]]) + return res + + +def merge_tp_slices(ds_checkpoint, out_path, slice_dir, tp_degree, name_and_shape): + name, shape = name_and_shape + slice_base_path = os.path.join(slice_dir, name) + param_base_path = os.path.join(out_path, name) + + for state in ("fp32", "exp_avg", "exp_avg_sq"): + slices = _merge_zero_shards(slice_base_path, state, tp_degree, shape) + final_path = os.path.join(param_base_path, f"{state}.pt") + + ckpt_dict = {} + if any(re.match(pattern, name) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS): + assert _all_same_tensor(slices), f'Checkpoint misalignment detected for parameter: {name}' + param = slices[0] + else: + cat_dim = 1 if any(text in name for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0 + param = torch.cat(slices, dim=cat_dim) + ckpt_dict['cat_dim'] = cat_dim + + if "word_embeddings.weight" in name: + # strip padding + # param = _strip_vocab_padding(ds_checkpoint, param) + ckpt_dict['vocab_divisibility_padding_tensor'] = \ + _get_vocab_divisibility_padding_tensor(ds_checkpoint, param) + + ckpt_dict['param'] = param + _save_checkpoint(final_path, ckpt_dict) + + +def _get_chunks(l, n): + for i in range(0, len(l), n): + yield l[i:i + n] + + +def _do_parallel_work(do_work, work_chunks, num_workers): + pool = multiprocessing.Pool(num_workers) + for batch in tqdm.tqdm(work_chunks): + pool.map(do_work, batch) + pool.close() + pool.join() + + +def _extract_zero_shard_files(args, ds_checkpoint, temp_dir): + _3d_range_list = list(itertools.product(range(ds_checkpoint.pp_degree), + range(ds_checkpoint.tp_degree), + range(ds_checkpoint.dp_degree))) + work_chunks = list(_get_chunks(_3d_range_list, args.num_extract_workers)) + + do_work = partial(extract_zero_shards, temp_dir, ds_checkpoint) + _do_parallel_work(do_work, work_chunks, args.num_extract_workers) + + +def _merge_tp_slice_files(args, ds_checkpoint, slice_shapes, temp_dir): + work_chunks = list(_get_chunks(list(slice_shapes.items()), args.num_merge_workers)) + zero_output_folder = os.path.join(args.output_folder, "zero") + do_work = partial(merge_tp_slices, ds_checkpoint, zero_output_folder, temp_dir, ds_checkpoint.tp_degree) + _do_parallel_work(do_work, work_chunks, args.num_merge_workers) + + +def main(): + print(f'Convert DeepSpeed Checkpoint to Universal Checkpoint') + + args = parse_arguments() + print( + f'Converting DeepSpeed checkpoint in {args.input_folder} ' + f'to Universal checkpoint in {args.output_folder}' + ) + + ds_checkpoint = DeepSpeedCheckpoint(args.input_folder) + + slice_shapes = [] + for mp_rank_file in ds_checkpoint.mp_rank_files: + mp_sd = torch.load(mp_rank_file, map_location=torch.device('cpu')) + slice_shapes += mp_sd["param_shapes"] + + # fix back to normal flat dict, merge duplicates for tp>1 + slice_shapes = dict((k, v) for d in slice_shapes for k, v in d.items()) + temp_dir = 
os.path.join(args.output_folder, 'tmp') + + print('*** 1. Extracting ZeRO fragments') + _extract_zero_shard_files(args, ds_checkpoint, temp_dir) + + print('*** 2. Merging slices') + _merge_tp_slice_files(args, ds_checkpoint, slice_shapes, temp_dir) + + shutil.rmtree(temp_dir, ignore_errors=True) + + # Copy mp* files into output folder + for f in glob.glob(os.path.join(args.input_folder, 'mp*')): + shutil.copy2(f, args.output_folder) + + # Update latest to output folder + checkpoint_root_folder, step_folder = os.path.split(args.output_folder) + latest_file = os.path.join(checkpoint_root_folder, 'latest_universal') + with open(latest_file, "w") as f: + f.write(step_folder) + + print('*** Done!') + + +if __name__ == "__main__": + main() diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/megatron_optim_merge.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/megatron_optim_merge.py new file mode 100644 index 0000000000000000000000000000000000000000..993f53400c727b5191b57c81b0fb0025ee40ce1e --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/megatron_optim_merge.py @@ -0,0 +1,340 @@ +# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import multiprocessing +from datetime import datetime +from pathlib import Path +import os +import copy +import numpy as np +import torch # pytype: disable=import-error +import pickle + +def save_numpy(optim_state, lyr_name, saved_dir): + for opt_key, opt_val in optim_state["state"].items(): + np.save((saved_dir / F"{lyr_name}.{opt_key}.npy").as_posix(), opt_val.float().cpu().numpy().astype(np.float32)) + np.save((saved_dir / F"{lyr_name}.fp32_from_fp16_params.npy").as_posix(), optim_state["fp32_from_fp16_params"].float().cpu().numpy().astype(np.float32)) + with open((saved_dir / F"{lyr_name}.param.pickle").as_posix(), 'wb') as handle: + pickle.dump(optim_state["param_groups"], handle, protocol=pickle.HIGHEST_PROTOCOL) + + +# This tool is used to support the new megatron model trained by pipeline parallel + tensor parallel +def merge( + key, pp_id, saved_dir, model_args, optim_states, ckpt_ver, is_save_numpy +): + #i, pipeline_para_rank, saved_dir, factor, key, model_args, transformer_model_list, ckpt_ver + saved_dir = Path(saved_dir) + if key.find("layers.") != -1: + # key name: language_model.encoder.layers + layer_index = (int)(key[30 : key.find(".", 30)]) + saved_key = key.replace( + "layers.%d." % layer_index, + "layers.%d." 
+ % (layer_index + pp_id * model_args.num_layers // model_args.pipeline_model_parallel_size), + ) + abs_layer_index = "%d" % (layer_index + pp_id * model_args.num_layers // model_args.pipeline_model_parallel_size) + abs_layer_dir = "layer_" + abs_layer_index + saved_dir = saved_dir / abs_layer_dir + else: + saved_key = key + #major_device = transformer_model_list[0][key].device + #print(saved_key) + optim_state = copy.deepcopy(optim_states[key]) + del optim_state['group_index'] + del optim_state['index_within_group'] + + if ( + key.find("input_layernorm.weight") != -1 + or key.find("input_layernorm.bias") != -1 + or key.find("attention.dense.bias") != -1 + or key.find("post_attention_layernorm.weight") != -1 + or key.find("post_attention_layernorm.bias") != -1 + or key.find("mlp.dense_4h_to_h.bias") != -1 + or key.find("final_layernorm.weight") != -1 + or key.find("final_layernorm.bias") != -1 + ): + # shared weights, only need to convert the weights from single tp instance + for opt_key, opt_val in optim_state["state"].items(): + optim_state['state'][opt_key] = opt_val[0] + #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}") + optim_state["fp32_from_fp16_params"] = optim_state["fp32_from_fp16_params"][0] + #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}") + elif key.find("attention.dense.weight") != -1: + state_key = list(optim_state["state"].keys())[0] + head_num = model_args.num_attention_heads // model_args.tensor_model_parallel_size + hidden_dim = int(optim_state["state"][state_key][0].shape[0]) + dim_per_head = int(optim_state["state"][state_key][0].shape[1] / head_num) + for opt_key, opt_val in optim_state["state"].items(): + vals = [] + for k in range(model_args.tensor_model_parallel_size): + val = opt_val[k] + val = val.reshape(hidden_dim, head_num, dim_per_head) + vals.append(val) + optim_state['state'][opt_key] = torch.cat(vals, dim=1) + #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}") + vals = [] + for k in range(model_args.tensor_model_parallel_size): + val = optim_state["fp32_from_fp16_params"][k] + val = val.reshape(hidden_dim, head_num, dim_per_head) + vals.append(val) + optim_state["fp32_from_fp16_params"] = torch.cat(vals, dim=1) + #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}") + elif key.find("mlp.dense_4h_to_h.weight") != -1: + for opt_key, opt_val in optim_state["state"].items(): + vals = [] + for k in range(model_args.tensor_model_parallel_size): + vals.append(opt_val[k]) + optim_state['state'][opt_key] = torch.cat(vals, dim=-1) + #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}") + vals = [] + for k in range(model_args.tensor_model_parallel_size): + vals.append(optim_state["fp32_from_fp16_params"][k]) + optim_state["fp32_from_fp16_params"] = torch.cat(vals, dim=-1) + #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}") + elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1: + for opt_key, opt_val in optim_state["state"].items(): + vals = [] + for k in range(model_args.tensor_model_parallel_size): + vals.append(opt_val[k]) + optim_state['state'][opt_key] = torch.cat(vals, dim=0) + #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}") + vals = [] + for k in range(model_args.tensor_model_parallel_size): + vals.append(optim_state["fp32_from_fp16_params"][k]) + 
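+        # dense_h_to_4h is column-parallel, so the fp32 master copies collected from
+        # each TP rank are concatenated back along dim 0, like the optimizer states above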
optim_state["fp32_from_fp16_params"] = torch.cat(vals, dim=0) + #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}") + elif key.find("attention.query_key_value.bias") != -1: + state_key = list(optim_state["state"].keys())[0] + num_splits = 3 + head_num = model_args.num_attention_heads // model_args.tensor_model_parallel_size + size_per_head = int(optim_state["state"][state_key][0].shape[0] / num_splits / head_num) + for opt_key, opt_val in optim_state["state"].items(): + vals = [] + for k in range(model_args.tensor_model_parallel_size): + val = opt_val[k] + val = val.reshape(head_num, num_splits, size_per_head) + vals.append(val) + optim_state['state'][opt_key] = torch.cat(vals, dim=0) + #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}") + vals = [] + for k in range(model_args.tensor_model_parallel_size): + val = optim_state["fp32_from_fp16_params"][k] + val = val.reshape(head_num, num_splits, size_per_head) + vals.append(val) + optim_state["fp32_from_fp16_params"] = torch.cat(vals, dim=0) + #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}") + elif key.find("attention.query_key_value.weight") != -1: + state_key = list(optim_state["state"].keys())[0] + num_splits = 3 + hidden_dim = int(optim_state["state"][state_key][0].shape[1]) + head_num = model_args.num_attention_heads // model_args.tensor_model_parallel_size + size_per_head = int(optim_state["state"][state_key][0].shape[0] / num_splits / head_num) + for opt_key, opt_val in optim_state["state"].items(): + vals = [] + for k in range(model_args.tensor_model_parallel_size): + val = opt_val[k] + val = val.reshape(head_num, num_splits, size_per_head, hidden_dim) + vals.append(val) + optim_state['state'][opt_key] = torch.cat(vals, dim=0) + #print(F"lyr_name: {key} key: {opt_key}: {optim_state['state'][opt_key].shape}") + vals = [] + for k in range(model_args.tensor_model_parallel_size): + val = optim_state["fp32_from_fp16_params"][k] + val = val.reshape(head_num, num_splits, size_per_head, hidden_dim) + vals.append(val) + optim_state["fp32_from_fp16_params"] = torch.cat(vals, dim=0) + #print(F"lyr_name: {key} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}") + else: + print(f"[ERROR] cannot find key '{key}'") + exit(1) + + #print(F"{saved_key}: {tmp.shape}") + if is_save_numpy: + save_numpy(optim_state, saved_key, saved_dir) + else: + saved_path = saved_dir / f"{saved_key}.pt" + torch.save(optim_state, saved_path) + +def merge_checkpoint(args): + saved_dir = Path(args.saved_dir) / "gpu" / "optimizer" + saved_dir.mkdir(parents=True, exist_ok=True) + + prefix = Path(args.in_dir) + ckpt_name = "model_optim_rng.pt" + + # load position_embedding from rank 0 + if (prefix / "mp_rank_00").is_dir(): + model_00 = torch.load((prefix / "mp_rank_00" / ckpt_name).as_posix()) + elif (prefix / "mp_rank_00_000").is_dir(): + model_00 = torch.load((prefix / "mp_rank_00_000" / ckpt_name).as_posix()) + else: + print(f"[ERROR] Cannot find checkpoint in {prefix}.") + exit(1) + + model_args = model_00["args"] + with open((saved_dir / "args.txt").as_posix(), "w") as f: + for k, v in vars(model_args).items(): + f.write(f"{k}:{v} \n") + + del model_00 + + tp_size = model_args.tensor_model_parallel_size + + for i in range(model_args.num_layers): + pp_id_dir = (saved_dir / f"layer_{i}").as_posix() + os.makedirs(pp_id_dir, exist_ok=True) + + torch.multiprocessing.set_start_method("spawn") + 
torch.multiprocessing.set_sharing_strategy("file_system") + pool = multiprocessing.Pool(args.pool) + w_e_list = [] + w_e_h_list = [] + #for pp_id in [2]: + for pp_id in range(model_args.pipeline_model_parallel_size): + if model_args.pipeline_model_parallel_size == 1: + layer_rank_num = "" + else: + layer_rank_num = f"_{pp_id:03d}" + optim_states = {} + for tp_id in range(tp_size): + #if tp_id == 0: + print(F"Loading ckpt file from: mp_rank_{tp_id:02d}{layer_rank_num}") + m = torch.load((prefix / f"mp_rank_{tp_id:02d}{layer_rank_num}" / ckpt_name).as_posix(), map_location="cpu") + #m["model"]["language_model"]["encoder"] = {key: value for key, value in m["model"]["language_model"]["encoder"].items() if ("attention.dense.weight" in key) or ("mlp.dense_4h_to_h.weight" in key)} + #print(m["model"]["language_model"]["encoder"].keys()) + target_optim_map_orig = m['optimizer_model_map'] + target_optim_map = copy.deepcopy(target_optim_map_orig) + substr = "module.module." + for key, value in target_optim_map.items(): + if value.startswith(substr): + target_optim_map[key] = value[len(substr):] + #del target_optim_map_orig + #for key, value in m["optimizer_model_map"].items(): + for key, value in target_optim_map.items(): + if value in optim_states: + for opt_key, opt_val in m["optimizer"]["optimizer"]["state"][key].items(): + optim_states[value]["state"][opt_key].append(opt_val) + group_index = optim_states[value]["group_index"] + index_within_group = optim_states[value]["index_within_group"] + optim_states[value]["fp32_from_fp16_params"].append(m["optimizer"]["fp32_from_fp16_params"][group_index][index_within_group]) + else: + optim_states[value] = {} + optim_states[value]["state"] = {} + for opt_key, opt_val in m["optimizer"]["optimizer"]["state"][key].items(): + optim_states[value]["state"][opt_key] = [] + optim_states[value]["state"][opt_key].append(opt_val) + # Find index param group + group_index = 0 + index_within_group = 0 + for index, group in enumerate(m["optimizer"]["optimizer"]["param_groups"]): + if key in group["params"]: + group_index = index + index_within_group = group["params"].index(key) + optim_states[value]["group_index"] = group_index + optim_states[value]["index_within_group"] = index_within_group + optim_states[value]["param_groups"] = copy.deepcopy(group) + if "params" in optim_states[value]["param_groups"]: + del optim_states[value]["param_groups"]["params"] + break + if "group_index" not in optim_states[value]: + print(F"couldn't find index for layer: {value}") + exit(1) + optim_states[value]["fp32_from_fp16_params"] = [] + optim_states[value]["fp32_from_fp16_params"].append(m["optimizer"]["fp32_from_fp16_params"][group_index][index_within_group]) + + if pp_id == 0: + lyr_name = 'language_model.embedding.word_embeddings.weight' + optim_state = copy.deepcopy(optim_states[lyr_name]) + for opt_key, opt_val in optim_state["state"].items(): + optim_state['state'][opt_key] = torch.cat(opt_val, dim=0) + #print(F"lyr_name: {lyr_name} key: {opt_key}: {optim_state['state'][opt_key].shape}") + optim_state["fp32_from_fp16_params"] = torch.cat(optim_state["fp32_from_fp16_params"], dim=0) + #print(F"lyr_name: {lyr_name} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}") + del optim_state['group_index'] + del optim_state['index_within_group'] + if args.save_numpy: + save_numpy(optim_state, lyr_name, saved_dir) + else: + torch.save(optim_state, (saved_dir / F"{lyr_name}.pt").as_posix()) + del optim_states[lyr_name] + + lyr_name = 
'language_model.embedding.position_embeddings.weight' + optim_state = copy.deepcopy(optim_states[lyr_name]) + for opt_key, opt_val in optim_state["state"].items(): + optim_state['state'][opt_key] = opt_val[0] + #print(F"lyr_name: {lyr_name} key: {opt_key}: {optim_state['state'][opt_key].shape}") + optim_state["fp32_from_fp16_params"] = optim_state["fp32_from_fp16_params"][0] + #print(F"lyr_name: {lyr_name} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}") + del optim_state['group_index'] + del optim_state['index_within_group'] + if args.save_numpy: + save_numpy(optim_state, lyr_name, saved_dir) + else: + torch.save(optim_state, (saved_dir / F"{lyr_name}.pt").as_posix()) + del optim_states[lyr_name] + + if pp_id == (model_args.pipeline_model_parallel_size - 1) and model_args.pipeline_model_parallel_size > 1: + lyr_name = 'word_embeddings.weight' + optim_state = copy.deepcopy(optim_states[lyr_name]) + for opt_key, opt_val in optim_state["state"].items(): + optim_state['state'][opt_key] = torch.cat(opt_val, dim=0) + #print(F"lyr_name: {lyr_name} key: {opt_key}: {optim_state['state'][opt_key].shape}") + optim_state["fp32_from_fp16_params"] = torch.cat(optim_state["fp32_from_fp16_params"], dim=0) + #print(F"lyr_name: {lyr_name} key: fp32_from_fp16_params: {optim_state['fp32_from_fp16_params'].shape}") + del optim_state['group_index'] + del optim_state['index_within_group'] + if args.save_numpy: + save_numpy(optim_state, lyr_name, saved_dir) + else: + torch.save(optim_state, (saved_dir / F"{lyr_name}.pt").as_posix()) + del optim_states[lyr_name] + + pool.starmap( + merge, + [ + ( + k, + pp_id, + saved_dir, + model_args, + optim_states, + m["checkpoint_version"], + args.save_numpy + ) + for (k, _) in optim_states.items() + ], + ) + + pool.close() + pool.join() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument("-saved_dir", "-o", type=str, help="output directory for saving converted checkpoints", required=True) + parser.add_argument("-in_dir", "-i", type=str, help="input checkpoint directory path", required=True) + parser.add_argument("-save_numpy", "-npy", action='store_true', help="save output as numpy array", default=False) + parser.add_argument("-pool", "-pl", type=int, help="Process pool", default=4) + args = parser.parse_args() + print("\n=============== Argument ===============") + for key in vars(args): + print(f"{key}: {vars(args)[key]}") + print("========================================") + + start_time = datetime.now() + merge_checkpoint(args) + stop_time = datetime.now() + run_time = stop_time - start_time + print(f"[INFO] Spent {run_time} (h:m:s) to convert the model") \ No newline at end of file diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/megatron_optim_merged_to_ds_universal_convert.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/megatron_optim_merged_to_ds_universal_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..17ae760d202ba1b4dd19f2dabde5c4373e198ef6 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/megatron_optim_merged_to_ds_universal_convert.py @@ -0,0 +1,321 @@ +############################################################################### +# Copyright (c) 2023 Habana Labs Ltd. All rights reserved. 
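+#
+# Converts a merged Megatron-LM optimizer checkpoint into the DeepSpeed universal
+# checkpoint layout: per-parameter fp32.pt / exp_avg.pt / exp_avg_sq.pt files under
+# "zero/", plus adjusted mp_rank_* model state files.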
+############################################################################### +import argparse +import multiprocessing +from datetime import datetime +from pathlib import Path +import os +import copy +import numpy as np +import torch # pytype: disable=import-error +import pickle +import glob +import re + + +WEIGHTS_TO_AVERAGE_PATTERNS = [ + r"tied_modules.embed.word_embeddings.norm.weight", + r"tied_modules.embed.word_embeddings.norm.bias", + r"tied_modules.embed.position_embeddings.weight", + r"\d+.input_layernorm.weight", + r"\d+.input_layernorm.bias", + r"\d+.post_attention_layernorm.weight", + r"\d+.post_attention_layernorm.bias", + r"\d+.self_attention.dense.bias", + r"\d+.attention.dense.bias", + r"\d+.mlp.dense_4h_to_h.bias", + r"\d+.weight", + r"\d+.bias", +] + +WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [ + "dense_4h_to_h.weight", + "self_attention.dense.weight", + "attention.dense.weight", +] +def _get_vocab_divisibility_padding_tensor(padded_vocab_tensor): + return padded_vocab_tensor[-1] + +def _save_checkpoint(file_path, chkpt_sd): + ckp_dir, _ = os.path.split(file_path) + os.makedirs(ckp_dir, exist_ok=True) + torch.save(chkpt_sd, file_path) + +def tensor_convert(tensor_name_mapping, tensor_index): + fp32_ckpt = {} + exp_avg_ckpt = {} + exp_avg_sq_ckpt = {} + + tensor_name = tensor_name_mapping[tensor_index] + megatron_optimizer_states = torch.load(tensor_name[1]) + if 'self_attention.query_key_value' in tensor_name[1]: + dim = megatron_optimizer_states['fp32_from_fp16_params'].size()[len(megatron_optimizer_states['fp32_from_fp16_params'].size())-1] + fp32_ckpt['param'] = megatron_optimizer_states['fp32_from_fp16_params'].view(-1,dim) + exp_avg_ckpt['param'] = megatron_optimizer_states['state']['exp_avg'].view(-1,dim) + exp_avg_sq_ckpt['param'] = megatron_optimizer_states['state']['exp_avg_sq'].view(-1,dim) + + cat_dim = 0 + fp32_ckpt['cat_dim'] = cat_dim + exp_avg_ckpt['cat_dim'] = cat_dim + exp_avg_sq_ckpt['cat_dim'] = cat_dim + else: + fp32_ckpt['param'] = megatron_optimizer_states['fp32_from_fp16_params'] + exp_avg_ckpt['param'] = megatron_optimizer_states['state']['exp_avg'] + exp_avg_sq_ckpt['param'] = megatron_optimizer_states['state']['exp_avg_sq'] + + ds_tensor_name = os.path.split(tensor_name[0])[-1] + if not any(re.match(pattern, ds_tensor_name) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS): + cat_dim = 1 if any(text in ds_tensor_name for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0 + if '.bias' not in ds_tensor_name: + fp32_ckpt['cat_dim'] = cat_dim + exp_avg_ckpt['cat_dim'] = cat_dim + exp_avg_sq_ckpt['cat_dim'] = cat_dim + + if 'word_embeddings.weight' in tensor_name[1]: + fp32_ckpt['vocab_divisibility_padding_tensor'] = \ + _get_vocab_divisibility_padding_tensor(fp32_ckpt['param']) + exp_avg_ckpt['vocab_divisibility_padding_tensor'] = \ + _get_vocab_divisibility_padding_tensor(exp_avg_ckpt['param']) + exp_avg_sq_ckpt['vocab_divisibility_padding_tensor'] = \ + _get_vocab_divisibility_padding_tensor(exp_avg_sq_ckpt['param']) + + + fp32_weight_file_path = os.path.join(tensor_name[0], 'fp32.pt') + _save_checkpoint(fp32_weight_file_path, fp32_ckpt) + + exp_avg_file_path = os.path.join(tensor_name[0], 'exp_avg.pt') + _save_checkpoint(exp_avg_file_path, exp_avg_ckpt) + + exp_avg_sq_file_path = os.path.join(tensor_name[0], 'exp_avg_sq.pt') + _save_checkpoint(exp_avg_sq_file_path, exp_avg_sq_ckpt) + +def mp_rank_files_info_adjustment(file,megatron_state_dict,same_config, ds_universal_checkpoints_path): + ds_state_dict = torch.load(file, 
map_location=torch.device('cpu')) + ds_state_dict['lr_scheduler']['num_steps'] = megatron_state_dict['opt_param_scheduler']['num_steps'] + ds_state_dict['lr_scheduler']['warmup_steps'] = megatron_state_dict['opt_param_scheduler']['warmup_steps'] + ds_state_dict['lr_scheduler']['decay_steps'] = megatron_state_dict['opt_param_scheduler']['decay_steps'] + ds_state_dict['iteration'] = megatron_state_dict['iteration'] + ds_state_dict['global_steps'] = megatron_state_dict['iteration'] + ds_state_dict['global_samples'] = megatron_state_dict['args'].consumed_train_samples + ds_state_dict['tokens'] = megatron_state_dict['args'].consumed_train_samples* megatron_state_dict['args'].seq_length + ds_state_dict['args'].consumed_train_samples = megatron_state_dict['args'].consumed_train_samples + ds_state_dict['args'].consumed_valid_samples = megatron_state_dict['args'].consumed_valid_samples + ds_state_dict['args'].consumed_train_tokens = ds_state_dict['tokens'] + + # if both megatron-lm and megatron-deepspeed have the same TP, PP configuration, we copy the rng states from megatron-lm to megatron-deepspeed + if same_config == 'True': + ds_state_dict['random_rng_state'] = megatron_state_dict['rng_state'][0]['random_rng_state'] + ds_state_dict['np_rng_state'] = megatron_state_dict['rng_state'][0]['np_rng_state'] + ds_state_dict['torch_rng_state'] = megatron_state_dict['rng_state'][0]['torch_rng_state'] + ds_state_dict['cuda_rng_state'] = megatron_state_dict['rng_state'][0]['cuda_rng_state'] + ds_state_dict['rng_tracker_states'] = megatron_state_dict['rng_state'][0]['rng_tracker_states'] + + file = os.path.join(ds_universal_checkpoints_path,os.path.split(file)[1]) + torch.save(ds_state_dict,file) + + +def mp_rank_files_info_adjustment_parallel_processing(ds_mp_rank_files_dir,ds_universal_checkpoints_path,megatron_lm_non_merged_input_dir, \ + model_parallel_same_config,pp_index,tp_index,tp_rank): + + state_dict = torch.load(os.path.join(megatron_lm_non_merged_input_dir, + 'mp_rank_{:02d}_{:03d}'.format( + tp_index, + pp_index), + 'model_optim_rng.pt'), map_location=torch.device('cpu')) + + # Need to update according to how the mapping is done when tp_rank * pp_rank > 9 + mp_rank_file_index = '0' + str(pp_index * tp_rank + tp_index) + mp_rank_file = os.path.join(ds_mp_rank_files_dir, 'mp_rank_' + mp_rank_file_index + '_model_states.pt') + mp_rank_files_info_adjustment(mp_rank_file, state_dict, model_parallel_same_config, ds_universal_checkpoints_path) + + + +def ds_universal_convert(args): + + torch.multiprocessing.set_start_method("spawn") + torch.multiprocessing.set_sharing_strategy("file_system") + pool = multiprocessing.Pool(args.pool) + + ds_universal_checkpoints_path = args.ds_universal_dir + latest_file = os.path.join(ds_universal_checkpoints_path, 'latest_universal') + os.makedirs(ds_universal_checkpoints_path, exist_ok=True) + with open(latest_file, "w") as f: + f.write(str(args.iteration)) + + ds_universal_checkpoints_path = os.path.join(ds_universal_checkpoints_path, str(args.iteration)) + os.makedirs(ds_universal_checkpoints_path, exist_ok=True) + + if (args.update_only_mp_rank_files == False): + layers_per_model_pipeline_slice = args.num_layers // args.pp_rank + # tensor_name_mapping maps the ds tensor directory name to the megatron-lm merged optimizer tensor path + if args.pp_rank == 1: + tensor_name_mapping = [ + [os.path.join(ds_universal_checkpoints_path, 'zero', 'tied_modules.embed.position_embeddings.weight'),os.path.join(args.megatron_lm_merged_input_dir, 
'language_model.embedding.position_embeddings.weight.pt')], \ + [os.path.join(ds_universal_checkpoints_path, 'zero', 'tied_modules.embed.word_embeddings.weight'), os.path.join(args.megatron_lm_merged_input_dir, 'language_model.embedding.word_embeddings.weight.pt')], + [os.path.join(ds_universal_checkpoints_path, 'zero', str(4 + args.num_layers) + '.bias'), os.path.join(args.megatron_lm_merged_input_dir, 'language_model.encoder.final_layernorm.bias.pt')], + [os.path.join(ds_universal_checkpoints_path, 'zero', str(4 + args.num_layers) + '.weight'), os.path.join(args.megatron_lm_merged_input_dir, 'language_model.encoder.final_layernorm.weight.pt')] + ] + else: + tensor_name_mapping = [ + [os.path.join(ds_universal_checkpoints_path, 'zero','tied_modules.embed.position_embeddings.weight'), os.path.join(args.megatron_lm_merged_input_dir,'language_model.embedding.position_embeddings.weight.pt')], \ + [os.path.join(ds_universal_checkpoints_path, 'zero','tied_modules.embed.word_embeddings.weight'), os.path.join(args.megatron_lm_merged_input_dir,'language_model.embedding.word_embeddings.weight.pt')], + [os.path.join(ds_universal_checkpoints_path, 'zero','word_embeddings.weight'),os.path.join(args.megatron_lm_merged_input_dir,'word_embeddings.weight.pt')], \ + [os.path.join(ds_universal_checkpoints_path, 'zero',str(4+args.num_layers)+'.bias'), os.path.join(args.megatron_lm_merged_input_dir,'language_model.encoder.final_layernorm.bias.pt')], + [os.path.join(ds_universal_checkpoints_path, 'zero',str(4+args.num_layers)+'.weight'),os.path.join(args.megatron_lm_merged_input_dir,'language_model.encoder.final_layernorm.weight.pt')] + ] + + layer_name_mapping = [ + ['.attention.dense.bias', 'language_model.encoder.layers.LAYER_INDEX.self_attention.dense.bias'], \ + ['.attention.dense.weight','language_model.encoder.layers.LAYER_INDEX.self_attention.dense.weight'], \ + ['.attention.query_key_value.bias', 'language_model.encoder.layers.LAYER_INDEX.self_attention.query_key_value.bias'], \ + ['.attention.query_key_value.weight', 'language_model.encoder.layers.LAYER_INDEX.self_attention.query_key_value.weight'], \ + ['.input_layernorm.bias', 'language_model.encoder.layers.LAYER_INDEX.input_layernorm.bias'], \ + ['.input_layernorm.weight', 'language_model.encoder.layers.LAYER_INDEX.input_layernorm.weight'], \ + ['.mlp.dense_4h_to_h.bias', 'language_model.encoder.layers.LAYER_INDEX.mlp.dense_4h_to_h.bias'], \ + ['.mlp.dense_4h_to_h.weight', 'language_model.encoder.layers.LAYER_INDEX.mlp.dense_4h_to_h.weight'], \ + ['.mlp.dense_h_to_4h.bias', 'language_model.encoder.layers.LAYER_INDEX.mlp.dense_h_to_4h.bias'], \ + ['.mlp.dense_h_to_4h.weight', 'language_model.encoder.layers.LAYER_INDEX.mlp.dense_h_to_4h.weight'], \ + ['.post_attention_layernorm.bias', 'language_model.encoder.layers.LAYER_INDEX.post_attention_layernorm.bias'], \ + ['.post_attention_layernorm.weight', 'language_model.encoder.layers.LAYER_INDEX.post_attention_layernorm.weight'] + ] + + for layer_index in np.arange(args.num_layers): + for layer_tensor_index in np.arange(len(layer_name_mapping)): + + ds_tensor_name_map = os.path.join(ds_universal_checkpoints_path,'zero',str(3+layer_index)+layer_name_mapping[layer_tensor_index][0]) + megatron_tensor_name_map = os.path.join(args.megatron_lm_merged_input_dir,'layer_'+str(layer_index),layer_name_mapping[layer_tensor_index][1].replace('LAYER_INDEX',str(layer_index))+'.pt') + tensor_name_map = [ds_tensor_name_map, megatron_tensor_name_map] + tensor_name_mapping.append(tensor_name_map) + + + # go over all the 
tensors in tensor_name_mapping and convert them from megatron optimizer format to ds_universal + + #for tensors_index in np.arange(len(tensor_name_mapping)): + # tensor_convert(tensor_name_mapping,tensors_index) + # print('finished converting tensor {}'.format(tensors_index)) + + # multiprocessing of the tensors in tensor_name_mapping and converting them from megatron optimizer format to ds_universal + + pool.starmap( + tensor_convert, + [ + ( + tensor_name_mapping, + k + ) + for k in np.arange(len(tensor_name_mapping)) + ], + ) + + pool.close() + pool.join() + + + # updating the deepspeed ds_mp_rank files according to megatron non merged ( original megatron checkpoint structure files) + + if args.model_parallel_same_config == 'True': + for pp_index in np.arange(args.pp_rank): + for tp_index in np.arange(args.tp_rank): + if args.pp_rank > 1: + file_name = os.path.join(args.megatron_lm_non_merged_input_dir,'mp_rank_{:02d}_{:03d}'.format(tp_index,pp_index),'model_optim_rng.pt') + else: + file_name = os.path.join(args.megatron_lm_non_merged_input_dir,'mp_rank_{:02d}'.format(tp_index),'model_optim_rng.pt') + + state_dict = torch.load(file_name, map_location=torch.device('cpu')) + + # Need to update according to how the mapping is done when tp_rank * pp_rank > 9 + mp_rank_file_index = '0'+str(pp_index*args.tp_rank+tp_index) + mp_rank_file = os.path.join(args.ds_mp_rank_files_dir,'mp_rank_'+mp_rank_file_index+'_model_states.pt') + mp_rank_files_info_adjustment(mp_rank_file, state_dict, args.model_parallel_same_config, + ds_universal_checkpoints_path) + + + + model_parallel_matrix_index = [] + for pp_index in np.arange(args.pp_rank): + for tp_index in np.arange(args.tp_rank): + model_parallel_matrix_index.append([pp_index, tp_index]) + + + pool = multiprocessing.Pool(args.pool) + + pool.starmap( + mp_rank_files_info_adjustment_parallel_processing, + [ + ( + args.ds_mp_rank_files_dir, + ds_universal_checkpoints_path, + args.megatron_lm_non_merged_input_dir, + args.model_parallel_same_config, + pp_index, + tp_index, + args.tp_rank + ) + for (pp_index, tp_index) in model_parallel_matrix_index + ], + ) + + pool.close() + pool.join() + + else: + mp_rank_files = glob.glob(os.path.join(args.ds_mp_rank_files_dir, 'mp_rank_*.pt')) + if args.megatron_lm_non_merged_input_dir is not None: + file_name = glob.glob(os.path.join(args.megatron_lm_non_merged_input_dir,'*'))[0]+'/model_optim_rng.pt' + megatron_state_dict = torch.load(file_name, map_location=torch.device('cpu')) + + else: + class My_args: + def __init__(self, consumed_train_samples=args.iteration * args.global_batch_size, seq_length=args.seq_length, consumed_valid_samples=0): + self.consumed_train_samples = consumed_train_samples + self.seq_length = seq_length + self.consumed_valid_samples = consumed_valid_samples + + megatron_state_dict = { 'opt_param_scheduler': args.iteration, 'iteration': args.iteration, 'args' : None } + megatron_state_dict['opt_param_scheduler'] = {'num_steps': args.iteration*args.global_batch_size, 'warmup_steps': args.lr_warmup_samples , 'decay_steps': args.lr_decay_samples} + megatron_state_dict['args']= My_args(consumed_train_samples=args.iteration * args.global_batch_size, + seq_length=args.seq_length) + + for mp_rank_file in mp_rank_files: + print(f"Adjusting {mp_rank_file=}", flush=True) + mp_rank_files_info_adjustment(mp_rank_file, megatron_state_dict, args.model_parallel_same_config, ds_universal_checkpoints_path) + # Deleting redundant mp_rank files, in case number of devices was decreased + universal_mp_rank_files = 
glob.glob(os.path.join(ds_universal_checkpoints_path, 'mp_rank_*.pt')) + for universal_mp_rank_file in universal_mp_rank_files: + if os.path.basename(universal_mp_rank_file) not in [os.path.basename(file_elem) for file_elem in mp_rank_files]: + print(f"Deleting old redundant mp_rank file {universal_mp_rank_file=}", flush=True) + os.remove(universal_mp_rank_file) + + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument("--ds-universal-dir", "--o", type=str, help="output directory for saving the converted ds_universal checkpoints", required=True) + parser.add_argument("--megatron-lm-merged-input-dir", "--merged-input", type=str, help="megatron-lm merged optimizer input checkpoint directory path", required=False) + parser.add_argument("--megatron-lm-non-merged-input-dir", "--non-merged-input", type=str, help="megatron-lm non merged checkpoint directory path", default = None) + parser.add_argument("--ds-mp-rank-files-dir", "--ds", type=str, help="deepspeed mp_rank_files directory path", required=True) + parser.add_argument("--tp-rank", "--tp",type=int, help="deepseed tp_rank configuration", default=8,required=True) + parser.add_argument("--pp-rank", "--pp",type=int, help="deepseed tp_rank configuration", default=8,required=True) + parser.add_argument("--num-layers", "--nl", type=int, help="GPT-3 number of layers", default=96) + parser.add_argument("--iteration", "--iter", type=int, help="#iteration ", default=None, required=True) + parser.add_argument("--global-batch-size", "--gbs", type=int, help="load ckpt global batch size", default=1536) + parser.add_argument("--seq_length", "--sl", type=int, help="Sequence length", default=2048) + parser.add_argument("--lr-warmup-samples", "--lws", type=int, help="lr warmup samples", default=407040) + parser.add_argument("--lr-decay-samples", "--lds", type=int, help="lr decay samples", default=166809600) + parser.add_argument("--model-parallel-same-config", "--same_config", help="if megatron-lm and megatron deepspeed tp, pp configuration is the same", default=True) + parser.add_argument("--pool", "-pl", type=int, help="Process pool", default=4) + parser.add_argument("--update-only-mp-rank-files", "--update", type=bool, help="if set will update only the mp_rank files w/o converting the nvidia-merged format to ds universal ", default=False, required=False) + + args = parser.parse_args() + print("\n=============== Argument ===============") + for key in vars(args): + print(f"{key}: {vars(args)[key]}") + print("========================================") + + print("Converting megatron merged optimizer checkpoint to deepspeed universal format checkpoint") + start_time = datetime.now() + ds_universal_convert(args) + stop_time = datetime.now() + run_time = stop_time - start_time + print(f"[INFO] Spent {run_time} (h:m:s) to convert the merged optimizer to deepspeed universal format") \ No newline at end of file diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/verify_checkpoint_non_tp_consistency.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/verify_checkpoint_non_tp_consistency.py new file mode 100644 index 0000000000000000000000000000000000000000..282a153c25cb98025f4411f36efde76a33734211 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/convert_checkpoint/verify_checkpoint_non_tp_consistency.py @@ -0,0 +1,290 @@ +import re +import 
tqdm +import argparse +from dataclasses import dataclass +import torch +from deepspeed.checkpoint import DeepSpeedCheckpoint + + +def parse_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument('--folder', default=None, type=str, help='DeepSpeed Checkpoint folder') + parser.add_argument('--model_type', default='GPT', type=str, help='Type of the model', + choices=['GPT', 'BLOOM', 'LLAMA']) + parser.add_argument('--sequence-parallel', action='store_true', help='Is sequence parallel enabled') + args = parser.parse_args() + print(f'args = {args}') + return args + + +def show_3d(ds_checkpoint): + src_3d = ds_checkpoint.zero_checkpoint.src_3d + dp, tp, pp = src_3d.dp_degree, src_3d.tp_degree, src_3d.pp_degree + print(f'3D configuration: DP={dp} TP={tp} PP={pp}') + + +def get_layer_patterns_for_non_sharded(model_type): + if model_type == 'GPT': + return [ + 'position_embeddings.weight', + 'input_layernorm.weight', + 'input_layernorm.bias', + 'self_attention.dense.bias', + "attention.dense.bias", + 'post_attention_layernorm.weight', + 'post_attention_layernorm.bias', + 'mlp.dense_4h_to_h.bias', + 'weight', + 'bias' + ] + + if model_type == 'BLOOM': + return [ + 'input_layernorm.weight', + 'input_layernorm.bias', + 'self_attention.dense.bias', + "attention.dense.bias", + 'post_attention_layernorm.weight', + 'post_attention_layernorm.bias', + 'mlp.dense_4h_to_h.bias', + 'weight', + 'bias' + ] + if model_type == 'LLAMA': + return [ + 'input_layernorm.weight', + 'input_layernorm.bias', + 'self_attention.dense.bias', + "attention.dense.bias", + 'post_attention_layernorm.weight', + 'post_attention_layernorm.bias', + 'mlp.dense_4h_to_h.bias', + 'final_rmsnorm.weight', + ] + + +def get_zero_patterns_for_non_sharded(model_type, sequence_parallel): + if model_type == 'GPT': + patterns = [ + r"tied_modules.embed.word_embeddings.norm.weight", + r"tied_modules.embed.word_embeddings.norm.bias", + r"tied_modules.embed.position_embeddings.weight", + r"\d+.self_attention.dense.bias", + r"\d+.attention.dense.bias", + r"\d+.mlp.dense_4h_to_h.bias", + ] + if not sequence_parallel: + patterns = patterns + [ + r"\d+.input_layernorm.weight", + r"\d+.input_layernorm.bias", + r"\d+.post_attention_layernorm.weight", + r"\d+.post_attention_layernorm.bias", + r"\d+.weight", + r"\d+.bias", + ] + return patterns + if model_type == 'BLOOM': + patterns = [ + r"tied_modules.embed.word_embeddings.norm.weight", + r"tied_modules.embed.word_embeddings.norm.bias", + r"\d+.self_attention.dense.bias", + r"\d+.attention.dense.bias", + r"\d+.mlp.dense_4h_to_h.bias", + ] + if not sequence_parallel: + patterns = patterns + [ + r"\d+.input_layernorm.weight", + r"\d+.input_layernorm.bias", + r"\d+.post_attention_layernorm.weight", + r"\d+.post_attention_layernorm.bias", + r"\d+.weight", + r"\d+.bias", + ] + return patterns + if model_type == 'LLAMA': + patterns = [ + r"tied_modules.embed.word_embeddings.norm.weight", + r"tied_modules.embed.word_embeddings.norm.bias", + r"\d+.self_attention.dense.bias", + r"\d+.attention.dense.bias", + r"\d+.mlp.dense_4h_to_h.bias", + ] + if not sequence_parallel: + patterns = patterns + [ + r"\d+.input_layernorm.weight", + r"\d+.input_layernorm.bias", + r"\d+.post_attention_layernorm.weight", + r"\d+.post_attention_layernorm.bias", + r"\d+.final_rmsnorm.weight", + ] + return patterns + + + +@dataclass +class ParamInfo: + pp: int + tp: int + dp: int + data: torch.Tensor + numel: int + + +def get_zero_pp_stage_non_sharded_params(ds_checkpoint, model_type, sequence_parallel, pp_stage, 
dp_stage): + patterns = get_zero_patterns_for_non_sharded(model_type, sequence_parallel) + params = {} + for tp_stage in tqdm.tqdm(range(ds_checkpoint.tp_degree), desc='bf16 zero files'): + sd = ds_checkpoint.get_zero_checkpoint_state( + pp_index=pp_stage, + tp_index=tp_stage, + dp_index=dp_stage) + + optim_sd = sd["optimizer_state_dict"] + param_slice_mappings = optim_sd["param_slice_mappings"] + state_groups = optim_sd["base_optimizer_state"]["state"] + fp32_groups = optim_sd["single_partition_of_fp32_groups"] + + for param_group_id in range(len(state_groups)): + flat_state = dict( + exp_avg=state_groups[param_group_id]["exp_avg"], + exp_avg_sq=state_groups[param_group_id]["exp_avg_sq"], + fp32=fp32_groups[param_group_id], + ) + + for name, fragment_mapping in param_slice_mappings[param_group_id].items(): + if not any(re.match(pattern, name) for pattern in patterns): + continue + + for state_key in flat_state.keys(): + tensor = flat_state[state_key].narrow( + dim=0, + start=fragment_mapping.start, + length=fragment_mapping.numel).clone() + info = ParamInfo(pp=pp_stage, tp=tp_stage, dp=dp_stage, + data=tensor, numel=fragment_mapping.numel) + full_name = name + '.__' + state_key + if full_name not in params: + params[full_name] = [] + params[full_name].append(info) + return params + + +def verify_equal_params(params, tp): + failed = 0 + report = {} + for name, info in params.items(): + n = len(info) + if n != tp: + ok = False + print(f'{name}: FAILED expected n={n} == tp={tp}') + elif n == 1: + ok = True + else: + ok = all([(x.numel == info[0].numel) for x in info[1:]]) + if not ok: + print(f'{name}: FAILED numel comparison [n={n}]') + else: + ok = all([x.data.eq(info[0].data).all().item() for x in info[1:]]) + if not ok: + print(f'{name}: FAILED data comparison [n={n}]') + failed += (ok == False) + report[name] = (ok, n) + if ok: + print(f'{name}: OK [n={n}]') + return failed, report + + +def update_layer_non_sharded_params(params, model_type, filename, pp_index, tp_index): + layer_id, file_tp_index = re.search('layer_(\d+)-model_(\d+)', filename).groups() + layer_id = int(layer_id) + file_tp_index = int(file_tp_index) + #assert tp_index == file_tp_index, f'Inconsistent tp index tp_index={tp_index} file_tp_index={file_tp_index}' + if tp_index != file_tp_index: + print('bad') + + sd = torch.load(filename, map_location=torch.device('cpu')) + sequential_layers = get_layer_patterns_for_non_sharded(model_type) + for key in sd.keys(): + if key in sequential_layers: + param_key = str(layer_id) + '.' 
+ key + if param_key not in params: + params[param_key] = [] + info = ParamInfo(pp=pp_index, tp=tp_index, dp=-1, + data=sd[key], numel=sd[key].numel()) + params[param_key].append(info) + return params + + +def verify_layer_files(ds_checkpoint, model_type): + src_3d = ds_checkpoint.zero_checkpoint.src_3d + dp, tp, pp = src_3d.dp_degree, src_3d.tp_degree, src_3d.pp_degree + + total_failed = 0 + for pp_index in range(pp): + print(f'\nChecking pp_stage={pp_index}') + params = {} + if pp_index == 0: + for tp_index in range(tp): + for filename in ds_checkpoint.tp_to_embedding_map[tp_index]: + update_layer_non_sharded_params(params, model_type, + filename, pp_index, tp_index) + for tp_index in range(tp): + for filename_list in ds_checkpoint.transformer_file_map[(tp_index, pp_index)]: + for filename in filename_list: + update_layer_non_sharded_params(params, model_type, + filename, pp_index, tp_index) + if pp_index == (pp-1): + for tp_index in range(tp): + for filename in ds_checkpoint.tp_to_final_norm_map[tp_index]: + update_layer_non_sharded_params(params, model_type, + filename, pp_index, tp_index) + failed, report = verify_equal_params(params, tp) + total_failed += failed + return total_failed + + +def verify_zero_files(ds_checkpoint, model_type,sequence_parallel): + src_3d = ds_checkpoint.zero_checkpoint.src_3d + dp, tp, pp = src_3d.dp_degree, src_3d.tp_degree, src_3d.pp_degree + + total_failed = 0 + for i in range(pp): + for j in range(dp): + print(f'\nChecking pp_stage={i} dp_stage={j}') + params = get_zero_pp_stage_non_sharded_params(ds_checkpoint, model_type, sequence_parallel, + pp_stage=i, dp_stage=j) + failed, report = verify_equal_params(params, tp) + total_failed += failed + return total_failed + +def verify_checkpoint(folder,model_type,sequence_parallel=False): + final_layer_norm_idx = -2 if model_type == 'LLAMA' else -1 + ds_checkpoint = DeepSpeedCheckpoint(folder,final_layer_norm_idx=final_layer_norm_idx) + ds_checkpoint.validate_files() + show_3d(ds_checkpoint) + + print('\nVerify ** layer_ ** files') + total_failed_layer = verify_layer_files(ds_checkpoint, model_type) + if total_failed_layer == 0: + print('\nCheckpoint layer files OK') + else: + print(f"\nCheckpoint layer files BAD with total_failed={total_failed_layer}") + + print('\nVerify ** bf16_zero_ ** files') + total_failed_zero = verify_zero_files(ds_checkpoint, model_type,sequence_parallel) + if total_failed_zero == 0: + print('\nCheckpoint zero files OK') + else: + print(f"\nCheckpoint zero files BAD with total_failed={total_failed_zero}") + + return (total_failed_layer + total_failed_zero) == 0 + + +def main(): + print(f'Verify DeepSpeed Checkpoint consistency for non-TP-sharded parameters') + args = parse_arguments() + print(args) + assert verify_checkpoint(args.folder, args.model_type, args.sequence_parallel) is True, "Checkpoint verification failed" + +if __name__ == "__main__": + main() diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/create_doc_index.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/create_doc_index.py new file mode 100644 index 0000000000000000000000000000000000000000..4448d0e29080df5fc7370d8b83238d712d881e18 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/create_doc_index.py @@ -0,0 +1,33 @@ +import os +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), + os.path.pardir))) + +from megatron import print_rank_0 +from megatron.indexer import IndexBuilder 
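+# IndexBuilder embeds an ICT dataset and writes the resulting BlockData index to disk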
+from megatron.initialize import initialize_megatron + + +def main(): + """Create a BlockData data structure by running an IndexBuilder over an ICT Dataset + - Include all args needed for initial model specification + + Other key args: + --block-data-path: path to write to + --ict-load or --realm-load: path to checkpoint with which to embed + --data-path and --titles-data-path: paths for dataset + --indexer-log-interval: reporting interval + --indexer-batch-size: size specific for indexer jobs + + Check README.md for example script + """ + + initialize_megatron(extra_args_provider=None, + args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'}) + index_builder = IndexBuilder() + index_builder.build_and_save_index() + print_rank_0("Build and save indices: done!") + +if __name__ == "__main__": + main() + diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/generate_samples_gpt.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/generate_samples_gpt.py new file mode 100644 index 0000000000000000000000000000000000000000..01019fe2f68f634f1e6e07ec227f1b94846fa5e3 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/generate_samples_gpt.py @@ -0,0 +1,173 @@ +# coding=utf-8 +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company. +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
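+#
+# Generates text from a GPT checkpoint: interactively, from a prompt file
+# (--sample-input-file), or unconditionally (--num-samples > 0), optionally wrapping
+# the model with deepspeed.init_inference when ds_inference is enabled.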
+ +"""Sample Generate GPT""" + +import deepspeed + +import os +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), + os.path.pardir))) + +from megatron import get_args +from megatron import print_rank_0 +from megatron import get_tokenizer +from megatron import mpu +from megatron.checkpointing import load_checkpoint +from megatron.initialize import initialize_megatron +from megatron.model import GPTModel +from megatron.training import get_model +from megatron.text_generation_utils import generate_and_write_samples_unconditional +from megatron.text_generation_utils import generate_samples_input_from_file +from megatron.text_generation_utils import generate_samples_interactive +import deepspeed +import torch + +def model_provider(pre_process=True, post_process=True): + """Build the model.""" + + print_rank_0('building GPT model ...') + model = GPTModel(num_tokentypes=0, parallel_output=False, + pre_process=pre_process, post_process=post_process, + return_moe_loss=False) # we need to set "return_moe_loss" for the inference_mode + return model + + +def add_text_generate_args(parser): + """Text generation arguments.""" + group = parser.add_argument_group(title='text generation') + + group.add_argument("--temperature", type=float, default=1.0, + help='Sampling temperature.') + group.add_argument("--greedy", action='store_true', default=False, + help='Use greedy sampling.') + group.add_argument("--top_p", type=float, default=0.0, + help='Top p sampling.') + group.add_argument("--top_k", type=int, default=0, + help='Top k sampling.') + group.add_argument("--out-seq-length", type=int, default=1024, + help='Size of the output generated text.') + group.add_argument("--sample-input-file", type=str, default=None, + help='Get input from file instead of interactive mode, ' + 'each line is an input.') + group.add_argument("--sample-output-file", type=str, default=None, + help='Output file got from --sample-input-file') + group.add_argument("--num-samples", type=int, default=0, + help='Number of samples to generate unconditionally, ' + 'defaults to 0 and interactive conditional sampling') + group.add_argument("--genfile", type=str, + help='Output file when generating unconditionally') + group.add_argument("--recompute", action='store_true', + help='During generation recompute all attention ' + 'instead of using previously computed keys/values.') + + return parser + +def print_latency(latency_set, title=""): + # 10 warmup queries + latency_set = latency_set[10:] + count = len(latency_set) + if count > 0: + latency_set.sort() + n50 = (count - 1) * 0.5 + 1 + n90 = (count - 1) * 0.9 + 1 + n95 = (count - 1) * 0.95 + 1 + n99 = (count - 1) * 0.99 + 1 + n999 = (count - 1) * 0.999 + 1 + + avg = sum(latency_set) / count + p50 = latency_set[int(n50) - 1] + p90 = latency_set[int(n90) - 1] + p95 = latency_set[int(n95) - 1] + p99 = latency_set[int(n99) - 1] + p999 = latency_set[int(n999) - 1] + + print("====== latency stats {0} ======", title) + print("\tAvg Latency: {0:8.2f} ms".format(avg * 1000)) + print("\tP50 Latency: {0:8.2f} ms".format(p50 * 1000)) + print("\tP90 Latency: {0:8.2f} ms".format(p90 * 1000)) + print("\tP95 Latency: {0:8.2f} ms".format(p95 * 1000)) + print("\tP99 Latency: {0:8.2f} ms".format(p99 * 1000)) + print("\t999 Latency: {0:8.2f} ms".format(p999 * 1000)) + +def main(): + """Main program.""" + latencies = [] + model_latencies = [] + single_token_latency = [] + + initialize_megatron(extra_args_provider=add_text_generate_args, + args_defaults={'tokenizer_type': 
'GPT2BPETokenizer', + 'no_load_rng': True, + 'no_load_optim': True}) + + args = get_args() + + if args.num_layers_per_virtual_pipeline_stage is not None: + print("Interleaved pipeline schedule is not yet supported for text generation.") + exit() + + # Set up model and load checkpoint. + model = get_model(model_provider) + + if args.load is not None: + _ = load_checkpoint(model, None, None) + + assert len(model) == 1, "Above condition should have caught this" + model = model[0] + + if args.ds_inference: + model = ds_inference(model, args) + print('> DeepSpeed Inference engine initialized') + + # Generate samples. + if args.num_samples == 0: + assert args.micro_batch_size == args.eval_micro_batch_size, \ + "main (generate_samples_gpt) - Unsupported for split micro batch size" + args.micro_batch_size = 1 + # Next line should be considered once eval_micro_batch_size is supported here + args.eval_micro_batch_size = args.micro_batch_size + if args.sample_input_file != None: + generate_samples_input_from_file(model) + else: + generate_samples_interactive(model) + else: + generate_and_write_samples_unconditional(model, latencies, single_token_latency, model_latencies) + + + #if torch.cuda.current_device() == 0: + if torch.distributed.get_rank() == 0: + print_latency(latencies) + print_latency(model_latencies, "model_latencies") + print_latency(single_token_latency, "single_token_latency") + + +def ds_inference(model, args): + import megatron.model as mm + engine = deepspeed.init_inference(model=model, + mp_size=args.tensor_model_parallel_size, + mpu=mpu, + dtype=torch.half, + replace_with_kernel_inject=True, + moe_experts=args.num_experts, + moe_type=args.mlp_type) + + return engine.module + +if __name__ == "__main__": + + main() diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/merge_mp_partitions.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/merge_mp_partitions.py new file mode 100644 index 0000000000000000000000000000000000000000..4dc2d99f86d412e255c18f20fe288695c56e46f8 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/merge_mp_partitions.py @@ -0,0 +1,352 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
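+#
+# Merges tensor-model-parallel partitions of a checkpoint into a single partition.
+# Checkpoints that use pipeline model parallelism are not supported and cause the
+# script to exit.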
+ +"""Merge model parallel partitions.""" + +import os +import re +import sys +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), + os.path.pardir))) + +import torch + +from megatron import mpu +from megatron.checkpointing import load_checkpoint, save_checkpoint +from megatron.checkpointing import ensure_directory_exists +from megatron.checkpointing import get_checkpoint_name +from megatron.checkpointing import get_checkpoint_version +from megatron.checkpointing import get_checkpoint_tracker_filename +from megatron.global_vars import set_global_variables, get_args +from megatron.global_vars import rebuild_tokenizer + + +def split_into_partitions(tensor, num_partitions, partition_dim, stride): + + per_partition_size = mpu.utils.divide(tensor.size(partition_dim), + num_partitions) + per_partition_per_stride_size = mpu.utils.divide(per_partition_size, stride) + + partitions_list = torch.split(tensor, + per_partition_per_stride_size, + dim=partition_dim) + + partitions = [] + for i in range(num_partitions): + partition = torch.cat(partitions_list[i::num_partitions], + dim=partition_dim) + partitions.append(partition) + + return partitions + + +def merge_partitions(merged, partitions, partition_dim, stride): + + # Number and size of each partition. + num_partitions = len(partitions) + per_partition_size = None + for partition in partitions: + if per_partition_size is None: + per_partition_size = partition.size(partition_dim) + else: + assert per_partition_size == partition.size(partition_dim) + + def concat_partitions(partitions_): + with torch.no_grad(): + if (per_partition_size * num_partitions) == merged.size( + partition_dim): + torch.cat(partitions_, dim=partition_dim, out=merged) + else: + print(' ***WARNING*** sizes do not match. Will cut ' + 'the merged partitions by {} along dimension {} ' + 'to reduce the size from {} to {} ...'.format( + (per_partition_size * num_partitions) - \ + merged.size(partition_dim), partition_dim, + per_partition_size * num_partitions, + merged.size(partition_dim))) + merged_ = torch.cat(partitions_, dim=partition_dim) + merged_split = torch.split(merged_, merged.size(partition_dim), + dim=partition_dim) + merged_ = merged_split[0] + assert merged_.size(partition_dim) == merged.size(partition_dim) + merged.data.copy_(merged_.data) + + # If stride is 1, then do simple concatination. + if stride == 1: + concat_partitions(partitions) + return + + # For none unity strides, first split based on stride and then group. + per_partition_per_stride_size = mpu.utils.divide(per_partition_size, stride) + # Chunk and build a list. + chunks = None + for i, partition in enumerate(partitions): + chunk = torch.split(partition, + per_partition_per_stride_size, + dim=partition_dim) + + if chunks is None: + chunks = [0]*(num_partitions*len(chunk)) + chunks[i::num_partitions] = chunk + + # Concatinate. 
+ concat_partitions(chunks) + + return + + +def get_model(model_type): + + if model_type == 'BERT': + from pretrain_bert import model_provider + elif model_type == 'GPT': + from pretrain_gpt import model_provider + elif model_type == 'RACE': + from tasks.race.finetune import model_provider + elif model_type == ['MNLI', 'QQP']: + num_classes = 2 + if model_type == 'MNLI': + num_classes = 3 + from megatron.model.classification import Classification + def model_provider(): + return Classification(num_classes=num_classes, num_tokentypes=2) + else: + raise Exception('unrecognized model type: {}'.format(model_type)) + + model = model_provider() + model = model.half() + + return model + + +def get_parallel_checkpoint_name(path): + + tracker_filename = get_checkpoint_tracker_filename(path) + iteration = 0 + with open(tracker_filename, 'r') as f: + metastring = f.read().strip() + iteration = int(metastring) + assert iteration > 0 + checkpoint_name = get_checkpoint_name(path, iteration) + + return checkpoint_name, iteration + + +def test_split_merge(): + + print('testing split and merge ...') + + #[QKV.ROW-COL] + tensor = torch.FloatTensor([[1.11, 1.12, 1.13, 1.14, 1.15], + [1.21, 1.22, 1.23, 1.24, 1.25], + [1.31, 1.32, 1.33, 1.34, 1.35], + [1.41, 1.42, 1.43, 1.44, 1.45], + [2.11, 2.12, 2.13, 2.14, 2.15], + [2.21, 2.22, 2.23, 2.24, 2.25], + [2.31, 2.32, 2.33, 2.34, 2.35], + [2.41, 2.42, 2.43, 2.44, 2.45], + [3.11, 3.12, 3.13, 3.14, 3.15], + [3.21, 3.22, 3.23, 3.24, 3.25], + [3.31, 3.32, 3.33, 3.34, 3.35], + [3.41, 3.42, 3.43, 3.44, 3.45]]) + + num_partitions = 2 + partition_dim = 0 + stride = 3 + partitions = split_into_partitions(tensor, num_partitions, + partition_dim, stride) + + merged = torch.zeros_like(tensor) + merge_partitions(merged, partitions, partition_dim, stride) + + max_error = (merged - tensor).abs().max() + print(' > max error (should be zero): {}'.format(max_error)) + + +def get_mp_merge_args(parser): + """Provide extra arguments required for merging.""" + group = parser.add_argument_group(title='mp merge') + + group.add_argument('--model-type', type=str, required=True, + choices=['BERT', 'GPT', 'RACE', 'MNLI', 'QQP'], + help='Type of the mdoel.') + group.add_argument('--target-pipeline-model-parallel-size', type=int, default=1, + help='Degree of pipeline model parallelism in output model.') + + return parser + + +def main(): + + # Arguments do sanity checks on the world size, but we don't care, + # so trick it into thinking we are plenty of processes + os.environ["WORLD_SIZE"] = f'{2**31}' + + # Args + set_global_variables(extra_args_provider=get_mp_merge_args, + args_defaults = {'use_cpu_initialization': True, + 'micro_batch_size': 1, + 'no_load_optim': True, + 'no_load_rng': True, + 'no_save_optim': True, + 'no_save_rng': True, + 'save_interval': 1}) + args = get_args() + + if args.pipeline_model_parallel_size > 1: + print("Checkpoints with pipeline model parallelism are not currently supported.") + exit() + + model_type = args.model_type + orig_tensor_model_parallel_size = args.tensor_model_parallel_size + args.tensor_model_parallel_size = 1 + tokenizer = rebuild_tokenizer(args) + + print('\n merging model parallel partitions ...') + print(' > number of partitions: {}'.format(orig_tensor_model_parallel_size)) + print(' > checkpoint path: {}'.format(args.load)) + print(' > model parameters:') + print(' number of tokens ................ {} '.format( + tokenizer.vocab_size)) + print(' number of layers ................ 
{}'.format(args.num_layers)) + print(' hidden size ..................... {}'.format(args.hidden_size)) + print(' number of attention heads ....... {}'.format( + args.num_attention_heads)) + print(' maximum position embeddings ..... {}'.format( + args.max_position_embeddings)) + + # Full model. + print('> building the full model ...') + mpu.initialize.set_tensor_model_parallel_world_size(1) + mpu.initialize.set_tensor_model_parallel_rank(0) + mpu.initialize.set_pipeline_model_parallel_world_size(1) + mpu.initialize.set_pipeline_model_parallel_rank(0) + merged_model = get_model(model_type) + + # Build and load partitions. + partitions = [] + iteration = 0 + args.tensor_model_parallel_size = orig_tensor_model_parallel_size + tokenizer = rebuild_tokenizer(args) + mpu.initialize.set_tensor_model_parallel_world_size(args.tensor_model_parallel_size) + for rank in range(args.tensor_model_parallel_size): + # Reset these since load_checkpoint asserts they are 0, but we are loading + # multiple checkpoints in the same process and they get set each time + args.consumed_train_samples = 0 + args.consumed_valid_samples = 0 + + mpu.initialize.set_tensor_model_parallel_rank(rank) + checkpoint_name, iteration = get_parallel_checkpoint_name(args.load) + model_ = get_model(model_type) + print(f'> loading {checkpoint_name} ...') + load_checkpoint(model_, None, None) + print(f'> checkpoint version {get_checkpoint_version()}') + partitions.append(model_) + + # Parameter generators so we can loop through them semiltaneouly. + merged_params_gen = merged_model.named_parameters() + partitions_params_gen = [partition.named_parameters() + for partition in partitions] + while True: + try: + + # Get the params and check names. + name, merged_param = next(merged_params_gen) + print(' > working on {} ...'.format(name)) + print(' merged type: {}, size: {}'.format( + merged_param.dtype, list(merged_param.size()))) + partitions_param = [] + for rank, partition_params_gen in enumerate(partitions_params_gen): + partition_name, partition_param = next(partition_params_gen) + assert partition_name == name + partitions_param.append(partition_param) + print(' partition {} type: {}, size: {}'.format( + rank, partition_param.dtype, list(partition_param.size()))) + + # For the non-parallel parameters, simply copy the rank 0 values. 
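+            # Only parameters that were split across tensor-parallel ranks carry the
+            # `tensor_model_parallel` attribute; replicated parameters are the same on
+            # every rank, so rank 0's copy is used directly.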
+ if not hasattr(merged_param, 'tensor_model_parallel'): + print(' none-parallel parameter, simple copy from rank 0') + with torch.no_grad(): + merged_param.data.copy_(partitions_param[0].data) + # For parallel parameters, merge the values + else: + dim = merged_param.partition_dim + stride = merged_param.partition_stride + print(f' parallel parameter merge with stride {stride} along ' + f'dimention {dim}') + merge_partitions(merged_param, + partitions_param, + dim, + stride) + + except StopIteration: + break + + partitions = [] + args.tensor_model_parallel_size = 1 + args.pipeline_model_parallel_size = args.target_pipeline_model_parallel_size + + assert args.num_layers % args.pipeline_model_parallel_size == 0, \ + 'num_layers must be divisible by target pipeline model parallel size' + layers_per_part = args.num_layers // args.pipeline_model_parallel_size + + tokenizer = rebuild_tokenizer(args) + mpu.initialize.set_tensor_model_parallel_world_size(args.tensor_model_parallel_size) + mpu.initialize.set_tensor_model_parallel_rank(0) + mpu.initialize.set_pipeline_model_parallel_world_size(args.pipeline_model_parallel_size) + + # regex to parse out layer number from param name + layer_re = re.compile('layers\.([0-9]+)') + + if args.pipeline_model_parallel_size > 1: + merged_params = {} + for name, merged_param in merged_model.named_parameters(): + merged_params[name] = merged_param + + for rank in range(args.pipeline_model_parallel_size): + mpu.initialize.set_pipeline_model_parallel_rank(rank) + model = get_model(model_type) + def update_layer_num(m): + # TODO! This assumes no interleaved pipeline execution + layer = int(m.group(1)) + layer += rank * layers_per_part + return f'layers.{layer}' + + for dst_name, partition_param in model.named_parameters(): + if dst_name == "word_embeddings.weight": + # See comment in MegatronModule.initialize_word_embeddings() + src_name = "language_model.embedding.word_embeddings.weight" + else: + # Translate destination layer number (0-N for each partition) + # to source layer number (single-model layer number) + src_name = re.sub(layer_re, update_layer_num, dst_name) + print(f" > copying {src_name} to {dst_name} in rank {rank}'s model") + partition_param.data.copy_(merged_params[src_name].data) + + partitions.append(model) + else: + partitions = [merged_model] + + for rank, model in enumerate(partitions): + mpu.initialize.set_pipeline_model_parallel_rank(rank) + print(f"> saving rank {rank}'s model") + save_checkpoint(iteration, model, None, None) + + print('done :-)') + + +if __name__ == '__main__': + + main() diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/add_id.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/add_id.py new file mode 100644 index 0000000000000000000000000000000000000000..e74e2d4c97f4b8fe1debab570c590d005f7abbd3 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/add_id.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import json +import os +import time + +""" +This code adds id to each json object in a json file. User can add prefix +to the ids. +""" + +if __name__ == '__main__': + + print('parsing the arguments ...') + + parser = argparse.ArgumentParser() + parser.add_argument('--input-file', type=str, default=None, help='Input'\ + ' json file where id needs to be added') + parser.add_argument('--output-file', type=str, default=None, help=\ + 'Output file name with id') + parser.add_argument('--id-prefix', type=str, default=None, help=\ + 'Id prefix') + parser.add_argument('--log-interval', type=int, default=100, + help='Log interval') + args = parser.parse_args() + + print('Adding ids to dataset ...') + + f_input = open(args.input_file, 'r', encoding='utf-8') + f_output = open(args.output_file, 'wb') + + unique_ids = 1 + start_time = time.time() + for row in f_input: + each_row = json.loads(row) + adlr_id_string = args.id_prefix + '-{:010d}'.format(int(unique_ids)) + each_row['adlr_id'] = adlr_id_string + myjson = json.dumps(each_row, ensure_ascii=False) + + f_output.write(myjson.encode('utf-8')) + f_output.write('\n'.encode('utf-8')) + + if unique_ids % args.log_interval == 0: + print(' processed {:9d} documents in {:.2f} seconds ...'.format( \ + unique_ids, time.time() - start_time), flush=True) + + unique_ids += 1 + + # Close the file. + f_input.close() + f_output.close() + + print('done :-)', flush=True) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/cleanup_dataset.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/cleanup_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..77dfbf70027cd56eeba81a30adf6a0f3f948400b --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/cleanup_dataset.py @@ -0,0 +1,115 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
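As a small, hypothetical illustration (document text invented here) of the record shape produced by add_id.py above when run with --id-prefix owt, for the 42nd input line:

import json

row = {"text": "an example openwebtext document ..."}
row["adlr_id"] = "owt" + "-{:010d}".format(42)   # same id format string used by add_id.py
print(json.dumps(row, ensure_ascii=False))
# {"text": "an example openwebtext document ...", "adlr_id": "owt-0000000042"}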
+ + +import ftfy +import json +from langdetect import detect +import numpy as np +import time +import os +import sys + +from tokenizer import Tokenizer + +MIN_DOCUMENT_LENGHT = 128 + + +def print_progress(prefix, start_time, num_docs, num_fixed_text, + num_non_english_docs, chars_non_english_docs, + num_small_docs, chars_small_docs): + + string = prefix + ' | ' + string += 'elapsed time: {:.2f} | '.format(time.time() - start_time) + string += 'documents: {} | '.format(num_docs) + string += 'fixed text: {} | '.format(num_fixed_text) + string += 'non-english: {} | '.format(num_non_english_docs) + string += 'non-english chars: {} | '.format(chars_non_english_docs) + string += 'small docs: {} | '.format(num_small_docs) + string += 'small docs chars: {}'.format(chars_small_docs) + print(string, flush=True) + + +def filter_corpus(filename, out_filename, print_interval=10000): + + print(' > filtering {}'.format(filename)) + + tokenizer = Tokenizer(cache_dir='./cache') + + num_docs = 0 + num_written_docs = 0 + num_small_docs = 0 + num_fixed_text = 0 + num_non_english_docs = 0 + chars_non_english_docs = 0 + chars_small_docs = 0 + start_time = time.time() + with open(out_filename, 'wb') as f: + with open(filename, 'r') as fin: + for line in fin: + try: + num_docs += 1 + myjson = json.loads(line) + # Fix text + text = ftfy.fix_text(myjson['text']) + if text != myjson['text']: + num_fixed_text += 1 + myjson['text'] = text + # Detect language. + if detect(text) != 'en': + print('[non-english text]', myjson) + num_non_english_docs += 1 + chars_non_english_docs += len(text) + continue + # On average each token is 5 characters so 8 is an + # upper bound. + if len(text) < (8 * MIN_DOCUMENT_LENGHT): + tokens = tokenizer.tokenize_document(text) + if len(tokens) < MIN_DOCUMENT_LENGHT: + print('[small document, skipping]:', myjson) + num_small_docs += 1 + chars_small_docs += len(text) + continue + myjson = json.dumps(myjson, ensure_ascii=False) + f.write(myjson.encode('utf-8')) + f.write('\n'.encode('utf-8')) + num_written_docs += 1 + if num_docs % print_interval == 0: + print_progress('[PROGRESS]', start_time, num_docs, + num_fixed_text, num_non_english_docs, + chars_non_english_docs, + num_small_docs, chars_small_docs) + except Exception as e: + print(' skipping ', line, e) + + print_progress('[FINAL]', start_time, num_docs, + num_fixed_text, num_non_english_docs, + chars_non_english_docs, + num_small_docs, chars_small_docs) + + +if __name__ == '__main__': + + print('building gpt2 dataset ...') + + input_filename = sys.argv[1] + output_filename = sys.argv[2] + + print('will be reading {}'.format(input_filename)) + print('and will write the results to {}'.format(output_filename)) + + filter_corpus(input_filename, output_filename) + + diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/filter_ngrams.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/filter_ngrams.py new file mode 100644 index 0000000000000000000000000000000000000000..d9058f462d35123b5db109b1c05c21667f91ec62 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/filter_ngrams.py @@ -0,0 +1,492 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Deduplicate downstream tasks from training dataset. 13-grams have been used. +All split documents with less than 200 characters got filtered. Any document +with more than 10 splits got filtered as well. +""" + +import argparse +from functools import partial +import json +import multiprocessing +import nltk +import pickle +import re +import string +import sys +import time + +def get_words(text): + # get all the lowercase words from text + words, positions = [], [] + for match in re.finditer(r'\w+', text.lower()): + words.append(match.group(0)) + positions.append(match.start()) + return words, positions + +# splits the text +def split_text(text, start_position, remove_char_each_side, seq): + # first part of the text + punctuations = ".!?" + pos = start_position - remove_char_each_side + text_first = "" + while pos > 0 and not text[pos] in punctuations: + pos -= 1 + if pos > 0: + text_first = text[0:pos+1] + + # add length of seq and remove_char_each_side + pos = start_position + len(seq) + remove_char_each_side + + # last part of the text + text_second = "" + while pos < len(text) and not text[pos] in punctuations: + pos += 1 + if pos + 1 < len(text): + text_second = text[pos+1:len(text)] + + return text_first, text_second + +def check_and_clean_text(args, words, ngrams, text, start_position, \ + text_buf_ngram_free, text_buf, local_ngram): + + seq = " ".join(words) + if seq in ngrams: + print(" [matched]: {}".format(seq), flush=True) + + if args.get_ngram_freq_only: + # increase freq of this seq and then only consider the later part + # of the text for further processing + if seq in local_ngram: + local_ngram[seq] += 1 + else: + local_ngram[seq] = 1 + #print(" [increased]: {} {}".format(seq, ngrams[seq]), flush=True) + if (start_position + len(seq) + 1) < len(text): + text_buf.append(text[start_position + len(seq) + 1:len(text)]) + return False + + # split the text + text_first, text_second = split_text(text, start_position, \ + args.remove_char_each_side, seq) + + # first part of ngrams free + if len(text_first) > args.filter_text_char_len: + text_buf_ngram_free.append(text_first) + + # add second part for further processing + if len(text_second) > args.filter_text_char_len: + text_buf.append(text_second) + + return False # not ngram free + + # ngram free + return True + + +def free_ngram(line, args, key, ngrams, ngrams_freq_sorted): + # remove all the ngrams + + try: + myjson = json.loads(line) + text_buf = [myjson[key]] + except Exception as e: + print("Error: {}".format(e), flush=True) + text_buf = [] + + text_buf_ngram_free = [] + local_ngram = {} + while len(text_buf) > 0: + + # get the first one from the buffer + text = text_buf.pop(0) + words, positions = get_words(text) + + ngram_free = True + # find each max n-grams and check dictionary + for i in range(len(words) - args.max_ngram_size + 1): + check_ngram_free = check_and_clean_text(args, words[i:\ + i+args.max_ngram_size], ngrams, text, positions[i], \ + text_buf_ngram_free, text_buf, local_ngram) + + # the seq is ngram free? 
if yes, break + if not check_ngram_free: + ngram_free = False + break + + # if max ngrams doesn't match, check if any other lower n-grams + # within max ngram macthes + for ngram_len, _ in ngrams_freq_sorted: + check_ngram_free = check_and_clean_text(args, words[i:\ + i+ngram_len], ngrams, text, positions[i], \ + text_buf_ngram_free, text_buf, local_ngram) + + # same check as above + if not check_ngram_free: + ngram_free = False + break + + # check break from lower than max ngram loop above + if not ngram_free: + break + + # for the last max n-gram, check all the lower ngrams in it + if ngram_free and len(words) - args.max_ngram_size > 0: + # get the last words of the lax max ngram + last_seq_words = words[(len(words)-args.max_ngram_size):len(words)] + last_seq_start_position = len(words) - args.max_ngram_size + + # check all n-grams lower than the max + for pos, (ngram_len, _) in enumerate(ngrams_freq_sorted): + + # ignore the max ngram as has been considered already + if ngram_len == args.max_ngram_size: + continue + + # find each ngram of ngram_len in max n-grams and check + for i in range(len(last_seq_words) - ngram_len + 1): + check_ngram_free = check_and_clean_text(args, \ + last_seq_words[i:i+ngram_len], ngrams, text,\ + positions[last_seq_start_position+i], \ + text_buf_ngram_free, text_buf, local_ngram) + + if not check_ngram_free: + ngram_free = False + break + + if not ngram_free: + break + + # texts are ngram free + if ngram_free and not args.get_ngram_freq_only: + text_buf_ngram_free.append(text) + + # check if the text has only been trimmed + trimmed = 0 + if not args.get_ngram_freq_only and len(text_buf_ngram_free) == 1 and \ + len(text_buf_ngram_free[0]) < len(myjson[key]): + trimmed = 1 + + return text_buf_ngram_free, trimmed, myjson, local_ngram + +# insert word sequence into dictionary +def insert_dict(words, ngrams, pos): + seq = " ".join(words) + if seq not in ngrams: + ngrams[seq] = 0 + #ngrams[seq] = pos + +# insert each ngram from text into the ngrams dictionary +def compute_ngrams_insert_dict(args, text, ngrams): + words, positions = get_words(text) + if len(words) < args.min_ngram_size: + return + + if len(words) < args.max_ngram_size: + insert_dict(words, ngrams, positions[0]) + + for i in range(len(words) - args.max_ngram_size+1): + insert_dict(words[i:i+args.max_ngram_size], ngrams, positions[i]) + + +# Build ngrams for the lambada dataset +def process_task_lambda(args, task_file, ngrams): + print(' reading from {} and computing ngrams'.format(task_file)) + with open(task_file, 'r') as f: + for line in f: + try: + myjson = json.loads(line) + text = myjson['text'] + compute_ngrams_insert_dict(args, text, ngrams) + except Exception as e: + print('Error:', e) + print(" Entities in ngrams {}".format(len(ngrams)), flush=True) + + +# Build ngrams for the dataset of the given task +def process_task(args, task_name, ngrams): + + print(' reading from {} and computing ngrams'.format('import datasets')) + print(" Current entities in ngrams {}".format(len(ngrams)), flush=True) + # using validation/test data from datasets + from datasets import load_dataset + + entities_in_ngrams = len(ngrams) + + # load the dataset + if task_name == 'squad': + dataset = load_dataset('squad_v2', split='validation') + elif task_name == 'natural_questions': + dataset = load_dataset('natural_questions', split='validation') + elif task_name == 'triviaqa': + dataset = load_dataset('trivia_qa', 'unfiltered', split='test') + elif task_name == 'webqa': + dataset = load_dataset('web_questions', 
split='test') + elif task_name == 'race': + dataset = load_dataset('race', 'all', split='test') + elif task_name == 'drop': + dataset = load_dataset('drop', split='validation') + elif task_name == 'coqa': + dataset = load_dataset('coqa', split='validation') + elif task_name == 'piqa': + dataset = load_dataset('piqa', split='test') + else: + print("Invalid task name: {}".format(task_name), flush=True) + return + + # read the dataset and add to ngrams + for line in dataset: + try: + if task_name in ['squad', 'triviaqa', 'webqa', 'race', 'drop']: + text = line['question'] + compute_ngrams_insert_dict(args, text, ngrams) + elif task_name == 'natural_questions': + text = line['question']['text'] + compute_ngrams_insert_dict(args, text, ngrams) + elif task_name == 'coqa': + all_questions = line['questions'] + for question in all_questions: + compute_ngrams_insert_dict(args, question, ngrams) + elif task_name == 'piqa': + text = line['goal'] + compute_ngrams_insert_dict(args, text, ngrams) + except Exception as e: + print('Error:', e) + + print(" After task {} entities in ngrams {}, added {}".format(task_name, \ + len(ngrams), len(ngrams) - entities_in_ngrams), flush=True) + +def compute_tasks_ngrams(args, ngrams): + start_time = time.time() + for _, task_name in enumerate(args.tasks): + print('Task: {}'.format(task_name), flush=True) + if task_name == 'lambada': + assert args.lambada_path is not None + process_task_lambda(args, args.lambada_path, ngrams) + else: + process_task(args, task_name, ngrams) + print(" Taken time to compute ngrams {:.2f}".format(time.time() - \ + start_time), flush=True) + +def compute_ngram_freq_sorted(args, ngrams): + ngrams_freq = {} + for ngram_key in ngrams.keys(): + length = len(ngram_key.split()) + ngrams_freq[length] = ngrams_freq[length] + 1 if length in \ + ngrams_freq else 1 + + ngrams_freq_sorted = sorted(ngrams_freq.items(), key=lambda item: item[0]) + print(" Ngram frequencies: {}".format(ngrams_freq_sorted), flush=True) + print(" Entities in ngrams {} min_ngram_size {} max_ngram_size {}".format(\ + len(ngrams), ngrams_freq_sorted[0][0], ngrams_freq_sorted[len(\ + ngrams_freq_sorted) -1 ][0]), flush=True) + return ngrams_freq_sorted + +def get_ngrams_below_threshold(args, ngrams, ngrams_below_threshold, \ + dedup_file, dedup_key, ngrams_freq_sorted): + + start_time = time.time() + # get the ngrams frequency + args.get_ngram_freq_only = True + + # Open the large file to process in parallel + num_workers = args.num_threads + pool = multiprocessing.Pool(num_workers) + fin = open(dedup_file, 'r', encoding='utf-8') + free_ngram_abt_partial=partial(free_ngram, args=args, key=dedup_key, \ + ngrams=ngrams, ngrams_freq_sorted=ngrams_freq_sorted) + free_ngrams_abt = pool.imap(free_ngram_abt_partial, fin, 500) + + counter = 0 + for _, _, _, local_ngram in free_ngrams_abt: + counter += 1 + if counter % 1000 == 0: + print(' [compute_stat]> processed {} documents in {:.2f} seconds ...'. 
+ format(counter, time.time() - start_time), flush=True) + for local_key in local_ngram: + if local_key in ngrams: + ngrams[local_key] += 1 + local_ngram = {} + + print(' Time taken to compute statistics {:.2f} seconds'.format(time.time() - \ + start_time), flush=True) + pool.close() + pool.join() + + start_time = time.time() + counter_threshold = 0 + # Get ngram below theadhold + for local_key, local_val in ngrams.items(): + if ngrams[local_key] < args.key_threshold: + print(" [threshold] {} {}".format(local_key, local_val), flush=True) + counter_threshold += 1 + ngrams_below_threshold[local_key] = 1 + + print(' Ngrams below threshold {}'.format(counter_threshold), flush=True) + fin.close() + +def clean_ngrams_below_threshold(args, ngrams_below_threshold, dedup_file, \ + dedup_key): + + start_time = time.time() + # Now actually filter the dataset + args.get_ngram_freq_only = False + #id_prefix = '-'.join(args.tasks[::2]) + id_prefix = '-'.join(args.tasks[::1]) + + # get the range of the size of the ngrams + ngrams_freq_sorted = compute_ngram_freq_sorted(args, ngrams_below_threshold) + + # Open the large file to process in parallel + counter = splitted = ignored = split_mt_thld = trimmed_count = 0 + num_workers = args.num_threads + pool = multiprocessing.Pool(num_workers) + fin = open(dedup_file, 'r', encoding='utf-8') + free_ngram_clean_partial=partial(free_ngram, args=args, key=dedup_key, \ + ngrams=ngrams_below_threshold, ngrams_freq_sorted=ngrams_freq_sorted) + free_ngrams_clean = pool.imap(free_ngram_clean_partial, fin, 500) + + out_f = open(args.output, 'wb') + + for text_buf_ngram_free, trimmed, myjson, _ in free_ngrams_clean: + counter += 1 + try: + + trimmed_count += trimmed + + if len(text_buf_ngram_free) > 1: + splitted += 1 + if len(text_buf_ngram_free) == 0: + ignored += 1 + # more than 10 splits ignored + if len(text_buf_ngram_free) > args.splits_count: + text_buf_ngram_free = [] + split_mt_thld += 1 + + if args.output is not None: + if "split_id" in myjson: + use_prefix = myjson["split_id"] + "-" + else: + use_prefix = "" + + for i in range(len(text_buf_ngram_free)): + split_id_string = id_prefix + '-{:010d}'.format(int(\ + counter)) + '-{:04d}'.format(int(i)) + myjson[dedup_key] = text_buf_ngram_free[i] + myjson["split_id"] = use_prefix + split_id_string + outjson = json.dumps(myjson, ensure_ascii=False) + #outjson = json.dumps({"text":text_buf_ngram_free[i], + # id_prefix+"_split_id":split_id_string}, + # ensure_ascii=False) + out_f.write(outjson.encode('utf-8')) + out_f.write('\n'.encode('utf-8')) + + if counter % 1000 == 0: + print(' [final]> processed {} documents in {:.2f} seconds ...'. + format(counter, time.time() - start_time), flush=True) + except Exception as e: + print('Error:', e) + + print(' [final]> processed {} documents in {:.2f} seconds ...'. 
+ format(counter, time.time() - start_time), flush=True) + + print(' Total docs {} splitted {} ignored {} splits > theshold {} trimmed'\ + ' {}'.format(counter, splitted, ignored, split_mt_thld, trimmed_count)\ + , flush=True) + + pool.close() + pool.join() + + out_f.close() + fin.close() + +if __name__ == '__main__': + + # we use 13-grams, any text less than 200 characters got removed + # any text splitted more than 10 got removed as well + + print('parsing the arguments ...') + + parser = argparse.ArgumentParser() + parser.add_argument('--tasks', nargs = '*', required=True, default=None, \ + help = 'Tasks to use for deduplication: currently ' + ' suuport [lambada, squad, natural_questions,' + ' triviaqa, webqa, race, drop, coqa, and piqa]') + parser.add_argument('--lambada-path', type=str, default=None, + help='Only Lambada task needs the path') + parser.add_argument('--dedup-dataset', nargs = '*', default=None, + help='Dataset to deduplicate with the key to use' + ' e.g. cc.json text') + parser.add_argument('--output', type=str, default=None, + help='Output file name to save dedup dataset') + parser.add_argument('--num-threads', type=int, default=40, + help='Number of threads to use') + # Default dedup values + parser.add_argument('--max-ngram-size', type=int, default=13, + help='Maximum size of ngram to use.') + parser.add_argument('--min-ngram-size', type=int, default=8, + help='Minimum size of ngram to use.') + parser.add_argument('--filter-text-char-len', type=int, default=200, + help='Remove any text below this length.') + parser.add_argument('--key-threshold', type=int, default=10, + help='Number of keys to consider as threshold') + parser.add_argument('--save-dictionary', type=str, default=None, + help='Save the dictionary') + parser.add_argument('--load-dictionary', type=str, default=None, + help='Load the dictionary') + parser.add_argument('--splits-count', type=int, default=10, + help='Remove any documents more than this many splits') + parser.add_argument('--remove-char-each-side', type=int, default=200, + help='Maximum size of ngram to use.') + + args = parser.parse_args() + + assert len(args.dedup_dataset) == 2 + dedup_file = args.dedup_dataset[0] + dedup_key = args.dedup_dataset[1] + + # Setup multi-processing + num_workers = args.num_threads + if args.load_dictionary is None: + + # Build ngrams + ngrams = {} + compute_tasks_ngrams(args, ngrams) + + # get the range of the size of the ngrams + ngrams_freq_sorted = compute_ngram_freq_sorted(args, ngrams) + + # get ngram freq from large file in parallel + # get ngrams below threshold + ngrams_below_threshold = {} + get_ngrams_below_threshold(args, ngrams, ngrams_below_threshold, \ + dedup_file, dedup_key, ngrams_freq_sorted) + + # save the dictionary if needed + if args.save_dictionary is not None: + with open(args.save_dictionary, 'wb') as save_dict_handle: + pickle.dump(ngrams_below_threshold, save_dict_handle) + else: + with open(args.load_dictionary, 'rb') as load_dict_handle: + ngrams_below_threshold = pickle.load(load_dict_handle) + + # filter the large file + if args.output is not None: + clean_ngrams_below_threshold(args, ngrams_below_threshold, \ + dedup_file, dedup_key) + + print('done :-)') diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/merge_jsons.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/merge_jsons.py new file mode 100644 index 0000000000000000000000000000000000000000..ad5ed31295529ac22b53650b81ea7847e6426a2d --- 
/dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/gpt3/tools/openwebtext/merge_jsons.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import glob +import sys +import json +import argparse + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument("--json_path", type=str, default=".", + help="path where all the json files are located") + + parser.add_argument("--output_file", type=str, default="merged_output.json", + help="filename where the merged json should go") + + args = parser.parse_args() + + json_path = args.json_path + out_file = args.output_file + + json_files = glob.glob(json_path + '/*.json') + + counter = 0 + + with open(out_file, 'w') as outfile: + for fname in json_files: + counter += 1 + + if counter % 1024 == 0: + print("Merging at ", counter, flush=True) + + with open(fname, 'r') as infile: + for row in infile: + each_row = json.loads(row) + outfile.write(row) + + + print("Merged file", out_file, flush=True) + + diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/__init__.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b792ca6ecf7cbe1d51c5c1dd72f1f98328fda8b9 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/__init__.py @@ -0,0 +1 @@ +from .resnet import * diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/utils.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..638ef07cd85ea5135920ed1c7364e32297a21030 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/utils.py @@ -0,0 +1,4 @@ +try: + from torch.hub import load_state_dict_from_url +except ImportError: + from torch.utils.model_zoo import load_url as load_state_dict_from_url diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/batch_256.cfg b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/batch_256.cfg new file mode 100644 index 0000000000000000000000000000000000000000..15fe283de5d86b8149476609bb8d5157b8c6c089 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/batch_256.cfg @@ -0,0 +1,39 @@ +#!/bin/bash + +#general param +export RESNET_SIZE=50 +export IMAGENET_DIR=/root/datasets/imagenet/tf_records +export TRAIN_STEPS=999 +export DISPLAY_STEPS=90000 +export STEPS_PER_LOOP=90000 
+export USE_LARS_OPTIMIZER=1 +export CPU_BIND_TYPE=cpu +export EPOCHS_BETWEEN_EVALS=4 +export USE_MLPERF=1 +export NO_EVAL=0 +export TF_BF16_CONVERSION=1 +export USE_HOROVOD=1 +export DATASET_CACHE=true +export SYNTHETIC_DATA=false +export MODELING=false +export NUM_TRAIN_FILES=1024 +export NUM_EVAL_FILES=256 +export HOROVOD_FUSION_THRESHOLD=0 +export NUM_WORKERS_PER_HLS=8 +export HLS_TYPE=HLS2 + +#hp param +export NUM_WORKERS=8 +export BATCH_SIZE=256 +export TRAIN_EPOCHS=35 +export LARS_DECAY_EPOCHS=36 +export EVAL_OFFSET_EPOCHS=3 +export WARMUP_EPOCHS=3 +export BASE_LEARNING_RATE=9 +export WEIGHT_DECAY=0.00005 +export LR_MOMENTUM=0.9 +export LABEL_SMOOTH=0.1 +export STOP_THRESHOLD=0.759 + +unset MPI_TCP_INCLUDE +unset TRAIN_AND_EVAL diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/launch_keras_resnet_hvd.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/launch_keras_resnet_hvd.sh new file mode 100644 index 0000000000000000000000000000000000000000..cf6d1a188db658f6061fa66e578dd99cf9d4ccb3 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/launch_keras_resnet_hvd.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +cd $SCRIPT_DIR/.. +../scripts/launch_keras_resnet_hvd.sh "$@" diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/list_affinity_topology_bare_metal.sh b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/list_affinity_topology_bare_metal.sh new file mode 100644 index 0000000000000000000000000000000000000000..a74f90cd53a9d9f303af9ab655c2afe556479abd --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-TF/list_affinity_topology_bare_metal.sh @@ -0,0 +1,149 @@ +#!/bin/bash + +#Description +#This script outputs a file for each moduleID. +#These files contain the Hthread_sequence on which the process is bound too (this is a restriction and not a reservation). +#We do this by getting the mapping of (ModuleID, pcie_bus_id) from hl-smi +#Then we map the 2 tuple to a numa by opening the file +#/sys/bus/pci/devices//numa_node +#Now we have a 3 tuple (ModuleID, pcie_bus_id, numa_node) +#Lastly we get the Hthread_sequence that correspond to that numa_node from lscpu so now we have +#(ModuleID, pcie_bus_id, numa_node, Hthread_sequence ) +#The Hthread_sequence is then used to bind the process to the specific threads on the numa closest to the PCIE bus. + +affinity_print() +{ + echo "Affinity: "$1 +} + +hl_smi_check() +{ + if [[ ! -x `which hl-smi` ]]; + then + affinity_print "hl-smi could not be found, exiting" + exit 1 + fi +} + +check_env() +{ + if [[ -z "$NUMA_MAPPING_DIR" ]]; + then + affinity_print "Missing env variable \"NUMA_MAPPING_DIR\", exiting!" 
+ exit 1 + fi +} + +create_temp_files() +{ + # create a temp directory, mktemp is used to create a temp directory with a unique name + temp_dir=$(mktemp -d) + + # create temp files for holding outputs + file_hl_smi=$temp_dir/hl_smi.txt + file_module_id=$temp_dir/module_id.txt + file_pcie_bus_id=$temp_dir/pcie_bus_id.txt + file_pcie_numa=$temp_dir/pcie_numa.txt + file_hl_smi=$temp_dir/hl_smi.txt + file_configuration_table=$temp_dir/configuration_table.txt + file_final_output=$NUMA_MAPPING_DIR/.habana_module_topo +} + +create_configuartion_table() +{ + # save the entire hl-smi output to file + hl-smi -L > $file_hl_smi + + #check that the driver is up + if [ $? -eq 1 ]; then + affinity_print "Issue while trying to run hl-smi, aborting..." + exit 1 + fi + + # get the module IDs (unique identifier for each gaudi) + grep "Module ID" $file_hl_smi > $file_module_id + + # get the bus IDs + grep "Bus Id" $file_hl_smi > $file_pcie_bus_id + + # Get the numa for each PCIE bus + for i in `cat $file_pcie_bus_id|awk '{print $4}'`; do + numa_node=`cat /sys/bus/pci/devices/$i/numa_node` + if [ $numa_node -ge 0 ]; then + echo $numa_node >> $file_pcie_numa + else + for i in `hl-smi -L|grep "Bus Id"|awk '{print $4}'`; do affinity_print "PCIE:"$i", NUMA:"`cat /sys/bus/pci/devices/$i/numa_node`; done + affinity_print "Numa mapping isn't set properly, you are most likley running on an unsupported VM, aborting..." + exit 1 + fi + done + + #append output files horizontally + paste $file_module_id $file_pcie_bus_id $file_pcie_numa | awk ' {print $4,$8,$9}' | sort -k1 > $file_configuration_table +} + + +create_thread_list() +{ + no_of_numa_nodes=`lscpu|grep "NUMA node(s):"|awk '{print $3}'` + no_of_gaudis=`cat $file_configuration_table|wc -l` + no_of_used_numa=`cat $file_pcie_numa | uniq | wc -l` + + + for module_id in $(seq 0 $(($no_of_gaudis-1))); do + #grab one pcieid at a time (busID) + pcie_bus_id=`cat $file_configuration_table | awk '{print $2}' | sed -n $(($module_id+1))p` + + #get the corespoinding numanode (pcie_numa) + numa_node=`cat /sys/bus/pci/devices/$pcie_bus_id/numa_node` + + #special barcelona configuration where two sockets are configured to be 4 virtual numa nodes + if [[ $no_of_used_numa -eq 2 && $no_of_numa_nodes -eq 4 ]]; then + #get current node (moduleID) + curr_node=`cat $file_configuration_table | awk '{print ","$3,$1}'| grep ",$numa_node" | awk '{print $2}'|head -1` + if [ $module_id -eq $curr_node ]; then + numa_node=$(($numa_node-1)) + fi + fi + + #get the list of threads + if [ $numa_node -ge 0 ]; then + vector=`lscpu --parse | grep ",$numa_node,,"|awk -F"," '{print $1}'` + vector=`echo $vector | tr ' ' ,` + echo $vector > $NUMA_MAPPING_DIR/.habana_moduleID$module_id + echo $vector >> $temp_dir/.module + fi + done +} + + +add_thread_list_to_config_table() +{ + #put it all together + echo "ModID BusID NUMA CPUs: " > $file_final_output + echo "===== ===== ===== ===== " >> $file_final_output + paste $file_configuration_table $temp_dir/.module >> $file_final_output +} + +clean_up() +{ + #remove the temp dir + if [ ! 
-z "$temp_dir" ]; then + rm -fr $temp_dir + fi +} + +main() +{ + check_env + hl_smi_check + create_temp_files + create_configuartion_table + create_thread_list + add_thread_list_to_config_table + clean_up + affinity_print "Script finished successfully" + exit 0 +} + +main \ No newline at end of file diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/debug.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/debug.py new file mode 100644 index 0000000000000000000000000000000000000000..66f44596be9f24aab7e7ccf61ea7a3c7d7a54631 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/debug.py @@ -0,0 +1,107 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +############################################################################### +# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company +############################################################################### + +from absl import flags +from absl import logging +from tensorflow.core.protobuf import debug_event_pb2 +from tensorflow.python.debug.lib import debug_events_writer +from tensorflow.python.framework import op_callbacks +from tensorflow.python.ops import gen_debug_ops +import tensorflow as tf +import re +import json + +flags.DEFINE_string(name='dump_config', default=None, help='Defines config for tensor dumping') + + +class _DumpCallback(object): + def __init__(self, dump_root, tensor_debug_mode, circular_buffer_size, op_regex): + self._dump_root = dump_root + self._tensor_debug_mode = debug_event_pb2.TensorDebugMode.Value(tensor_debug_mode) + self._circular_buffer_size = circular_buffer_size + self._op_regex = re.compile(op_regex) if isinstance(op_regex, str) else op_regex + self._tfdbg_run_id = '' + self._dump_op_counter = 0 + + debug_writer_args = { + "dump_root" : self._dump_root, + "circular_buffer_size": self._circular_buffer_size + } + + if tf.__version__.startswith("2.4"): + debug_writer_args["tfdbg_run_id"] = self._tfdbg_run_id + + self._writer = debug_events_writer.DebugEventsWriter(**debug_writer_args) + + def callback(self, op_type, inputs, attrs, outputs, op_name=None, graph=None): + if op_name is not None and self._op_regex.match(op_name): + graph_name = "missing-graph-name" + if graph is not None and hasattr(graph, "name"): + graph_name=graph.name + + logging.info("Adding dump op for '%s' of type '%s' from graph '%s'" %(op_name, op_type, graph_name)) + + new_outputs = [] + + for output_slot, output in enumerate(outputs): + debug_identity_op_kwargs = { + "tfdbg_context_id": graph_name, + "op_name": op_name, + "output_slot": output_slot, + "tensor_debug_mode": self._tensor_debug_mode, + "debug_urls": ["file://%s" % self._dump_root], + "name": "dump_%d" % self._dump_op_counter + } + + 
if tf.__version__.startswith("2.4"): + debug_identity_op_kwargs["circular_buffer_size"] = self._circular_buffer_size + debug_identity_op_kwargs["tfdbg_run_id"] = self._tfdbg_run_id + + self._dump_op_counter = self._dump_op_counter + 1 + new_outputs.append(gen_debug_ops.debug_identity_v2(output, **debug_identity_op_kwargs)) + + return new_outputs + else: + return None + + def __enter__(self, *args, **kwargs): + op_callbacks.add_op_callback(self.callback) + logging.info("Enabled tensor dumping") + + def __exit__(self, *args, **kwargs): + op_callbacks.remove_op_callback(self.callback) + logging.info("Disabled tensor dumping") + + def __del__(self): + self._writer.Close() + +class _Dummy(object): + def __enter__(self, *args, **kwargs): + pass + def __exit__(self, *args, **kwargs): + pass + +def dump_callback(config_file=None): + if config_file is not None: + kwargs = json.load(open(config_file, 'r')) + return _DumpCallback(**kwargs) + try: + kwargs = json.load(open(flags.FLAGS.dump_config, 'r')) + return _DumpCallback(**kwargs) + except: + return _Dummy() diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/modeling/__init__.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/modeling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/modeling/performance.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/modeling/performance.py new file mode 100644 index 0000000000000000000000000000000000000000..09f9161044aa2c251ccbfd155c81469e9a3bddc0 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/modeling/performance.py @@ -0,0 +1,56 @@ +# Lint as: python3 +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions and classes related to training performance.""" + +import tensorflow as tf + + +def configure_optimizer(optimizer, + use_float16=False, + use_graph_rewrite=False, + loss_scale="dynamic"): + """Configures optimizer object with performance options.""" + if use_float16: + # Wraps optimizer with a LossScaleOptimizer. This is done automatically + # in compile() with the "mixed_float16" policy, but since we do not call + # compile(), we must wrap the optimizer manually. + optimizer = ( + tf.keras.mixed_precision.LossScaleOptimizer( + optimizer, loss_scale=loss_scale)) + if use_graph_rewrite: + # Note: the model dtype must be 'float32', which will ensure + # tf.ckeras.mixed_precision and + # tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite do not double + # up. 
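+    # The graph rewrite below is the TF1-style automatic mixed precision path,
+    # casting eligible ops to float16 at graph level instead of via the Keras
+    # mixed-precision policy.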
+ optimizer = tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite( + optimizer) + return optimizer + + +def set_mixed_precision_policy(dtype, loss_scale=None): + """Sets mix precision policy.""" + if dtype == tf.float16: + policy = tf.keras.mixed_precision.Policy( + 'mixed_float16', loss_scale=loss_scale) + tf.keras.mixed_precision.set_global_policy(policy) + elif dtype == tf.bfloat16: + policy = tf.keras.mixed_precision.Policy( + 'mixed_bfloat16') + tf.keras.mixed_precision.set_global_policy(policy) + elif dtype == tf.float32: + tf.keras.mixed_precision.set_global_policy('float32') + else: + raise ValueError("Unexpected dtype: %s" % dtype) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/tb_utils.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/tb_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ea28099bd5a9b6e0df0faa00e2611d108167e620 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/tb_utils.py @@ -0,0 +1,357 @@ +import os +import time +import tensorflow as tf +from copy import deepcopy +from tensorboard.plugins.hparams import api as hp +from tensorflow.python.eager import context +from tensorflow.keras import backend as K +from tensorflow.python.ops import summary_ops_v2 +from tensorflow.python.summary import summary as tf_summary +from tensorflow.python.training.summary_io import SummaryWriterCache +from tensorflow.compat.v1.keras.callbacks import TensorBoard, Callback + + +def _remove_prefix(s, prefix): + if s.startswith(prefix): + s = s[len(prefix):] + return s + + +def _parse_precision(): + flag = os.environ.get('TF_BF16_CONVERSION', '0') + flag = flag.lower() + try: + value = int(flag) + except: + value = -1 + + if flag == 'false' or value == 0: + return 'fp32' + elif flag == 'true' or value == 1: + return 'bf16' + return flag + + +def _set_precision_if_missing(hparams: dict): + if 'precision' not in hparams: + hparams['precision'] = _parse_precision() + return hparams + + +def _copy_and_clean_hparams(hparams: dict): + hparams_ = dict() + for name, value in hparams.items(): + if isinstance(value, (str, bool, int, float)): + hparams_[name] = value + continue + + try: + hparams_[name] = str(value) + tf.compat.v1.logging.info( + f'Type of parameter "{name}" is not one of (bool, int, float, str). ' + 'It will be saved as a string.') + except: + tf.compat.v1.logging.info( + f'Conversion of parameter "{name}" to string failed. 
' + 'Parameter will not be saved.') + + return hparams_ + + +def write_hparams_v1(writer, hparams: dict): + hparams = _copy_and_clean_hparams(hparams) + hparams = _set_precision_if_missing(hparams) + + with tf.compat.v1.Graph().as_default(): + if isinstance(writer, str): + writer = SummaryWriterCache.get(writer) + summary = hp.hparams_pb(hparams).SerializeToString() + writer.add_summary(summary) + + +def write_hparams_v2(writer, hparams: dict): + hparams = _copy_and_clean_hparams(hparams) + hparams = _set_precision_if_missing(hparams) + + with writer.as_default(): + hp.hparams(hparams) + + +class ExamplesPerSecondEstimatorHook(tf.compat.v1.train.StepCounterHook): + """Calculate and report global_step/sec and examples/sec during runtime.""" + # Copy-pasted from tensorflow_estimator/python/estimator/tpu/tpu_estimator.py + + def __init__(self, + batch_size=None, + every_n_steps=1, + every_n_secs=None, + output_dir=None, + summary_writer=None, + extra_metrics=None, + log_global_step=False, + verbose=False): + super().__init__( + every_n_steps=every_n_steps, + every_n_secs=every_n_secs, + output_dir=output_dir, + summary_writer=summary_writer) + self._metrics = extra_metrics or {} + self._verbose = verbose + if log_global_step: + # Because estimator will log global_step/sec by default + # when log_step_count_steps is not None saving it here + # would duplicate events in TensorBoard. + # Use log_global_step=True when RunConfig.log_step_count_step=None + self._metrics['global_step/sec'] = 1 + if batch_size is not None: + self._metrics['examples/sec'] = batch_size + + def _add_summary(self, tag, value, step): + Summary = tf.compat.v1.Summary + global_step_summary = Summary(value=[ + Summary.Value(tag=tag, simple_value=value) + ]) + self._summary_writer.add_summary(global_step_summary, step) + if self._verbose: + tf.compat.v1.logging.info(f'{tag}: {value}') + + def _log_and_record(self, elapsed_steps, elapsed_time, global_step): + global_step_per_sec = elapsed_steps / elapsed_time + if self._summary_writer is not None: + for name, factor in self._metrics.items(): + value = factor * global_step_per_sec + self._add_summary(name, value, global_step) + + def after_create_session(self, session, coord): + self._timer.reset() + + +class ExamplesPerSecondKerasHookV1(Callback): + def __init__(self, + every_n_steps=1, + every_n_secs=None, + output_dir=None, + summary_writer=None, + batch_size=None): + self.writer = summary_writer or SummaryWriterCache.get(output_dir) + self._timer = tf.compat.v1.train.SecondOrStepTimer( + every_n_secs, every_n_steps) + self._total_examples = 0 + self._should_trigger = True + self._batch_size = batch_size + + def on_train_begin(self, logs=None): + self._timer.reset() + + def on_train_batch_begin(self, batch, logs=None): + self._should_trigger = self._timer.should_trigger_for_step( + logs.get('batch', batch)) + + def on_train_batch_end(self, batch, logs=None): + step = logs.get('batch', batch) + self._total_examples += logs.get('size', 0) + if self._should_trigger: + elapsed_time, elapsed_steps = self._timer.update_last_triggered_step( + step) + if elapsed_time is not None: + total_examples = self._total_examples + if self._batch_size is not None: + total_examples = self._batch_size * elapsed_steps + self._log_and_record( + elapsed_steps, elapsed_time, step, total_examples) + self._total_examples = 0 + + def _log_and_record(self, elapsed_steps, elapsed_time, + global_step, total_examples=None): + Summary = tf.compat.v1.Summary + global_step_per_sec = elapsed_steps / 
elapsed_time + if self.writer is not None: + global_step_summary = Summary(value=[ + Summary.Value( + tag='global_step/sec', simple_value=global_step_per_sec) + ]) + self.writer.add_summary(global_step_summary, global_step) + if total_examples is not None: + examples_per_sec = total_examples / elapsed_time + example_summary = Summary(value=[ + Summary.Value(tag='examples/sec', + simple_value=examples_per_sec) + ]) + self.writer.add_summary(example_summary, global_step) + + +class ExamplesPerSecondKerasHookV2(ExamplesPerSecondKerasHookV1): + def __init__(self, + every_n_steps=1, + every_n_secs=None, + output_dir=None, + summary_writer=None, + batch_size=None): + writer = summary_writer or summary_ops_v2.create_file_writer_v2(output_dir) + super().__init__(every_n_steps, every_n_secs, output_dir, writer, batch_size) + + def _log_and_record(self, elapsed_steps, elapsed_time, + global_step, total_examples=None): + global_step_per_sec = elapsed_steps / elapsed_time + if self.writer is not None: + with self.writer.as_default(), summary_ops_v2.always_record_summaries(): + summary_ops_v2.scalar('global_step/sec', global_step_per_sec, + step=global_step) + if total_examples is not None: + examples_per_sec = total_examples / elapsed_time + summary_ops_v2.scalar('examples/sec', examples_per_sec, + step=global_step) + + +ExamplesPerSecondKerasHook = ExamplesPerSecondKerasHookV1 + + +class TBSummary(object): + """ + Creates a proxy for FileWriter for TensorBoard. + + :param log_dir: - path where experiment is running (usually the same as + model_dir in Estimator) + """ + + def __init__(self, log_dir: str): + super().__init__() + self._log_dir = log_dir + self._session = None + + def __enter__(self): + self._session = tf.compat.v1.Session() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if self._session: + self._session.close() + self._session = None + + def add_scalar(self, tag, value, global_step=None): + with self._session: + writer = SummaryWriterCache.get(self._log_dir) + summary = tf.compat.v1.Summary( + value=[tf.compat.v1.Summary.Value(tag=tag, simple_value=value)]) + event = tf.compat.v1.Event(summary=summary) + event.wall_time = time.time() + event.step = global_step + writer.add_event(event) + + +class TensorBoardWithHParamsV1(TensorBoard): + """ + Adds TensorBoard visualization to training process. + + Writes training tfevent file into default log directory, but + stores evaluation in log_dir/eval subdirectory. + """ + + def __init__(self, hparams, *args, **kwargs): + super().__init__(*args, **kwargs) + self.hparams = hparams + self._train_summary = None + self._eval_summary = None + + def _switch_writer(self, mode): + self.writer = self._train_summary if mode == 'train' else self._eval_summary + + def _init_writer(self, model): + """Sets file writer.""" + if context.executing_eagerly(): + raise NotImplementedError('hook does not support eager execution') + + self._train_summary = SummaryWriterCache.get(self.log_dir) + self._eval_summary = SummaryWriterCache.get( + os.path.join(self.log_dir, 'eval')) + self._switch_writer('train') + + write_hparams_v1(self.writer, self.hparams) + + def _write_custom_summaries(self, step, logs=None): + """ + This methods works on the assumption that metrics containing `val` + in name are related to validation (that's the default in Keras). 
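+        Validation metrics are written with the evaluation writer (log_dir/eval)
+        after their `val_` markers are stripped; all remaining metrics are written
+        with the training writer.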
+ """ + + logs = logs or {} + train_logs = {} + eval_logs = {} + + for name, value in logs.items(): + if 'val' in name: + if name.startswith('batch_val_'): + name = 'batch_' + _remove_prefix(name, 'batch_val_') + elif name.startswith('epoch_val_'): + name = _remove_prefix(name, 'epoch_val_') + eval_logs[name] = value + else: + if name.startswith('batch_'): + name = _remove_prefix(name, 'batch_') + train_logs[name] = value + + self._switch_writer('eval') + super()._write_custom_summaries(step, eval_logs) + self._switch_writer('train') + super()._write_custom_summaries(step, train_logs) + + +class TensorBoardWithHParamsV2(TensorBoard): + """ + Adds TensorBoard visualization to training process. + + Writes training tfevent file into default log directory, but + stores evaluation in log_dir/eval subdirectory. + """ + + def __init__(self, hparams, *args, **kwargs): + super().__init__(*args, **kwargs) + self.hparams = hparams + + def set_model(self, model): + """Sets Keras model and writes graph if specified.""" + self.model = model + self._log_write_dir = self._get_log_write_dir() + + self._train_dir = self._log_write_dir + self._train_step = self.model._train_counter # pylint: disable=protected-access + + self._val_dir = os.path.join(self._log_write_dir, 'eval') + self._val_step = self.model._test_counter # pylint: disable=protected-access + + self._writers = {} # Resets writers. + + self._should_write_train_graph = False + if self.write_graph: + self._write_keras_model_summary() + self._should_write_train_graph = True + if self.embeddings_freq: + self._configure_embeddings() + + write_hparams_v2(self._train_writer, self.hparams) + + def _log_epoch_metrics(self, epoch, logs): + """Writes epoch metrics out as scalar summaries. + + Arguments: + epoch: Int. The global step to use for TensorBoard. + logs: Dict. Keys are scalar summary names, values are scalars. + """ + if not logs: + return + + train_logs = {k: v for k, + v in logs.items() if not k.startswith('val_')} + val_logs = {k: v for k, v in logs.items() if k.startswith('val_')} + train_logs = self._collect_learning_rate(train_logs) + + with summary_ops_v2.always_record_summaries(): + if train_logs: + with self._train_writer.as_default(): + for name, value in train_logs.items(): + summary_ops_v2.scalar(name, value, step=epoch) + if val_logs: + with self._val_writer.as_default(): + for name, value in val_logs.items(): + name = name[4:] # Remove 'val_' prefix. + summary_ops_v2.scalar(name, value, step=epoch) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/__init__.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..931c2ef11db4a949e6c2e95bca44e36bac1241e9 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/controller.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/controller.py new file mode 100644 index 0000000000000000000000000000000000000000..0248135b3747e2db6bd954db114e4c3d426aa76e --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/controller.py @@ -0,0 +1,395 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A light weight utilities to train TF2 models.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import time +import os + +from absl import logging + +import tensorflow.compat.v2 as tf +from typing import Callable, Dict, Optional, Text + +from TensorFlow.common.training import utils + + +class Controller(object): + """Class that facilitates training and evaluation of models.""" + + def __init__( + self, + strategy: Optional[tf.distribute.Strategy] = None, + train_fn: Optional[Callable[[tf.Tensor], + Optional[Dict[Text, tf.Tensor]]]] = None, + eval_fn: Optional[Callable[[tf.Tensor], + Optional[Dict[Text, tf.Tensor]]]] = None, + warmup_fn: Optional[Callable[[tf.Tensor], + Optional[Dict[Text, tf.Tensor]]]] = None, + global_step: Optional[tf.Variable] = None, + # Train related + train_steps: Optional[int] = None, + steps_per_loop: Optional[int] = None, + summary_dir: Optional[Text] = None, + checkpoint_manager: Optional[tf.train.CheckpointManager] = None, + # summary related + summary_interval: Optional[int] = None, + # Evaluation related + eval_summary_dir: Optional[Text] = None, + eval_steps: Optional[int] = None, + eval_interval: Optional[int] = None, + eval_offset: Optional[int] = 0, + # Warmup related + device_warmup_steps: Optional[int] = None, + train_summary_writer: Optional[tf.summary.SummaryWriter] = None, + eval_summary_writer: Optional[tf.summary.SummaryWriter] = None): + """Constructs a `Controller` instance. + + Args: + strategy: An instance of `tf.distribute.Strategy`. + train_fn: A callable defined as `def train_fn(num_steps)`, which + `num_steps` indicates the number of steps to run for each loop. + eval_fn: A callable defined as `def eval_fn(num_steps)`, which `num_steps` + indicates the number of steps for one evaluation. + global_step: An integer `tf.Variable` indicating the global training step + number. Usually this can be obtained from `iterations` property of the + model's optimizer (e.g. 
`self.optimizer.iterations`), or users can + create their own global step variable as well. If the users create their + own global step variable, it is recommended to create the `tf.Variable` + inside strategy scope, and with + `aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA`. + train_steps: The total (maximum) number of training steps to perform. + steps_per_loop: The number of steps to run in each "inner loop" of + training (passed to the `num_steps` parameter of `train_fn`). + summary_dir: The directory to restore and write checkpoints and summaries. + If None, it will be set to `checkpoint_manager.directory`. + checkpoint_manager: An instance of `tf.train.CheckpointManager`. + summary_interval: Step interval for training summaries. Note that this + argument only applies to the summaries outside the training loop. If the + value is None, then training summaries are not enabled. + eval_summary_dir: The directory to write eval summaries. If None, it will + be set to `summary_dir`. + eval_steps: Number of steps to run evaluation. + eval_interval: Step interval for evaluation. If None, will skip evaluation + in the middle of training. Note that evaluation only happens outside the + training loop, which the loop iteration is specify by `steps_per_loop` + parameter. + eval_offset: Step number of the first evaluation. + train_summary_writer: Instance of tf.summary.SummaryWriter that should be + used for saving training summaries to TensorBoard. + eval_summary_writer: Instance of tf.summary.SummaryWriter that should be + used for saving evaluation summaries to TensorBoard. + + Raises: + ValueError: If both `train_fn` and `eval_fn` are None. + ValueError: If `train_fn` is not None and `train_steps` is None. + ValueError: If `steps_per_loop` is None when `train_fn` is provided. + ValueError: If `steps_per_loop` is not a positive integer. + """ + if train_fn is None and eval_fn is None: + raise ValueError("`train_fn` and `eval_fn` should not both be None") + + # TODO(rxsang): Support training until exhaustion by passing + # `train_steps=-1`. Currently it cannot be supported with a host training + # loop because break statements are not supported with distributed dataset. + if train_fn is not None: + if train_steps is None: + raise ValueError("`train_steps` is required when `train_fn` is " + "provided.") + if steps_per_loop is None: + raise ValueError("`steps_per_loop` is required when `train_fn is " + "provided.") + if not isinstance(steps_per_loop, int) or steps_per_loop < 1: + raise ValueError("`steps_per_loop` should be a positive integer") + if summary_interval is not None and summary_interval <= 0: + raise ValueError("`summary_interval` should be larger than 0") + + self.strategy = strategy or tf.distribute.get_strategy() + + self.train_fn = train_fn + self.eval_fn = eval_fn + self.warmup_fn = warmup_fn + self.global_step = global_step + self.checkpoint_manager = checkpoint_manager + self.last_eval_output = None + + if self.train_fn is not None: + self.train_steps = train_steps + self.steps_per_loop = steps_per_loop + self.summary_dir = summary_dir or checkpoint_manager.directory + + self.summary_interval = summary_interval + if train_summary_writer is not None: + summary_writer = train_summary_writer + summary_writer = tf.summary.create_file_writer( + self.summary_dir) if self.summary_interval else None + # TODO(rxsang): Consider pass SummaryManager directly into Controller for + # maximum customizability. 
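Taken together, the arguments documented above suggest the following construction pattern. A hedged sketch, assuming `Controller` is imported from `TensorFlow.common.training.controller` and that `my_train_fn`/`my_eval_fn` are user-defined callables (see the train_fn contract sketch further below):

import tensorflow as tf

strategy = tf.distribute.get_strategy()
with strategy.scope():
    # Create the step counter inside the strategy scope, as recommended above.
    global_step = tf.Variable(
        0, dtype=tf.int64, trainable=False,
        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)

checkpoint = tf.train.Checkpoint(global_step=global_step)  # model/optimizer would be added too
ckpt_manager = tf.train.CheckpointManager(
    checkpoint, directory='/tmp/ckpts', max_to_keep=3,
    step_counter=global_step, checkpoint_interval=500)

controller = Controller(
    strategy=strategy,
    train_fn=my_train_fn,             # hypothetical user callable
    eval_fn=my_eval_fn,               # hypothetical user callable
    global_step=global_step,
    train_steps=1000,
    steps_per_loop=100,
    checkpoint_manager=ckpt_manager,  # summary_dir defaults to its directory
    summary_interval=100,
    eval_steps=50,
    eval_interval=500)
controller.train(evaluate=True)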
+ self.summary_manager = utils.SummaryManager( + summary_writer, + tf.summary.scalar, + global_step=self.global_step, + summary_interval=self.summary_interval) + + if self.eval_fn is not None: + if eval_summary_dir is None and self.summary_dir is not None: + eval_summary_dir = os.path.join(self.summary_dir, 'eval') + if eval_summary_writer is not None: + summary_writer = eval_summary_writer + elif eval_summary_dir: + summary_writer = tf.summary.create_file_writer(eval_summary_dir) + else: + summary_writer = None + self.eval_summary_manager = utils.SummaryManager( + summary_writer, tf.summary.scalar, global_step=self.global_step) + + self.eval_steps = eval_steps + self.eval_interval = eval_interval + self.eval_offset = eval_offset + + # Create and initialize the interval triggers. + self.eval_trigger = utils.IntervalTrigger(self.eval_interval, + self.eval_offset) + + if self.warmup_fn is not None: + self.device_warmup_steps = device_warmup_steps + + if self.global_step: + tf.summary.experimental.set_step(self.global_step) + + # Restore Model if needed. + if self.checkpoint_manager is not None: + model_restored = self._restore_model() + if not model_restored and self.checkpoint_manager.checkpoint_interval: + # If the model is not restored from a checkpoint, save an initial + # checkpoint. + ckpt_path = self.checkpoint_manager.save( + checkpoint_number=self.global_step) + logging.info("Saved checkpoins in %s", ckpt_path) + + def _restore_model(self, checkpoint_path=None): + """Restore or initialize the model. + + Args: + checkpoint_path: An optional string indicates the checkpoint path to + restore. If None, will restore from `self.checkpoint_manager`. + + Returns: + True if the latest checkpoint is found or restored. Otherwise False. + """ + with self.strategy.scope(): + # Checkpoint restoring should be inside scope. b/139450638 + if checkpoint_path is not None: + self.checkpoint_manager.checkpoint.restore(checkpoint_path) + return True + return self.checkpoint_manager.restore_or_initialize() + + def _evaluate_once(self, current_step): + """Runs the evaluation once.""" + logging.info("Start evaluation at step: %s", current_step) + + with self.eval_summary_manager.summary_writer.as_default(): + eval_outputs = self.eval_fn(self.eval_steps) + + if eval_outputs: + eval_outputs = tf.nest.map_structure( + lambda x: (x if isinstance(x, (float, bool)) else x.numpy()), + eval_outputs) + + info = "step: {} evaluation metric: {}".format( + current_step, eval_outputs) + self._log_info(info) + self.last_eval_output = eval_outputs + + self.eval_summary_manager.write_summaries(eval_outputs) + self.eval_summary_manager.flush() + if "continue_training" in eval_outputs.keys(): + return eval_outputs["continue_training"] + else: + return True + + def _maybe_save_checkpoints(self, current_step, force_trigger=False): + if self.checkpoint_manager.checkpoint_interval: + ckpt_path = self.checkpoint_manager.save( + checkpoint_number=current_step, check_interval=not force_trigger) + if ckpt_path is not None: + logging.info("Saved checkpoins in %s", ckpt_path) + + def _maybe_evaluate(self, current_step, force_trigger=False): + if self.eval_trigger(current_step, force_trigger): + return self._evaluate_once(current_step) + return True + + def _log_info(self, message): + """Logs `message` to the `info` log, and also prints to stdout.""" + logging.info(message) + print(message) + + def train(self, evaluate=True, num_acc_steps:int=1, manifest_path=None): + """Runs the training, with optional evaluation. 
+ + This handles evaluation, gathering summaries, and saving checkpoints. + + Args: + evaluate: A boolean indicates whether to perform evaluation during + training. + num_acc_steps: Number of gradient accumulation steps. + + Raises: + RuntimeError: If `global_step` is not updated correctly in `train_fn`. + """ + if self.train_fn is None: + raise ValueError("`self.train_fn` is required when calling `train` " + "method.") + if self.global_step is None: + raise ValueError("`self.global_step` is required when calling `train` " + "method.") + if evaluate and self.eval_fn is None: + raise ValueError("`self.eval_fn` is required when calling `train` method " + "with `evaluate=True`") + + step_timer = _StepTimer(self.global_step) + current_step = self.global_step.numpy() + logging.info("Train at step %s of %s", current_step, self.train_steps) + while current_step < self.train_steps: + # Calculates steps to run for the next train loop. + steps_per_loop = min(self.train_steps - current_step, self.steps_per_loop) + logging.info("Entering training loop with %s steps, at step %s of %s", + steps_per_loop, current_step, self.train_steps) + current_step += steps_per_loop + steps_per_loop = tf.convert_to_tensor(steps_per_loop, dtype=tf.int32) + + with self.summary_manager.summary_writer.as_default(): + train_outputs = self.train_fn(steps_per_loop, num_acc_steps, manifest_path) + + # Updates and verifies the current step after a training loop finishes. + if current_step != self.global_step.numpy(): + raise RuntimeError("`self.train_fn` is not updating `global_step` " + "correctly, expected: %s, actual: %s" % + (current_step, self.global_step.numpy())) + + # Print information like metrics and steps_per_second after a training + # loop. + if train_outputs: + train_outputs = tf.nest.map_structure( + lambda x: x.numpy(), train_outputs) + steps_per_second = step_timer.steps_per_second() + info = "step: {} steps_per_second: {:.2f} {}".format( + current_step, steps_per_second, train_outputs) + self._log_info(info) + + train_outputs = train_outputs or {} + train_outputs["steps_per_second"] = steps_per_second + self.summary_manager.write_summaries(train_outputs) + + self._maybe_save_checkpoints(current_step) + + if evaluate: + continue_training = self._maybe_evaluate(current_step) + if not continue_training: + break + + self.summary_manager.write_summaries(train_outputs, always_write=True) + self.summary_manager.flush() + self._maybe_save_checkpoints(current_step, force_trigger=True) + + def evaluate(self, continuous=False, timeout_fn=None): + """Runs the evaluation. + + Args: + continuous: If `True`, will continously monitor the checkpoint directory + to evaluate on the latest checkpoint. If `False`, will do the evaluation + once. + timeout_fn: Optional callable to call after a timeout. If the function + returns True, then it means that no new checkpoints will be generated + and the iterator will exit. + + Raises: + ValueError: If no checkpoint found in `self.checkpoint_manager.directory`. 
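The loop above fixes the contract for `train_fn`: it is invoked as `train_fn(steps_per_loop, num_acc_steps, manifest_path)`, must advance `global_step` by exactly the requested number of steps, and may return a dictionary of metric tensors that is logged and written as summaries. A hypothetical minimal implementation, for illustration only:

def my_train_fn(num_steps, num_acc_steps=1, manifest_path=None):
    for _ in range(int(num_steps)):
        # ... one optimizer update (or num_acc_steps accumulated micro-batches) ...
        global_step.assign_add(1)   # keeps the Controller's step check satisfied
    return {'loss': tf.constant(0.0)}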
+ """ + if self.eval_fn is None: + raise ValueError("`self.eval_fn` should not be None to call " + "`evaluate()` method.") + + if not continuous and timeout_fn is not None: + raise ValueError("`timeout_fn` can be only passed when `continuous` is " + "True") + + if continuous: + for checkpoint_path in tf.train.checkpoints_iterator( + self.checkpoint_manager.directory, timeout_fn=timeout_fn): + self._restore_model(checkpoint_path) + self._evaluate_once(self.global_step.numpy()) + return + + latest_checkpoint = self.checkpoint_manager.latest_checkpoint + if not latest_checkpoint: + raise ValueError("no checkpoint found in dir %s" % + self.checkpoint_manager.directory) + self._restore_model() + self._evaluate_once(self.global_step.numpy()) + + def warmup(self): + """Runs device warmup. + + This handles running a training loop on dummy data to move TF function + compilation outside of the training loop. + + """ + if self.global_step is None: + raise ValueError("`self.global_step` is required when calling `warmup` " + "method.") + + step_timer = _StepTimer(self.global_step) + current_step = self.global_step.numpy() + logging.info("Warmup at step %s of %s", current_step, + self.device_warmup_steps) + while current_step < self.device_warmup_steps: + # Calculates steps to run for the next train loop. + steps_per_loop = self.device_warmup_steps + logging.info("Entering warmup loop with %s steps, at step %s of %s", + steps_per_loop, current_step, self.device_warmup_steps) + current_step += steps_per_loop + steps_per_loop = tf.convert_to_tensor(steps_per_loop, dtype=tf.int32) + + with self.summary_manager.summary_writer.as_default(): + self.warmup_fn(steps_per_loop) + + steps_per_second = step_timer.steps_per_second() + info = "step: {} steps_per_second: {:.2f}".format( + current_step, steps_per_second) + self._log_info(info) + +class _StepTimer(object): + """Utility class for measuring steps/second.""" + + def __init__(self, step): + self.step = step + self.start() + + def start(self): + self.last_iteration = self.step.numpy() + self.last_time = time.time() + + def steps_per_second(self, restart=True): + value = ((self.step.numpy() - self.last_iteration) / + (time.time() - self.last_time)) + if restart: + self.start() + return value diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/grad_utils.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/grad_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..16f13db938bd184ae4231b13ff4a4c82eeb20fdb --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/grad_utils.py @@ -0,0 +1,143 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Some gradient util functions to help users writing custom training loop.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +from absl import logging + +import tensorflow.compat.v2 as tf + + +def _filter_grads(grads_and_vars): + """Filter out iterable with grad equal to None.""" + grads_and_vars = tuple(grads_and_vars) + if not grads_and_vars: + return grads_and_vars + filtered = [] + vars_with_empty_grads = [] + for grad, var in grads_and_vars: + if grad is None: + vars_with_empty_grads.append(var) + else: + filtered.append((grad, var)) + filtered = tuple(filtered) + if not filtered: + raise ValueError("No gradients provided for any variable: %s." % + ([v.name for _, v in grads_and_vars],)) + if vars_with_empty_grads: + logging.warning( + ("Gradients do not exist for variables %s when minimizing the loss."), + ([v.name for v in vars_with_empty_grads])) + return filtered + + +def _filter_and_allreduce_gradients(grads_and_vars, + allreduce_precision="float32"): + """Filter None grads and then allreduce gradients in specified precision. + + This utils function is used when users intent to explicitly allreduce + gradients and customize gradients operations before and after allreduce. + The allreduced gradients are then passed to optimizer.apply_gradients( + experimental_aggregate_gradients=False). + + Arguments: + grads_and_vars: gradients and variables pairs. + allreduce_precision: Whether to allreduce gradients in float32 or float16. + + Returns: + pairs of allreduced non-None gradients and variables. + """ + filtered_grads_and_vars = _filter_grads(grads_and_vars) + (grads, variables) = zip(*filtered_grads_and_vars) + if allreduce_precision == "float16": + grads = [tf.cast(grad, "float16") for grad in grads] + allreduced_grads = tf.distribute.get_replica_context().all_reduce( + tf.distribute.ReduceOp.SUM, grads) + if allreduce_precision == "float16": + allreduced_grads = [tf.cast(grad, "float32") for grad in allreduced_grads] + return allreduced_grads, variables + + +def _run_callbacks(callbacks, grads_and_vars): + for callback in callbacks: + grads_and_vars = callback(grads_and_vars) + return grads_and_vars + + +def minimize_using_explicit_allreduce(tape, + optimizer, + loss, + trainable_variables, + pre_allreduce_callbacks=None, + post_allreduce_callbacks=None): + """Minimizes loss for one step by updating `trainable_variables`. + + Minimizes loss for one step by updating `trainable_variables`. + This explicitly performs gradient allreduce, instead of relying on implicit + allreduce in optimizer.apply_gradients(). If training using FP16 mixed + precision, explicit allreduce will aggregate gradients in FP16 format. + For TPU and GPU training using FP32, explicit allreduce will aggregate + gradients in FP32 format. + + Arguments: + tape: An instance of `tf.GradientTape`. + optimizer: An instance of `tf.keras.optimizers.Optimizer`. + loss: the loss tensor. + trainable_variables: A list of model Variables. + pre_allreduce_callbacks: A list of callback functions that takes gradients + and model variables pairs as input, manipulate them, and returns a new + gradients and model variables pairs. The callback functions will be + invoked in the list order and before gradients are allreduced. + With mixed precision training, the pre_allreduce_allbacks will be + applied on scaled_gradients. 
Default is no callbacks. + post_allreduce_callbacks: A list of callback functions that takes + gradients and model variables pairs as input, manipulate them, and + returns a new gradients and model variables paris. The callback + functions will be invoked in the list order and right before gradients + are applied to variables for updates. Default is no callbacks. + """ + if isinstance(optimizer, + tf.keras.mixed_precision.LossScaleOptimizer): + # FP16 GPU code path + with tape: + scaled_loss = optimizer.get_scaled_loss(loss) + scaled_grads = tape.gradient(scaled_loss, trainable_variables) + grads_and_vars = zip(scaled_grads, trainable_variables) + if pre_allreduce_callbacks: + grads_and_vars = _run_callbacks(pre_allreduce_callbacks, grads_and_vars) + (allreduced_scaled_grads, + filtered_training_vars) = _filter_and_allreduce_gradients( + grads_and_vars, allreduce_precision="float16") + allreduced_unscaled_grads = optimizer.get_unscaled_gradients( + allreduced_scaled_grads) + grads_and_vars = zip(allreduced_unscaled_grads, filtered_training_vars) + else: + # TPU or FP32 GPU code path + grads = tape.gradient(loss, trainable_variables) + grads_and_vars = zip(grads, trainable_variables) + if pre_allreduce_callbacks: + grads_and_vars = _run_callbacks(pre_allreduce_callbacks, grads_and_vars) + (allreduced_grads, + filtered_training_vars) = _filter_and_allreduce_gradients( + grads_and_vars, allreduce_precision="float32") + grads_and_vars = zip(allreduced_grads, filtered_training_vars) + if post_allreduce_callbacks: + grads_and_vars = _run_callbacks(post_allreduce_callbacks, grads_and_vars) + optimizer.apply_gradients( + grads_and_vars, experimental_aggregate_gradients=False) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/runnable.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/runnable.py new file mode 100644 index 0000000000000000000000000000000000000000..1af6eca06a337506a68d6329e0da16c9ca095e0a --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/runnable.py @@ -0,0 +1,79 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
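A hedged sketch of how `minimize_using_explicit_allreduce` is typically called from a custom distributed train step, mirroring its docstring; `strategy`, `model`, `optimizer` and `loss_fn` are placeholders:

import tensorflow as tf
# from TensorFlow.common.training.grad_utils import minimize_using_explicit_allreduce

@tf.function
def train_step(features, labels):
    def step_fn(x, y):
        with tf.GradientTape() as tape:
            loss = loss_fn(y, model(x, training=True))
        # Gradients are filtered, all-reduced explicitly, then applied with
        # experimental_aggregate_gradients=False (see the function above).
        minimize_using_explicit_allreduce(
            tape, optimizer, loss, model.trainable_variables)
    strategy.run(step_fn, args=(features, labels))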
+# ============================================================================== +"""An abstraction that users can easily handle their custom training loops.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import abc +import six +import tensorflow.compat.v2 as tf +from typing import Dict, Optional, Text + + +@six.add_metaclass(abc.ABCMeta) +class AbstractTrainable(tf.Module): + """An abstract class defining the APIs required for training.""" + + @abc.abstractmethod + def train(self, + num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]: + """Implements model training with multiple steps. + + In training, it is common to break the total training steps into several + training loops, so users can do checkpointing, write summaries and run some + python callbacks. This is necessary for getting good performance in TPU + training, as the overhead for launching a multi worker tf.function may be + large in Eager mode. It is usually encouraged to create a host training loop + (e.g. using a `tf.range` wrapping `strategy.run` inside a + `tf.function`) in the TPU case. For the cases that don't require host + training loop to acheive peak performance, users can just implement a simple + python loop to drive each step. + + Args: + num_steps: A guideline for how many training steps to run. Note that it is + up to the model what constitutes a "step" (this may involve more than + one update to model parameters, e.g. if training a GAN). + + Returns: + The function may return a dictionary of `Tensors`, which will be + written to logs and as TensorBoard summaries. + """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class AbstractEvaluable(tf.Module): + """An abstract class defining the APIs required for evaluation.""" + + @abc.abstractmethod + def evaluate( + self, num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]: + """Implements model evaluation. + + Args: + num_steps: A guideline for how many evaluation steps to run. Note that it + is up to the model what constitutes a "step". Generally, it may be + desirable to support both a limited number of eval steps and iterating + over a full dataset (however many steps are required) when `num_steps` + is `None`. + + Returns: + The function may return a dictionary of `Tensors`, which will be + written to logs and as TensorBoard summaries. + """ + pass diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/standard_runnable.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/standard_runnable.py new file mode 100644 index 0000000000000000000000000000000000000000..e2bb8f00be232c586bea67d0ffb8c2c66e1ac325 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/standard_runnable.py @@ -0,0 +1,183 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""An abstraction that users can easily handle their custom training loops.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import abc +import six +import tensorflow.compat.v2 as tf +from typing import Dict, Optional, Text + +from TensorFlow.common.training import runnable +from TensorFlow.common.training import utils + + +@six.add_metaclass(abc.ABCMeta) +class StandardTrainable(runnable.AbstractTrainable): + """Implements the standard functionality of AbstractTrainable APIs.""" + + def __init__(self, use_tf_while_loop=True, use_tf_function=True): + if use_tf_while_loop and not use_tf_function: + raise ValueError("`use_tf_while_loop=True` and `use_tf_function=False` " + "is not supported") + self.use_tf_while_loop = use_tf_while_loop + self.use_tf_function = use_tf_function + self.train_dataset = None + self.train_iter = None + self.train_loop_fn = None + + @abc.abstractmethod + def build_train_dataset(self, manifest_path=None): + """Builds the training datasets. + + Returns: + A tf.nest-compatible structure of tf.data.Dataset or DistributedDataset. + """ + pass + + def train(self, num_steps: Optional[tf.Tensor], num_acc_steps:int=1, manifest_path=None) -> Optional[Dict[Text, tf.Tensor]]: + """See base class.""" + if self.train_dataset is None: + # Build train input dataset + self.train_dataset = self.build_train_dataset(manifest_path=manifest_path) + self.train_iter = tf.nest.map_structure(iter, self.train_dataset) + + if self.train_loop_fn is None: + train_fn = self.train_step + if self.use_tf_while_loop: + self.train_loop_fn = utils.create_tf_while_loop_fn(train_fn) + else: + if self.use_tf_function: + train_fn = tf.function(train_fn) + self.train_loop_fn = utils.create_loop_fn(train_fn) + + self.train_loop_begin() + self.train_loop_fn(self.train_iter, num_steps, num_acc_steps) + return self.train_loop_end() + + def train_loop_begin(self): + """Called once at the beginning of the training loop. + + This is a good place to reset metrics that accumulate values over multiple + steps of training. + """ + pass + + @abc.abstractmethod + def train_step(self, iterator): + """Implements one step of training. + + What a "step" consists of is up to the implementer. If using distribution + strategies, the call to this method should take place in the "cross-replica + context" for generality, to allow e.g. multiple iterator dequeues and calls + to `strategy.run`. + + Args: + iterator: A tf.nest-compatible structure of tf.data Iterator or + DistributedIterator. + """ + pass + + def train_loop_end(self) -> Optional[Dict[Text, tf.Tensor]]: + """Called at the end of the training loop. + + This is a good place to get metric results. The value returned from this + function will be returned as-is from the train() method. + + Returns: + The function may return a dictionary of `Tensors`, which will be + written to logs and as TensorBoard summaries. 
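A hypothetical minimal subclass, to illustrate the two overrides `StandardTrainable` requires (`build_train_dataset` and `train_step`); the toy model and synthetic dataset are placeholders, not part of this patch:

import tensorflow as tf

class ToyTrainable(StandardTrainable):
    def __init__(self, strategy):
        super().__init__(use_tf_while_loop=False, use_tf_function=True)
        self.strategy = strategy
        with strategy.scope():
            self.model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
            self.optimizer = tf.keras.optimizers.SGD(0.01)

    def build_train_dataset(self, manifest_path=None):
        # Synthetic stand-in for a real input pipeline.
        ds = tf.data.Dataset.from_tensor_slices(
            (tf.random.uniform([128, 8]),
             tf.random.uniform([128], maxval=2, dtype=tf.int32)))
        return self.strategy.experimental_distribute_dataset(
            ds.repeat().batch(32, drop_remainder=True))

    def train_step(self, iterator):
        # Runs in cross-replica context: dequeue once, then enter replica context.
        def step_fn(features, labels):
            with tf.GradientTape() as tape:
                logits = self.model(features, training=True)
                loss = tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(
                    labels, logits, from_logits=True))
            grads = tape.gradient(loss, self.model.trainable_variables)
            self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        features, labels = next(iterator)
        self.strategy.run(step_fn, args=(features, labels))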
+ """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class StandardEvaluable(runnable.AbstractEvaluable): + """Implements the standard functionality of AbstractEvaluable APIs.""" + + def __init__(self, use_tf_function=True): + self.eval_use_tf_function = use_tf_function + self.eval_dataset = None + self.eval_loop_fn = None + + @abc.abstractmethod + def build_eval_dataset(self): + """Builds the evaluation datasets. + + Returns: + A tf.nest-compatible structure of tf.data.Dataset or DistributedDataset. + """ + pass + + def evaluate( + self, num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]: + """See base class.""" + if self.eval_dataset is None: + # Build train input dataset + self.eval_dataset = self.build_eval_dataset() + + if self.eval_loop_fn is None: + eval_fn = self.eval_step + if self.eval_use_tf_function: + eval_fn = tf.function(eval_fn) + self.eval_loop_fn = utils.create_loop_fn(eval_fn) + + # TODO(b/147718615): When async RPC is enabled in eager runtime, we make + # eval iterator as a class member so it doesn't get destroyed when out of + # the function scope. + self.eval_iter = tf.nest.map_structure(iter, self.eval_dataset) + + self.eval_begin() + self.eval_loop_fn(self.eval_iter, num_steps) + return self.eval_end() + + def eval_begin(self): + """Called once at the beginning of the evaluation. + + This is a good place to reset metrics that accumulate values over the entire + evaluation. + """ + pass + + @abc.abstractmethod + def eval_step(self, iterator): + """Implements one step of evaluation. + + What a "step" consists of is up to the implementer. If using distribution + strategies, the call to this method should take place in the "cross-replica + context" for generality, to allow e.g. multiple iterator dequeues and calls + to `strategy.run`. + + Args: + iterator: A tf.nest-compatible structure of tf.data Iterator or + DistributedIterator. + """ + pass + + def eval_end(self) -> Optional[Dict[Text, tf.Tensor]]: + """Called at the end of the evaluation. + + This is a good place to get metric results. The value returned from this + function will be returned as-is from the evaluate() method. + + Returns: + The function may return a dictionary of `Tensors`, which will be + written to logs and as TensorBoard summaries. + """ + pass diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/utils.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..367ecd96b1982b8749561679ab9e718df73b1040 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/common/training/utils.py @@ -0,0 +1,344 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Some layered modules/functions to help users writing custom training loop.""" + +from __future__ import absolute_import +from __future__ import division +# from __future__ import google_type_annotations +from __future__ import print_function + +import abc +import inspect +import six + +import tensorflow.compat.v2 as tf + + +def create_loop_fn(step_fn): + """Creates a multiple steps function driven by the python while loop. + + Args: + step_fn: A function which takes `iterator` as input. + + Returns: + A callable defined as the `loop_fn` defination below. + """ + + def loop_fn(iterator, num_steps, state=None, reduce_fn=None): + """A loop function with multiple steps. + + Args: + iterator: A nested structure of tf.data `Iterator` or + `DistributedIterator`. + num_steps: The number of steps in the loop. If `num_steps==-1`, will + iterate until exausting the iterator. + state: An optional initial state before running the loop. + reduce_fn: a callable defined as `def reduce_fn(state, value)`, where + `value` is the outputs from `step_fn`. + + Returns: + The updated state. + """ + try: + step = 0 + # To make sure the OutOfRangeError exception can be handled well with + # async remote eager, we need to wrap the loop body in a `async_scope`. + with tf.experimental.async_scope(): + while (num_steps == -1 or step < num_steps): + outputs = step_fn(iterator) + if reduce_fn is not None: + state = reduce_fn(state, outputs) + step += 1 + return state + except (StopIteration, tf.errors.OutOfRangeError): + tf.experimental.async_clear_error() + return state + + return loop_fn + + +def create_tf_while_loop_fn(step_fn): + """Create a multiple steps function driven by tf.while_loop on the host. + + Args: + step_fn: A function which takes `iterator` as input. + + Returns: + A callable defined as the `loop_fn` defination below. + """ + + @tf.function + def loop_fn(iterator, num_steps, num_acc_steps:int=1): + """A loop function with multiple steps. + + Args: + iterator: A nested structure of tf.data `Iterator` or + `DistributedIterator`. + num_steps: The number of steps in the loop. Must be a tf.Tensor. + num_acc_steps: Number of gradient accumulation steps. + """ + if not isinstance(num_steps, tf.Tensor): + raise ValueError("`num_steps` should be an `tf.Tensor`. Python object " + "may cause retracing.") + + for _ in tf.range(num_steps): + for _ in range(num_acc_steps): + step_fn(iterator) + + return loop_fn + + +def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs): + """A helper function to create distributed dataset. + + Args: + strategy: An instance of `tf.distribute.Strategy`. + dataset_or_fn: A instance of `tf.data.Dataset` or a function which takes an + `tf.distribute.InputContext` as input and returns a `tf.data.Dataset`. If + it is a function, it could optionally have an argument named + `input_context` which is `tf.distribute.InputContext` argument type. + *args: The list of arguments to be passed to dataset_or_fn. + **kwargs: Any keyword arguments to be passed. + + Returns: + A distributed Dataset. 
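A short usage sketch for the host loop above: the step function is wrapped once, and the step count must already be a Tensor to avoid retracing. `train_step_fn` and `dist_iterator` are assumed to exist (e.g. from a StandardTrainable-style class):

loop_fn = create_tf_while_loop_fn(train_step_fn)
num_steps = tf.convert_to_tensor(100, dtype=tf.int32)  # must be a tf.Tensor, per the check above
loop_fn(dist_iterator, num_steps)                       # 100 steps inside a single tf.while_loop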
+ """ + if strategy is None: + strategy = tf.distribute.get_strategy() + + if isinstance(dataset_or_fn, tf.data.Dataset): + return strategy.experimental_distribute_dataset(dataset_or_fn) + + if not callable(dataset_or_fn): + raise ValueError("`dataset_or_fn` should be either callable or an instance " + "of `tf.data.Dataset`") + + def dataset_fn(ctx): + """Wrapped dataset function for creating distributed dataset..""" + + # If `dataset_or_fn` is a function and has `input_context` as argument + # names, pass `ctx` as the value of `input_context` when calling + # `dataset_or_fn`. Otherwise `ctx` will not be used when calling + # `dataset_or_fn`. + if six.PY3: + argspec = inspect.getfullargspec(dataset_or_fn) + else: + argspec = inspect.getargspec(dataset_or_fn) + args_names = argspec.args + + if "input_context" in args_names: + kwargs["input_context"] = ctx + ds = dataset_or_fn(*args, **kwargs) + return ds + + return strategy.experimental_distribute_datasets_from_function(dataset_fn) + + +class SummaryManager(object): + """A class manages writing summaries.""" + + def __init__(self, + summary_writer, + summary_fn, + global_step=None, + summary_interval=None): + """Construct a summary manager object. + + Args: + summary_writer: A `tf.summary.SummaryWriter` instance for writing + summaries. + summary_fn: A callable defined as `def summary_fn(name, tensor, + step=None)`, which describes the summary operation. + global_step: A `tf.Variable` instance for checking the current global step + value, in case users want to save summaries every N steps. + summary_interval: An integer, indicates the minimum step interval between + two summaries. + """ + if summary_writer is not None: + self._summary_writer = summary_writer + self._enabled = True + else: + self._summary_writer = tf.summary.create_noop_writer() + self._enabled = False + self._summary_fn = summary_fn + + if global_step is None: + self._global_step = tf.summary.experimental.get_step() + else: + self._global_step = global_step + + if summary_interval is not None: + if self._global_step is None: + raise ValueError("`summary_interval` is not None, but no `global_step` " + "can be obtained ") + self._last_summary_step = self._global_step.numpy() + self._summary_interval = summary_interval + + @property + def summary_interval(self): + return self._summary_interval + + @property + def summary_writer(self): + """Returns the underlying summary writer.""" + return self._summary_writer + + def flush(self): + """Flush the underlying summary writer.""" + if self._enabled: + tf.summary.flush(self._summary_writer) + + def write_summaries(self, items, always_write=True): + """Write a bulk of summaries. + + Args: + items: a dictionary of `Tensors` for writing summaries. + always_write: An optional boolean. If `True`, the manager will always + write summaries unless the summaries have been written for the same + step. Otherwise the manager will only write the summaries if the + interval between summaries are larger than `summary_interval`. + + Returns: + A boolean indicates whether the summaries are written or not. + """ + # TODO(rxsang): Support writing summaries with nested structure, so users + # can split the summaries into different directories for nicer visualization + # in Tensorboard, like train and eval metrics. 
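A hedged sketch of using `SummaryManager` in isolation (paths and values are illustrative):

import tensorflow as tf

step = tf.Variable(0, dtype=tf.int64)
manager = SummaryManager(
    tf.summary.create_file_writer('/tmp/train'),  # passing None would disable writing
    tf.summary.scalar,
    global_step=step,
    summary_interval=100)

step.assign(100)
manager.write_summaries({'loss': tf.constant(0.25)})  # returns True once written
manager.flush()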
+ if not self._enabled: + return False + + if self._summary_interval is not None: + current_step = self._global_step.numpy() + if current_step == self._last_summary_step: + return False + if not always_write and current_step < (self._last_summary_step + + self._summary_interval): + return False + self._last_summary_step = current_step + + with self._summary_writer.as_default(): + for name, tensor in items.items(): + self._summary_fn(name, tensor, step=self._global_step) + return True + + +@six.add_metaclass(abc.ABCMeta) +class Trigger(object): + """An abstract class representing a "trigger" for some event.""" + + @abc.abstractmethod + def __call__(self, value: float, force_trigger=False): + """Maybe trigger the event based on the given value. + + Args: + value: the value for triggering. + force_trigger: Whether the trigger is forced triggered. + + Returns: + `True` if the trigger is triggered on the given `value`, and + `False` otherwise. + """ + + @abc.abstractmethod + def reset(self): + """Reset states in the trigger.""" + + +class IntervalTrigger(Trigger): + """Triggers on every fixed interval.""" + + def __init__(self, interval, start=0): + """Constructs the IntervalTrigger. + + Args: + interval: The triggering interval. + start: An initial value for the trigger. + """ + self._interval = interval + self._last_trigger_value = start + + def __call__(self, value, force_trigger=False): + """Maybe trigger the event based on the given value. + + Args: + value: the value for triggering. + force_trigger: If True, the trigger will be forced triggered unless the + last trigger value is equal to `value`. + + Returns: + `True` if the trigger is triggered on the given `value`, and + `False` otherwise. + """ + if force_trigger and value != self._last_trigger_value: + self._last_trigger_value = value + return True + + if self._interval and self._interval > 0: + if value >= self._last_trigger_value + self._interval: + self._last_trigger_value = value + return True + return False + + def reset(self): + """See base class.""" + self._last_trigger_value = 0 + + +class EpochHelper(object): + """A Helper class to handle epochs in Customized Training Loop.""" + + def __init__(self, epoch_steps, global_step): + """Constructs the EpochHelper. + + Args: + epoch_steps: An integer indicates how many steps in an epoch. + global_step: A `tf.Variable` instance indicates the current global step. 
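For reference, the trigger semantics used for `eval_interval`/`eval_offset` in the Controller above behave as follows (values are illustrative):

trigger = IntervalTrigger(interval=500, start=0)
trigger(250)   # False: 250 < 0 + 500
trigger(500)   # True:  500 >= 0 + 500; last trigger value becomes 500
trigger(750)   # False: 750 < 500 + 500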
+ """ + self._epoch_steps = epoch_steps + self._global_step = global_step + self._current_epoch = None + self._epoch_start_step = None + self._in_epoch = False + + def epoch_begin(self): + """Returns whether a new epoch should begin.""" + if self._in_epoch: + return False + current_step = self._global_step.numpy() + self._epoch_start_step = current_step + self._current_epoch = current_step // self._epoch_steps + self._in_epoch = True + return True + + def epoch_end(self): + """Returns whether the current epoch should end.""" + if not self._in_epoch: + raise ValueError("`epoch_end` can only be called inside an epoch") + current_step = self._global_step.numpy() + epoch = current_step // self._epoch_steps + + if epoch > self._current_epoch: + self._in_epoch = False + return True + return False + + @property + def batch_index(self): + """Index of the next batch within the current epoch.""" + return self._global_step.numpy() - self._epoch_start_step + + @property + def current_epoch(self): + return self._current_epoch diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/__init__.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/common.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/common.py new file mode 100644 index 0000000000000000000000000000000000000000..db8f12d6a6dd4dc6eef3d953e7956975ebb9f239 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/common.py @@ -0,0 +1,523 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# List of changes: +# - added Habana specific flags +# - added helper function for prefetching + +# Copyright (C) 2020-2021 Habana Labs, Ltd. 
an Intel Company + +"""Common util functions and classes used by both keras cifar and imagenet.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import flags +import tensorflow as tf + +import tensorflow_model_optimization as tfmot +from TensorFlow.utils.flags import core as flags_core +from TensorFlow.utils.misc import keras_utils +from TensorFlow.utils.flags._conventions import help_wrap +from habana_frameworks.tensorflow.multinode_helpers import comm_size +from TensorFlow.common.tb_utils import ( + ExamplesPerSecondKerasHook, TensorBoardWithHParamsV1) + +from TensorFlow.computer_vision.common import imagenet_preprocessing +from TensorFlow.computer_vision.Resnets.utils.optimizers.keras import lars_optimizer +from TensorFlow.computer_vision.Resnets.utils.optimizers.keras import lars_util + +try: + import horovod.tensorflow as hvd +except ImportError: + hvd = None + +FLAGS = flags.FLAGS +BASE_LEARNING_RATE = 0.1 # This matches Jing's version. +TRAIN_TOP_1 = 'training_accuracy_top_1' +LR_SCHEDULE = [ # (multiplier, epoch to start) tuples + (1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80) +] + +global_batch_size = None +def get_global_batch_size(batch_size, num_acc_steps:int=1): + global global_batch_size + if global_batch_size is None: + global_batch_size = batch_size + if hvd is not None and hvd.is_initialized(): + global_batch_size = batch_size * comm_size() + global_batch_size = global_batch_size * num_acc_steps + return global_batch_size + +class PiecewiseConstantDecayWithWarmup( + tf.keras.optimizers.schedules.LearningRateSchedule): + """Piecewise constant decay with warmup schedule.""" + + def __init__(self, batch_size, epoch_size, warmup_epochs, boundaries, + multipliers, compute_lr_on_cpu=False, name=None): + super(PiecewiseConstantDecayWithWarmup, self).__init__() + if len(boundaries) != len(multipliers) - 1: + raise ValueError('The length of boundaries must be 1 less than the ' + 'length of multipliers') + + base_lr_batch_size = 256 + steps_per_epoch = epoch_size // batch_size + + self.rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size + self.step_boundaries = [float(steps_per_epoch) * x for x in boundaries] + self.lr_values = [self.rescaled_lr * m for m in multipliers] + self.warmup_steps = warmup_epochs * steps_per_epoch + self.compute_lr_on_cpu = compute_lr_on_cpu + self.name = name + + self.learning_rate_ops_cache = {} + + def __call__(self, step): + if tf.executing_eagerly(): + return self._get_learning_rate(step) + + # In an eager function or graph, the current implementation of optimizer + # repeatedly call and thus create ops for the learning rate schedule. To + # avoid this, we cache the ops if not executing eagerly. 
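A hedged numeric sketch of how the schedule above is parameterized from `LR_SCHEDULE`: with a global batch size of 1024 the base LR of 0.1 is rescaled by 1024/256 to 0.4, warmup spans the first 5 epochs, and the step decays land at epochs 30/60/80 (ImageNet train-set size assumed for `epoch_size`):

schedule = PiecewiseConstantDecayWithWarmup(
    batch_size=1024,
    epoch_size=1281167,                              # imagenet_preprocessing.NUM_IMAGES['train']
    warmup_epochs=LR_SCHEDULE[0][1],                 # 5
    boundaries=[p[1] for p in LR_SCHEDULE[1:]],      # [30, 60, 80]
    multipliers=[p[0] for p in LR_SCHEDULE],         # [1.0, 0.1, 0.01, 0.001]
    compute_lr_on_cpu=False)
print(schedule.rescaled_lr)   # 0.4
print(schedule.lr_values)     # ≈ [0.4, 0.04, 0.004, 0.0004]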
+ graph = tf.compat.v1.get_default_graph() + if graph not in self.learning_rate_ops_cache: + if self.compute_lr_on_cpu: + with tf.device('/device:CPU:0'): + self.learning_rate_ops_cache[graph] = self._get_learning_rate(step) + else: + self.learning_rate_ops_cache[graph] = self._get_learning_rate(step) + return self.learning_rate_ops_cache[graph] + + def _get_learning_rate(self, step): + """Compute learning rate at given step.""" + with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup', + [self.rescaled_lr, self.step_boundaries, + self.lr_values, self.warmup_steps, + self.compute_lr_on_cpu]): + def warmup_lr(step): + return self.rescaled_lr * ( + tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32)) + def piecewise_lr(step): + return tf.compat.v1.train.piecewise_constant( + step, self.step_boundaries, self.lr_values) + return tf.cond(step < self.warmup_steps, + lambda: warmup_lr(step), + lambda: piecewise_lr(step)) + + def get_config(self): + return { + 'rescaled_lr': self.rescaled_lr, + 'step_boundaries': self.step_boundaries, + 'lr_values': self.lr_values, + 'warmup_steps': self.warmup_steps, + 'compute_lr_on_cpu': self.compute_lr_on_cpu, + 'name': self.name + } + + +def get_lr_schedule(flags_obj, global_batch_size, train_steps,mlperf_mlloger,mlperf_mllog): + lr_schedule = None + + if flags_obj.lr_schedule == 'polynomial': + lr_schedule = lars_util.PolynomialDecayWithWarmup( + batch_size=global_batch_size, + steps_per_epoch=imagenet_preprocessing.NUM_IMAGES['train'] // global_batch_size, + train_steps=train_steps, + initial_learning_rate=flags_obj.base_learning_rate, + end_learning_rate=flags_obj.end_learning_rate, + warmup_epochs=flags_obj.warmup_epochs, + mlperf_mlloger=mlperf_mlloger, + mlperf_mllog=mlperf_mllog) + elif flags_obj.lr_schedule == 'piecewise': + lr_schedule = PiecewiseConstantDecayWithWarmup( + batch_size=global_batch_size, + epoch_size=imagenet_preprocessing.NUM_IMAGES['train'], + warmup_epochs=LR_SCHEDULE[0][1], + boundaries=list(p[1] for p in LR_SCHEDULE[1:]), + multipliers=list(p[0] for p in LR_SCHEDULE), + compute_lr_on_cpu=False) + elif flags_obj.lr_schedule == 'constant': + lr_schedule = flags_obj.base_learning_rate * global_batch_size / 256 + else: + raise ValueError('lr_schedule "%s" is unknown.' % flags_obj.lr_schedule) + + return lr_schedule + + +def get_optimizer(flags_obj, global_batch_size, train_steps,mlperf_mlloger,mlperf_mllog): + optimizer = None + lr_schedule = get_lr_schedule(flags_obj, global_batch_size, train_steps,mlperf_mlloger,mlperf_mllog) + + if flags_obj.optimizer == 'SGD': + # The learning_rate is overwritten at the beginning of each step by callback. + optimizer = tf.keras.optimizers.legacy.SGD(learning_rate=lr_schedule, momentum=0.9) + + elif flags_obj.optimizer == 'LARS': + optimizer = lars_optimizer.LARSOptimizer( + learning_rate=lr_schedule, + momentum=flags_obj.momentum, + weight_decay=flags_obj.weight_decay, + skip_list=['batch_normalization', 'bias', 'bn'], + epsilon=flags_obj.lars_epsilon) + else: + raise ValueError('optimizer "%s" is unknown.' 
% flags_obj.optimizer) + + return optimizer + + +def get_callbacks( + steps_per_epoch, + pruning_method=None, + enable_checkpoint_and_export=False, + model_dir=None): + """Returns common callbacks.""" + time_callback = keras_utils.TimeHistory( + FLAGS.batch_size, + FLAGS.log_steps, + logdir=FLAGS.model_dir if FLAGS.enable_tensorboard else None) + callbacks = [time_callback] + + if FLAGS.enable_tensorboard: + callbacks += [ + TensorBoardWithHParamsV1( + FLAGS.flag_values_dict(), + log_dir=FLAGS.model_dir, + update_freq=FLAGS.log_steps), + ExamplesPerSecondKerasHook( + output_dir=FLAGS.model_dir, + every_n_steps=FLAGS.log_steps) + ] + + if FLAGS.profile_steps: + profiler_callback = keras_utils.get_profiler_callback( + FLAGS.model_dir, + FLAGS.profile_steps, + FLAGS.enable_tensorboard, + steps_per_epoch) + callbacks.append(profiler_callback) + + is_pruning_enabled = pruning_method is not None + + if is_pruning_enabled: + callbacks.append(tfmot.sparsity.keras.UpdatePruningStep()) + if model_dir is not None: + callbacks.append(tfmot.sparsity.keras.PruningSummaries( + log_dir=model_dir, profile_batch=0)) + + if enable_checkpoint_and_export: + if model_dir is not None: + ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}') + callbacks.append( + tf.keras.callbacks.ModelCheckpoint(ckpt_full_path, + save_weights_only=True)) + return callbacks + + +def build_stats(history, eval_output, callbacks): + """Normalizes and returns dictionary of stats. + + Args: + history: Results of the training step. Supports both categorical_accuracy + and sparse_categorical_accuracy. + eval_output: Output of the eval step. Assumes first value is eval_loss and + second value is accuracy_top_1. + callbacks: a list of callbacks which might include a time history callback + used during keras.fit. + + Returns: + Dictionary of normalized results. + """ + stats = {} + if eval_output: + stats['accuracy_top_1'] = float(eval_output[1]) + stats['eval_loss'] = float(eval_output[0]) + if history and history.history: + train_hist = history.history + # Gets final loss from training. + stats['loss'] = float(train_hist['loss'][-1]) + # Gets top_1 training accuracy. 
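For orientation, the dictionary assembled here ends up with keys along these lines (values are placeholders; the timing fields appear only when a `keras_utils.TimeHistory` callback was passed in):

stats = {
    'accuracy_top_1': 0.759,            # from eval_output
    'eval_loss': 1.02,
    'loss': 1.35,                       # final training loss
    'training_accuracy_top_1': 0.741,   # TRAIN_TOP_1
    'step_timestamp_log': [],           # from TimeHistory
    'train_finish_time': 0.0,
    'avg_exp_per_second': 0.0,
}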
+ if 'categorical_accuracy' in train_hist: + stats[TRAIN_TOP_1] = float(train_hist['categorical_accuracy'][-1]) + elif 'sparse_categorical_accuracy' in train_hist: + stats[TRAIN_TOP_1] = float(train_hist['sparse_categorical_accuracy'][-1]) + elif 'accuracy' in train_hist: + stats[TRAIN_TOP_1] = float(train_hist['accuracy'][-1]) + + if not callbacks: + return stats + + # Look for the time history callback which was used during keras.fit + for callback in callbacks: + if isinstance(callback, keras_utils.TimeHistory): + timestamp_log = callback.timestamp_log + stats['step_timestamp_log'] = timestamp_log + stats['train_finish_time'] = callback.train_finish_time + if callback.epoch_runtime_log: + stats['avg_exp_per_second'] = callback.average_examples_per_second + + return stats + + +def define_keras_flags( + dynamic_loss_scale=True, + model=False, + optimizer=False, + pretrained_filepath=False): + """Define flags for Keras models.""" + flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True, + train_epochs=True, epochs_between_evals=True, + distribution_strategy=True) + flags_core.define_performance(num_parallel_calls=False, + synthetic_data=True, + dtype=True, + all_reduce_alg=True, + num_packs=True, + tf_gpu_thread_mode=True, + datasets_num_private_threads=True, + dynamic_loss_scale=dynamic_loss_scale, + loss_scale=True, + fp16_implementation=True, + tf_data_experimental_slack=True, + enable_xla=True, + dataset_cache=True) + flags_core.define_image() + flags_core.define_benchmark() + flags_core.define_distribution() + flags.adopt_module_key_flags(flags_core) + + flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?') + flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?') + # TODO(b/135607288): Remove this flag once we understand the root cause of + # slowdown when setting the learning phase in Keras backend. + flags.DEFINE_boolean( + name='set_learning_phase_to_train', default=True, + help='If skip eval, also set Keras learning phase to 1 (training).') + flags.DEFINE_boolean( + name='explicit_gpu_placement', default=False, + help='If not using distribution strategy, explicitly set device scope ' + 'for the Keras training loop.') + flags.DEFINE_boolean(name='use_trivial_model', default=False, + help='Whether to use a trivial Keras model.') + flags.DEFINE_boolean(name='report_accuracy_metrics', default=True, + help='Report metrics during training and evaluation.') + flags.DEFINE_boolean(name='use_tensor_lr', default=True, + help='Use learning rate tensor instead of a callback.') + flags.DEFINE_string( + name='lr_schedule', + default='piecewise', + help='learning rate schedule. ' + '"piecewise" for PiecewiseConstantDecayWithWarmup, ' + '"polynomial" for PolynomialDecayWithWarmup, ' + 'and "constant" for static learning rate.') + flags.DEFINE_boolean( + name='enable_tensorboard', default=False, + help='Whether to enable Tensorboard callback.') + flags.DEFINE_integer( + name='train_steps', default=None, + help='The number of steps to run for training. If it is larger than ' + '# batches per epoch, then use # batches per epoch. This flag will be ' + 'ignored if train_epochs is set to be larger than 1. ') + flags.DEFINE_string( + name='profile_steps', default=None, + help='Save profiling data to model dir at given range of global steps. The ' + 'value must be a comma separated pair of positive integers, specifying ' + 'the first and last step to profile. 
For example, "--profile_steps=2,4" ' + 'triggers the profiler to process 3 steps, starting from the 2nd step. ' + 'Note that profiler has a non-trivial performance overhead, and the ' + 'output file can be gigantic if profiling many steps.') + flags.DEFINE_boolean( + name='batchnorm_spatial_persistent', default=True, + help='Enable the spacial persistent mode for CuDNN batch norm kernel.') + flags.DEFINE_boolean( + name='enable_get_next_as_optional', default=False, + help='Enable get_next_as_optional behavior in DistributedIterator.') + flags.DEFINE_boolean( + name='enable_checkpoint_and_export', default=False, + help='Whether to enable a checkpoint callback and export the savedmodel.') + flags.DEFINE_string( + name='tpu', default='', help='TPU address to connect to.') + flags.DEFINE_integer( + name='steps_per_loop', + default=500, + help='Number of steps per training loop. Only training step happens ' + 'inside the loop. Callbacks will not be called inside. Will be capped at ' + 'steps per epoch.') + flags.DEFINE_boolean( + name='use_tf_while_loop', + default=True, + help='Whether to build a tf.while_loop inside the training loop on the ' + 'host. Setting it to True is critical to have peak performance on ' + 'TPU.') + flags.DEFINE_string( + 'optimizer', 'SGD', + 'Name of optimizer preset. (SGD, LARS)') + flags.DEFINE_float( + 'label_smoothing', 0.0, + 'Apply label smoothing to the loss. This applies to ' + 'categorical_cross_entropy; when label_smoothing > 0, ' + 'one-hot encoding is used for the labels.') + flags.DEFINE_integer('eval_offset_epochs', 0, + 'Epoch number of the first evaluation.') + + if model: + flags.DEFINE_string('model', 'resnet50_v1.5', + 'Name of model preset. (mobilenet, resnet50_v1.5)') + if optimizer: + # TODO(kimjaehong): Replace as general hyper-params not only for mobilenet. + flags.DEFINE_float('initial_learning_rate_per_sample', 0.00007, + 'Initial value of learning rate per sample for ' + 'mobilenet_default.') + flags.DEFINE_float('lr_decay_factor', 0.94, + 'Learning rate decay factor for mobilenet_default.') + flags.DEFINE_float('num_epochs_per_decay', 2.5, + 'Number of epochs per decay for mobilenet_default.') + if pretrained_filepath: + flags.DEFINE_string('pretrained_filepath', '', + 'Pretrained file path.') + flags.DEFINE_float('target_accuracy', None, + 'Target eval accuracy, after which training will stop.') + + +def get_synth_data(height, width, num_channels, num_classes, dtype): + """Creates a set of synthetic random data. + + Args: + height: Integer height that will be used to create a fake image tensor. + width: Integer width that will be used to create a fake image tensor. + num_channels: Integer depth that will be used to create a fake image tensor. + num_classes: Number of classes that should be represented in the fake labels + tensor + dtype: Data type for features/images. + + Returns: + A tuple of tensors representing the inputs and labels. + + """ + # Synthetic input should be within [0, 255]. + inputs = tf.random.truncated_normal([height, width, num_channels], + dtype=dtype, + mean=127, + stddev=60, + name='synthetic_inputs') + labels = tf.random.uniform([1], + minval=0, + maxval=num_classes - 1, + dtype=tf.int32, + name='synthetic_labels') + return inputs, labels + + +def define_pruning_flags(): + """Define flags for pruning methods.""" + flags.DEFINE_string('pruning_method', None, + 'Pruning method.' 
+ 'None (no pruning) or polynomial_decay.') + flags.DEFINE_float('pruning_initial_sparsity', 0.0, + 'Initial sparsity for pruning.') + flags.DEFINE_float('pruning_final_sparsity', 0.5, + 'Final sparsity for pruning.') + flags.DEFINE_integer('pruning_begin_step', 0, + 'Begin step for pruning.') + flags.DEFINE_integer('pruning_end_step', 100000, + 'End step for pruning.') + flags.DEFINE_integer('pruning_frequency', 100, + 'Frequency for pruning.') + + +# Map string to TensorFlow dtype +DTYPE_MAP = { + "fp16": tf.float16, + "fp32": tf.float32, + "bf16": tf.bfloat16, +} + + +def get_dl_type(flags_obj): + return DTYPE_MAP[flags_obj.data_loader_image_type] + + +def define_habana_flags(): + """Define HABANA specific flags.""" + flags.DEFINE_enum(name="data_loader_image_type", short_name="dlit", default="fp32", + enum_values=DTYPE_MAP.keys(), + help="data loader images output type") + flags.DEFINE_boolean(name='experimental_preloading', default=False, + help=help_wrap("Support for data.experimental.prefetch_to_device TensorFlow operator." + "This feature is experimental and works only with single node." + "See `-x` switch for `demo_resnet50` script.")) + flags.DEFINE_boolean("use_horovod", default=False, help="Use horovod") + flags.DEFINE_boolean("modeling", default=False, help="Write graph to graph.pbtxt in model dir and export meta graph when enabled.") + + +def get_synth_input_fn(height, width, num_channels, num_classes, + dtype=tf.float32, drop_remainder=True, + experimental_preloading=False): + """Returns an input function that returns a dataset with random data. + + This input_fn returns a data set that iterates over a set of random data and + bypasses all preprocessing, e.g. jpeg decode and copy. The host to device + copy is still included. This used to find the upper throughput bound when + tuning the full input pipeline. + + Args: + height: Integer height that will be used to create a fake image tensor. + width: Integer width that will be used to create a fake image tensor. + num_channels: Integer depth that will be used to create a fake image tensor. + num_classes: Number of classes that should be represented in the fake labels + tensor + dtype: Data type for features/images. + drop_remainder: A boolean indicates whether to drop the remainder of the + batches. If True, the batch dimension will be static. + + Returns: + An input_fn that can be used in place of a real one to return a dataset + that can be used for iteration. + """ + # pylint: disable=unused-argument + def input_fn(is_training, data_dir, batch_size, *args, **kwargs): + """Returns dataset filled with random data.""" + inputs, labels = get_synth_data(height=height, + width=width, + num_channels=num_channels, + num_classes=num_classes, + dtype=dtype) + # Cast to float32 for Keras model. + labels = tf.cast(labels, dtype=tf.float32) + data = tf.data.Dataset.from_tensors((inputs, labels)).repeat() + + # `drop_remainder` will make dataset produce outputs with known shapes. + data = data.batch(batch_size, drop_remainder=drop_remainder) + if experimental_preloading: + device = "/device:HPU:0" + with tf.device(device): + data = data.apply(tf.data.experimental.prefetch_to_device(device)) + else: + data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) + return data + + return input_fn + + +def set_cudnn_batchnorm_mode(): + """Set CuDNN batchnorm mode for better performance. + + Note: Spatial Persistent mode may lead to accuracy losses for certain + models. 
+ """ + if FLAGS.batchnorm_spatial_persistent: + os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1' + else: + os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/mlperf_variable_map.json b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/mlperf_variable_map.json new file mode 100644 index 0000000000000000000000000000000000000000..f439d8c1989adf431c29031ba63b1df37ab8c90f --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/mlperf_variable_map.json @@ -0,0 +1,163 @@ +{ + "conv1/kernel": "conv0_weight", + "bn_conv1/gamma": "bn0_gamma", + "bn_conv1/beta": "bn0_beta", + "res2a_branch2a/kernel": "stage1_unit1_conv1_weight", + "bn2a_branch2a/gamma": "stage1_unit1_bn1_gamma", + "bn2a_branch2a/beta": "stage1_unit1_bn1_beta", + "res2a_branch2b/kernel": "stage1_unit1_conv2_weight", + "bn2a_branch2b/gamma": "stage1_unit1_bn2_gamma", + "bn2a_branch2b/beta": "stage1_unit1_bn2_beta", + "res2a_branch2c/kernel": "stage1_unit1_conv3_weight", + "bn2a_branch2c/gamma": "stage1_unit1_bn3_gamma", + "bn2a_branch2c/beta": "stage1_unit1_bn3_beta", + "res2a_branch1/kernel": "stage1_unit1_conv1sc_weight", + "bn2a_branch1/gamma": "stage1_unit1_bnsc_gamma", + "bn2a_branch1/beta": "stage1_unit1_bnsc_beta", + "res2b_branch2a/kernel": "stage1_unit2_conv1_weight", + "bn2b_branch2a/gamma": "stage1_unit2_bn1_gamma", + "bn2b_branch2a/beta": "stage1_unit2_bn1_beta", + "res2b_branch2b/kernel": "stage1_unit2_conv2_weight", + "bn2b_branch2b/gamma": "stage1_unit2_bn2_gamma", + "bn2b_branch2b/beta": "stage1_unit2_bn2_beta", + "res2b_branch2c/kernel": "stage1_unit2_conv3_weight", + "bn2b_branch2c/gamma": "stage1_unit2_bn3_gamma", + "bn2b_branch2c/beta": "stage1_unit2_bn3_beta", + "res2c_branch2a/kernel": "stage1_unit3_conv1_weight", + "bn2c_branch2a/gamma": "stage1_unit3_bn1_gamma", + "bn2c_branch2a/beta": "stage1_unit3_bn1_beta", + "res2c_branch2b/kernel": "stage1_unit3_conv2_weight", + "bn2c_branch2b/gamma": "stage1_unit3_bn2_gamma", + "bn2c_branch2b/beta": "stage1_unit3_bn2_beta", + "res2c_branch2c/kernel": "stage1_unit3_conv3_weight", + "bn2c_branch2c/gamma": "stage1_unit3_bn3_gamma", + "bn2c_branch2c/beta": "stage1_unit3_bn3_beta", + "res3a_branch2a/kernel": "stage2_unit1_conv1_weight", + "bn3a_branch2a/gamma": "stage2_unit1_bn1_gamma", + "bn3a_branch2a/beta": "stage2_unit1_bn1_beta", + "res3a_branch2b/kernel": "stage2_unit1_conv2_weight", + "bn3a_branch2b/gamma": "stage2_unit1_bn2_gamma", + "bn3a_branch2b/beta": "stage2_unit1_bn2_beta", + "res3a_branch2c/kernel": "stage2_unit1_conv3_weight", + "bn3a_branch2c/gamma": "stage2_unit1_bn3_gamma", + "bn3a_branch2c/beta": "stage2_unit1_bn3_beta", + "res3a_branch1/kernel": "stage2_unit1_conv1sc_weight", + "bn3a_branch1/gamma": "stage2_unit1_bnsc_gamma", + "bn3a_branch1/beta": "stage2_unit1_bnsc_beta", + "res3b_branch2a/kernel": "stage2_unit2_conv1_weight", + "bn3b_branch2a/gamma": "stage2_unit2_bn1_gamma", + "bn3b_branch2a/beta": "stage2_unit2_bn1_beta", + "res3b_branch2b/kernel": "stage2_unit2_conv2_weight", + "bn3b_branch2b/gamma": "stage2_unit2_bn2_gamma", + "bn3b_branch2b/beta": "stage2_unit2_bn2_beta", + "res3b_branch2c/kernel": "stage2_unit2_conv3_weight", + "bn3b_branch2c/gamma": "stage2_unit2_bn3_gamma", + 
"bn3b_branch2c/beta": "stage2_unit2_bn3_beta", + "res3c_branch2a/kernel": "stage2_unit3_conv1_weight", + "bn3c_branch2a/gamma": "stage2_unit3_bn1_gamma", + "bn3c_branch2a/beta": "stage2_unit3_bn1_beta", + "res3c_branch2b/kernel": "stage2_unit3_conv2_weight", + "bn3c_branch2b/gamma": "stage2_unit3_bn2_gamma", + "bn3c_branch2b/beta": "stage2_unit3_bn2_beta", + "res3c_branch2c/kernel": "stage2_unit3_conv3_weight", + "bn3c_branch2c/gamma": "stage2_unit3_bn3_gamma", + "bn3c_branch2c/beta": "stage2_unit3_bn3_beta", + "res3d_branch2a/kernel": "stage2_unit4_conv1_weight", + "bn3d_branch2a/gamma": "stage2_unit4_bn1_gamma", + "bn3d_branch2a/beta": "stage2_unit4_bn1_beta", + "res3d_branch2b/kernel": "stage2_unit4_conv2_weight", + "bn3d_branch2b/gamma": "stage2_unit4_bn2_gamma", + "bn3d_branch2b/beta": "stage2_unit4_bn2_beta", + "res3d_branch2c/kernel": "stage2_unit4_conv3_weight", + "bn3d_branch2c/gamma": "stage2_unit4_bn3_gamma", + "bn3d_branch2c/beta": "stage2_unit4_bn3_beta", + "res4a_branch2a/kernel": "stage3_unit1_conv1_weight", + "bn4a_branch2a/gamma": "stage3_unit1_bn1_gamma", + "bn4a_branch2a/beta": "stage3_unit1_bn1_beta", + "res4a_branch2b/kernel": "stage3_unit1_conv2_weight", + "bn4a_branch2b/gamma": "stage3_unit1_bn2_gamma", + "bn4a_branch2b/beta": "stage3_unit1_bn2_beta", + "res4a_branch2c/kernel": "stage3_unit1_conv3_weight", + "bn4a_branch2c/gamma": "stage3_unit1_bn3_gamma", + "bn4a_branch2c/beta": "stage3_unit1_bn3_beta", + "res4a_branch1/kernel": "stage3_unit1_conv1sc_weight", + "bn4a_branch1/gamma": "stage3_unit1_bnsc_gamma", + "bn4a_branch1/beta": "stage3_unit1_bnsc_beta", + "res4b_branch2a/kernel": "stage3_unit2_conv1_weight", + "bn4b_branch2a/gamma": "stage3_unit2_bn1_gamma", + "bn4b_branch2a/beta": "stage3_unit2_bn1_beta", + "res4b_branch2b/kernel": "stage3_unit2_conv2_weight", + "bn4b_branch2b/gamma": "stage3_unit2_bn2_gamma", + "bn4b_branch2b/beta": "stage3_unit2_bn2_beta", + "res4b_branch2c/kernel": "stage3_unit2_conv3_weight", + "bn4b_branch2c/gamma": "stage3_unit2_bn3_gamma", + "bn4b_branch2c/beta": "stage3_unit2_bn3_beta", + "res4c_branch2a/kernel": "stage3_unit3_conv1_weight", + "bn4c_branch2a/gamma": "stage3_unit3_bn1_gamma", + "bn4c_branch2a/beta": "stage3_unit3_bn1_beta", + "res4c_branch2b/kernel": "stage3_unit3_conv2_weight", + "bn4c_branch2b/gamma": "stage3_unit3_bn2_gamma", + "bn4c_branch2b/beta": "stage3_unit3_bn2_beta", + "res4c_branch2c/kernel": "stage3_unit3_conv3_weight", + "bn4c_branch2c/gamma": "stage3_unit3_bn3_gamma", + "bn4c_branch2c/beta": "stage3_unit3_bn3_beta", + "res4d_branch2a/kernel": "stage3_unit4_conv1_weight", + "bn4d_branch2a/gamma": "stage3_unit4_bn1_gamma", + "bn4d_branch2a/beta": "stage3_unit4_bn1_beta", + "res4d_branch2b/kernel": "stage3_unit4_conv2_weight", + "bn4d_branch2b/gamma": "stage3_unit4_bn2_gamma", + "bn4d_branch2b/beta": "stage3_unit4_bn2_beta", + "res4d_branch2c/kernel": "stage3_unit4_conv3_weight", + "bn4d_branch2c/gamma": "stage3_unit4_bn3_gamma", + "bn4d_branch2c/beta": "stage3_unit4_bn3_beta", + "res4e_branch2a/kernel": "stage3_unit5_conv1_weight", + "bn4e_branch2a/gamma": "stage3_unit5_bn1_gamma", + "bn4e_branch2a/beta": "stage3_unit5_bn1_beta", + "res4e_branch2b/kernel": "stage3_unit5_conv2_weight", + "bn4e_branch2b/gamma": "stage3_unit5_bn2_gamma", + "bn4e_branch2b/beta": "stage3_unit5_bn2_beta", + "res4e_branch2c/kernel": "stage3_unit5_conv3_weight", + "bn4e_branch2c/gamma": "stage3_unit5_bn3_gamma", + "bn4e_branch2c/beta": "stage3_unit5_bn3_beta", + "res4f_branch2a/kernel": "stage3_unit6_conv1_weight", + 
"bn4f_branch2a/gamma": "stage3_unit6_bn1_gamma", + "bn4f_branch2a/beta": "stage3_unit6_bn1_beta", + "res4f_branch2b/kernel": "stage3_unit6_conv2_weight", + "bn4f_branch2b/gamma": "stage3_unit6_bn2_gamma", + "bn4f_branch2b/beta": "stage3_unit6_bn2_beta", + "res4f_branch2c/kernel": "stage3_unit6_conv3_weight", + "bn4f_branch2c/gamma": "stage3_unit6_bn3_gamma", + "bn4f_branch2c/beta": "stage3_unit6_bn3_beta", + "res5a_branch2a/kernel": "stage4_unit1_conv1_weight", + "bn5a_branch2a/gamma": "stage4_unit1_bn1_gamma", + "bn5a_branch2a/beta": "stage4_unit1_bn1_beta", + "res5a_branch2b/kernel": "stage4_unit1_conv2_weight", + "bn5a_branch2b/gamma": "stage4_unit1_bn2_gamma", + "bn5a_branch2b/beta": "stage4_unit1_bn2_beta", + "res5a_branch2c/kernel": "stage4_unit1_conv3_weight", + "bn5a_branch2c/gamma": "stage4_unit1_bn3_gamma", + "bn5a_branch2c/beta": "stage4_unit1_bn3_beta", + "res5a_branch1/kernel": "stage4_unit1_conv1sc_weight", + "bn5a_branch1/gamma": "stage4_unit1_bnsc_gamma", + "bn5a_branch1/beta": "stage4_unit1_bnsc_beta", + "res5b_branch2a/kernel": "stage4_unit2_conv1_weight", + "bn5b_branch2a/gamma": "stage4_unit2_bn1_gamma", + "bn5b_branch2a/beta": "stage4_unit2_bn1_beta", + "res5b_branch2b/kernel": "stage4_unit2_conv2_weight", + "bn5b_branch2b/gamma": "stage4_unit2_bn2_gamma", + "bn5b_branch2b/beta": "stage4_unit2_bn2_beta", + "res5b_branch2c/kernel": "stage4_unit2_conv3_weight", + "bn5b_branch2c/gamma": "stage4_unit2_bn3_gamma", + "bn5b_branch2c/beta": "stage4_unit2_bn3_beta", + "res5c_branch2a/kernel": "stage4_unit3_conv1_weight", + "bn5c_branch2a/gamma": "stage4_unit3_bn1_gamma", + "bn5c_branch2a/beta": "stage4_unit3_bn1_beta", + "res5c_branch2b/kernel": "stage4_unit3_conv2_weight", + "bn5c_branch2b/gamma": "stage4_unit3_bn2_gamma", + "bn5c_branch2b/beta": "stage4_unit3_bn2_beta", + "res5c_branch2c/kernel": "stage4_unit3_conv3_weight", + "bn5c_branch2c/gamma": "stage4_unit3_bn3_gamma", + "bn5c_branch2c/beta": "stage4_unit3_bn3_beta", + "fc1000/kernel": "fc1_weight", + "fc1000/bias": "fc1_bias" + } \ No newline at end of file diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/resnet_ctl_imagenet_main.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/resnet_ctl_imagenet_main.py new file mode 100644 index 0000000000000000000000000000000000000000..d28aaaa5a08258f2788d382ff28e0036bdf11d47 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/resnet_ctl_imagenet_main.py @@ -0,0 +1,406 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +# List of changes: +# - loading habana module +# - added support for prefetching to HPU +# - added profiling callbacks support +# - changed include paths of modules +# - include mechanism for dumping tensors + +# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company + +"""Runs a ResNet model on the ImageNet dataset using custom training loops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import shutil + +from absl import app +from absl import flags +from absl import logging +import tensorflow as tf +import os +import math + +from TensorFlow.common.modeling import performance +from TensorFlow.common.training import controller +from TensorFlow.utils.flags import core as flags_core +from TensorFlow.utils.logs import logger +from TensorFlow.utils.misc import distribution_utils +from TensorFlow.utils.misc import keras_utils +from TensorFlow.utils.misc import model_helpers +from TensorFlow.computer_vision.common import imagenet_preprocessing +from TensorFlow.computer_vision.Resnets.utils.optimizers.keras import lars_util +from TensorFlow.computer_vision.Resnets.resnet_keras import common +from TensorFlow.computer_vision.Resnets.resnet_keras import resnet_runnable +from TensorFlow.computer_vision.Resnets.resnet_keras.common import get_global_batch_size +from habana_frameworks.tensorflow import load_habana_module +from TensorFlow.common.debug import dump_callback +from TensorFlow.common.tb_utils import write_hparams_v2 +from habana_frameworks.tensorflow.synapse_logger_helpers import synapse_logger_init +from TensorFlow.computer_vision.Resnets.resnet_keras.mlp_log import get_mllog_mlloger + +try: + import horovod.tensorflow as hvd +except ImportError as e: + _hvd_exc = e + hvd = None + +flags.DEFINE_boolean(name='use_tf_function', default=True, + help='Wrap the train and test step inside a ' + 'tf.function.') +flags.DEFINE_boolean(name='single_l2_loss_op', default=False, + help='Calculate L2_loss on concatenated weights, ' + 'instead of using Keras per-layer L2 loss.') +flags.DEFINE_boolean(name='cache_decoded_image', + default=False, + help='Whether or not to cache decoded images in the ' + 'input pipeline. If this flag and `cache` is enabled, ' + 'then TFExample protos will be parsed and then cached ' + 'which reduces the load on hosts.') +flags.DEFINE_boolean(name='dist_eval', default=True, + help='Partial eval in each rank and allreduce the partial result') +flags.DEFINE_boolean(name='enable_device_warmup', + default=False, + help='Whether or not to enable device warmup. This ' + 'includes training on dummy data and enabling graph/XLA ' + 'compilation before run_start.') +flags.DEFINE_integer(name='device_warmup_steps', + default=2, + help='The number of steps to apply for device warmup.') +flags.DEFINE_float('base_learning_rate', 0.1, + 'Base learning rate. 
' + 'This is the learning rate when using batch size 256; when using other ' + 'batch sizes, the learning rate will be scaled linearly.') +flags.DEFINE_boolean(name='profile', default=False, + help='Running RN50 with profiling') +flags.DEFINE_integer(name='num_train_files', + default=1024, + help='The number of training tf records.') +flags.DEFINE_integer(name='num_eval_files', + default=128, + help='The number of evaluation tf records.') +flags.DEFINE_integer(name='num_acc_steps', default=1, help='Number of gradient accumulation steps.') + + +def build_stats(runnable, time_callback): + """Normalizes and returns dictionary of stats. + + Args: + runnable: The module containing all the training and evaluation metrics. + time_callback: Time tracking callback instance. + + Returns: + Dictionary of normalized results. + """ + stats = {} + + if not runnable.flags_obj.skip_eval: + if runnable.test_loss: + stats['eval_loss'] = runnable.test_loss.result().numpy() + if runnable.test_accuracy: + stats['eval_acc'] = runnable.eval_accuracy + + if runnable.train_loss: + stats['train_loss'] = runnable.train_loss.result().numpy() + if runnable.train_accuracy: + stats['train_acc'] = runnable.train_accuracy.result().numpy() + + if time_callback: + timestamp_log = time_callback.timestamp_log + stats['step_timestamp_log'] = timestamp_log + stats['train_finish_time'] = time_callback.train_finish_time + if time_callback.epoch_runtime_log: + stats['avg_exp_per_second'] = time_callback.average_examples_per_second + + return stats + + +def get_num_train_iterations(flags_obj): + """Returns the number of training steps, train and test epochs.""" + global_batch_size = get_global_batch_size(flags_obj.batch_size, flags_obj.num_acc_steps) + steps_per_epoch = math.ceil(imagenet_preprocessing.NUM_IMAGES['train'] / global_batch_size) + train_epochs = flags_obj.train_epochs + + if train_epochs == 0 and flags_obj.train_steps > 0: + steps_per_epoch = flags_obj.train_steps + train_epochs = 1 + + eval_batch_size = flags_obj.batch_size + if flags_obj.dist_eval: + eval_batch_size = global_batch_size + eval_steps = ( + math.ceil(imagenet_preprocessing.NUM_IMAGES['validation'] / eval_batch_size)) + + return steps_per_epoch, train_epochs, eval_steps + + +def _steps_to_run(steps_in_current_epoch, steps_per_epoch, steps_per_loop): + """Calculates steps to run on device.""" + if steps_per_loop <= 0: + raise ValueError('steps_per_loop should be positive integer.') + if steps_per_loop == 1: + return steps_per_loop + return min(steps_per_loop, steps_per_epoch - steps_in_current_epoch) + + +def run(flags_obj): + """Run ResNet ImageNet training and eval loop using custom training loops. + + Args: + flags_obj: An object containing parsed flag values. + + Raises: + ValueError: If fp16 is passed as it is not currently supported. + + Returns: + Dictionary of training and eval stats. 
+ """ + tf.get_logger().propagate = False + output_dir = None + if "LOG_DIR" in os.environ: + output_dir = os.environ["LOG_DIR"] + mlperf_mlloger, mlperf_mllog = get_mllog_mlloger(output_dir) + mlperf_mlloger.event(key=mlperf_mllog.constants.CACHE_CLEAR, value=True) + mlperf_mlloger.start(key=mlperf_mllog.constants.INIT_START, value=None) + mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_BENCHMARK, value=mlperf_mllog.constants.RESNET) + mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_ORG, value='Habana') + mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_DIVISION, value='closed') + mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_PLATFORM, value='gaudi-{}'.format(flags_obj.num_gpus)) + mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_STATUS, value='onprem') + + keras_utils.set_session_config( + enable_eager=flags_obj.enable_eager, + enable_xla=flags_obj.enable_xla) + performance.set_mixed_precision_policy(flags_core.get_tf_dtype(flags_obj)) + + # This only affects GPU. + common.set_cudnn_batchnorm_mode() + + # TODO(anj-s): Set data_format without using Keras. + data_format = flags_obj.data_format + if data_format is None: + data_format = ('channels_first' + if tf.test.is_built_with_cuda() else 'channels_last') + tf.keras.backend.set_image_data_format(data_format) + + if hvd is not None and hvd.is_initialized(): + model_dir = os.path.join( + flags_obj.model_dir, "worker_" + str(hvd.rank())) + else: + model_dir = flags_obj.model_dir + + global_batch_size = get_global_batch_size(flags_obj.batch_size, flags_obj.num_acc_steps) + + strategy = distribution_utils.get_distribution_strategy( + distribution_strategy=flags_obj.distribution_strategy, + num_gpus=flags_obj.num_gpus, + all_reduce_alg=flags_obj.all_reduce_alg, + num_packs=flags_obj.num_packs, + tpu_address=flags_obj.tpu) + + mlperf_mlloger.event(key=mlperf_mllog.constants.GLOBAL_BATCH_SIZE, value=global_batch_size) + mlperf_mlloger.event(key=mlperf_mllog.constants.TRAIN_SAMPLES, value=imagenet_preprocessing.NUM_IMAGES['train']) + mlperf_mlloger.event(key=mlperf_mllog.constants.EVAL_SAMPLES, value=imagenet_preprocessing.NUM_IMAGES['validation']) + group_batch_norm = 1 + mlperf_mlloger.event(key=mlperf_mllog.constants.MODEL_BN_SPAN, value= flags_obj.batch_size * group_batch_norm) + mlperf_mlloger.event(key=mlperf_mllog.constants.GRADIENT_ACCUMULATION_STEPS, value= flags_obj.num_acc_steps) + + train_writer, eval_writer = None, None + if flags_obj.enable_tensorboard: + train_writer = tf.summary.create_file_writer(model_dir) + eval_writer = tf.summary.create_file_writer(os.path.join(model_dir, 'eval')) + hparams = flags_obj.flag_values_dict() + write_hparams_v2(train_writer, hparams) + + per_epoch_steps, train_epochs, eval_steps = get_num_train_iterations( + flags_obj) + steps_per_loop = min(flags_obj.steps_per_loop, per_epoch_steps) + train_steps = train_epochs * per_epoch_steps + + logging.info( + 'Training %d epochs, each epoch has %d steps, ' + 'total steps: %d; Eval %d steps', train_epochs, per_epoch_steps, + train_steps, eval_steps) + + time_callback = keras_utils.TimeHistory( + global_batch_size, + flags_obj.log_steps, + summary_writer=train_writer, + batch_size_per_node=flags_obj.batch_size) + profiler_callback = None + if flags_obj.profile_steps is not None: + profiler_callback = keras_utils.get_profiler_callback( + model_dir, + flags_obj.profile_steps, + flags_obj.enable_tensorboard, + per_epoch_steps) + with distribution_utils.get_strategy_scope(strategy): + runnable = 
resnet_runnable.ResnetRunnable(flags_obj, time_callback, + train_steps, + per_epoch_steps, + profiler_callback,mlperf_mlloger,mlperf_mllog) + + eval_interval = flags_obj.epochs_between_evals * per_epoch_steps + eval_offset = flags_obj.eval_offset_epochs * per_epoch_steps + if eval_offset != 0: + eval_offset -= eval_interval + checkpoint_interval = ( + per_epoch_steps if flags_obj.enable_checkpoint_and_export else None) + summary_interval = per_epoch_steps if flags_obj.enable_tensorboard else None + + checkpoint_manager = tf.train.CheckpointManager( + runnable.checkpoint, + directory=model_dir, + max_to_keep=10, + step_counter=runnable.global_step, + checkpoint_interval=checkpoint_interval) + + device_warmup_steps = ( + flags_obj.device_warmup_steps if flags_obj.enable_device_warmup else 0) + + if flags_obj.enable_device_warmup: + logging.info('Warmup for %d steps.', device_warmup_steps) + + train_steps=per_epoch_steps * train_epochs + + resnet_controller = controller.Controller( + strategy, + runnable.train, + runnable.evaluate, + runnable.warmup, + global_step=runnable.global_step, + steps_per_loop=steps_per_loop, + train_steps=train_steps, + checkpoint_manager=checkpoint_manager, + summary_interval=summary_interval, + eval_steps=eval_steps, + eval_interval=eval_interval, + eval_offset=eval_offset, + device_warmup_steps=device_warmup_steps, + train_summary_writer=train_writer, + eval_summary_writer=eval_writer) + + if flags_obj.enable_device_warmup: + resnet_controller.warmup() + del runnable.warmup_train_iter + del runnable.warmup_train_dataset + del runnable.warmup_eval_iter + del runnable.warmup_eval_dataset + try: + synth_data_dir = f'{model_dir}/resnet_synth_data' + shutil.rmtree(synth_data_dir) + except: + pass + + manifest_path = prepare_dataset_manifest(flags_obj) + + mlperf_mlloger.end(key=mlperf_mllog.constants.INIT_STOP) + + if flags.FLAGS.use_horovod: + hvd.broadcast(0, 0) + time_callback.on_train_begin() + mlperf_mlloger.start(key=mlperf_mllog.constants.RUN_START) + mlperf_mlloger.start( + key=mlperf_mllog.constants.BLOCK_START, value=None, + metadata={ + 'first_epoch_num': 1, + 'epoch_count': + (flags_obj.eval_offset_epochs if flags_obj.eval_offset_epochs > 0 + else flags_obj.epochs_between_evals) + }) + resnet_controller.train(evaluate=not flags_obj.skip_eval, num_acc_steps=flags_obj.num_acc_steps, manifest_path=manifest_path) + if not flags_obj.skip_eval: + eval_accuracy = resnet_controller.last_eval_output['test_accuracy'] + if eval_accuracy >= flags_obj.target_accuracy: + mlperf_mlloger.end(key=mlperf_mllog.constants.RUN_STOP, value=None, metadata={'status': 'success'}) + else: + mlperf_mlloger.end(key=mlperf_mllog.constants.RUN_STOP, value=None, metadata={'status': 'fail'}) + time_callback.on_train_end() + + + stats = build_stats(runnable, time_callback) + return stats + + +def prepare_dataset_manifest(flags_obj): + import glob + import json + import pathlib + + from habana_frameworks.tensorflow.multinode_helpers import comm_rank + + manifest_file_name = f"imagenet_jpeg_manifest_rank_{comm_rank()}.json" + manifest_path = os.path.join('/tmp', manifest_file_name) + + if flags_obj.jpeg_data_dir is not None: + # get files list + dataset_dir = os.path.join(flags_obj.jpeg_data_dir, 'train') + + print(f"dataset dir: {dataset_dir}") + manifest_data = {} + manifest_data["file_list"] = sorted( + glob.glob(dataset_dir + "/*/*.{}".format("JPEG"))) + + # get class list + data_dir = pathlib.Path(dataset_dir) + manifest_data["class_list"] = sorted( + [item.name for item in 
data_dir.glob('*') if item.is_dir() == True]) + + file_sizes = {} + file_classes = [] + + for filename in manifest_data["file_list"]: + #Everything is in order as file_list is sorted + file_sizes[filename] = os.stat(filename).st_size + file_classes.append(os.path.basename(os.path.dirname(filename))) + + manifest_data['file_sizes'] = file_sizes + manifest_data['file_classes'] = file_classes + + with open(manifest_path, "w") as f: + json.dump(manifest_data, f) + + return manifest_path + + +def main(_): + if flags.FLAGS.use_horovod: + if hvd is None: + logging.error("Problem encountered during Horovod import. Please make sure that habana-horovod package is installed.") + raise _hvd_exc + hvd.init() + else: + synapse_logger_init() + + os.environ['TF_EXPERIMENTAL_BATCH_VARIABLES'] = '1' + os.environ['TF_CLUSTER_VARIABLES'] = '1' + load_habana_module() + + with dump_callback(): + model_helpers.apply_clean(flags.FLAGS) + with logger.benchmark_context(flags.FLAGS): + stats =run (flags.FLAGS) + logging.info('Run stats:\n%s', stats) + + +if __name__ == '__main__': + logging.set_verbosity(logging.INFO) + common.define_keras_flags() + common.define_habana_flags() + lars_util.define_lars_flags() + app.run(main) + diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/utils/__init__.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/utils/optimizers/keras/backward_compatibility.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/utils/optimizers/keras/backward_compatibility.py new file mode 100644 index 0000000000000000000000000000000000000000..6b5a724580cf8d08dbdf19986bd3ff525206c537 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/utils/optimizers/keras/backward_compatibility.py @@ -0,0 +1,9 @@ +# Copyright (C) 2023 Habana Labs, Ltd. an Intel Company + +from packaging import version +import tensorflow as tf + +if version.parse(tf.__version__) <= version.parse("2.12.0"): + from tensorflow.python.framework.ops import convert_to_tensor_v2 +else: + from tensorflow.python.framework.tensor_conversion import convert_to_tensor_v2 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/utils/optimizers/keras/lars_optimizer.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/utils/optimizers/keras/lars_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..3ba1f16b0d1857b1f08eaa30e0f5595528d44921 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/utils/optimizers/keras/lars_optimizer.py @@ -0,0 +1,225 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Layer-wise Adaptive Rate Scaling optimizer for large-batch training.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from tensorflow.python.training import training_ops + + +class LARSOptimizer(tf.keras.optimizers.legacy.Optimizer): + """Layer-wise Adaptive Rate Scaling for large batch training. + + Introduced by "Large Batch Training of Convolutional Networks" by Y. You, + I. Gitman, and B. Ginsburg. (https://arxiv.org/abs/1708.03888) + + Implements the LARS learning rate scheme presented in the paper above. This + optimizer is useful when scaling the batch size to up to 32K without + significant performance degradation. It is recommended to use the optimizer + in conjunction with: + - Gradual learning rate warm-up + - Linear learning rate scaling + - Poly rule learning rate decay + + Note, LARS scaling is currently only enabled for dense tensors. Sparse tensors + use the default momentum optimizer. + """ + + def __init__( + self, + learning_rate, + momentum=0.9, + weight_decay=0.0001, + # The LARS coefficient is a hyperparameter + eeta=0.001, + epsilon=0.0, + name="LARSOptimizer", + # Enable skipping variables from LARS scaling. + # TODO(sameerkm): Enable a direct mechanism to pass a + # subset of variables to the optimizer. + skip_list=None, + use_nesterov=False, + **kwargs): + """Construct a new LARS Optimizer. + + Args: + learning_rate: A `Tensor`, floating point value, or a schedule that is a + `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable + that takes no arguments and returns the actual value to use. The + learning rate. + momentum: A floating point value. Momentum hyperparameter. + weight_decay: A floating point value. Weight decay hyperparameter. + eeta: LARS coefficient as used in the paper. Dfault set to LARS + coefficient from the paper. (eeta / weight_decay) determines the highest + scaling factor in LARS. + epsilon: Optional epsilon parameter to be set in models that have very + small gradients. Default set to 0.0. + name: Optional name prefix for variables and ops created by LARSOptimizer. + skip_list: List of strings to enable skipping variables from LARS scaling. + If any of the strings in skip_list is a subset of var.name, variable + 'var' is skipped from LARS scaling. For a typical classification model + with batch normalization, the skip_list is ['batch_normalization', + 'bias'] + use_nesterov: when set to True, nesterov momentum will be enabled + **kwargs: keyword arguments. + + Raises: + ValueError: If a hyperparameter is set to a non-sensical value. 
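+
+    Example (an illustrative usage sketch added for clarity; it is not part of
+    the original file and assumes a ready-to-compile Keras `model` plus a
+    placeholder learning-rate schedule, since this implementation expects a
+    callable schedule rather than a plain float):
+
+      lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
+          initial_learning_rate=9.5, decay_steps=10000,
+          end_learning_rate=0.0001, power=2.0)
+      optimizer = LARSOptimizer(
+          learning_rate=lr_schedule,
+          momentum=0.9,
+          weight_decay=0.0001,
+          skip_list=['batch_normalization', 'bias'])
+      model.compile(optimizer=optimizer,
+                    loss='sparse_categorical_crossentropy')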
+ """ + if momentum < 0.0: + raise ValueError("momentum should be positive: %s" % momentum) + if weight_decay < 0.0: + raise ValueError("weight_decay should be positive: %s" % weight_decay) + super(LARSOptimizer, self).__init__(name=name, **kwargs) + + self._set_hyper("learning_rate", learning_rate) + + # When directly using class members, instead of + # _set_hyper and _get_hyper (such as learning_rate above), + # the values are fixed after __init(), and not being + # updated during the training process. + # This provides better performance but less flexibility. + self.momentum = momentum + self.weight_decay = weight_decay + self.eeta = eeta + self.epsilon = epsilon or tf.keras.backend.epsilon() + self._skip_list = skip_list + self.use_nesterov = use_nesterov + + def _prepare_local(self, var_device, var_dtype, apply_state): + lr_t = self._get_hyper("learning_rate", var_dtype) + local_step = tf.cast(self.iterations, var_dtype) + lr_t = tf.cast(lr_t(local_step), var_dtype) + learning_rate_t = tf.identity(lr_t) + + apply_state[(var_device, var_dtype)].update( + dict( + learning_rate=learning_rate_t, + )) + + def _create_slots(self, var_list): + for v in var_list: + self.add_slot(v, "momentum") + + def compute_lr(self, grad, var, coefficients): + scaled_lr = coefficients["learning_rate"] + if self._skip_list is None or not any(v in var.name + for v in self._skip_list): + w_norm = tf.norm(var, ord=2) + g_norm = tf.norm(grad, ord=2) + trust_ratio = tf.where( + tf.greater(w_norm, 0), + tf.where( + tf.greater(g_norm, 0), + (self.eeta * w_norm / + (g_norm + self.weight_decay * w_norm + self.epsilon)), 1.0), 1.0) + + scaled_lr = coefficients["learning_rate"] * trust_ratio + # Add the weight regularization gradient + grad = grad + self.weight_decay * var + return scaled_lr, grad + + def _apply_dense(self, grad, var, apply_state=None): + var_device, var_dtype = var.device, var.dtype.base_dtype + coefficients = ((apply_state or {}).get((var_device, var_dtype)) + or self._fallback_apply_state(var_device, var_dtype)) + + scaled_lr, grad = self.compute_lr(grad, var, coefficients) + mom = self.get_slot(var, "momentum") + return training_ops.apply_momentum( + var, + mom, + tf.cast(1.0, var.dtype.base_dtype), + grad * scaled_lr, + self.momentum, + use_locking=False, + use_nesterov=self.use_nesterov) + + def _resource_apply_dense(self, grad, var, apply_state=None): + var_device, var_dtype = var.device, var.dtype.base_dtype + coefficients = ((apply_state or {}).get((var_device, var_dtype)) + or self._fallback_apply_state(var_device, var_dtype)) + + scaled_lr, grad = self.compute_lr(grad, var, coefficients) + mom = self.get_slot(var, "momentum") + + # ============================================================ + return training_ops.resource_apply_keras_momentum( + var.handle, + mom.handle, + scaled_lr, + grad, + self.momentum, + use_locking=False, + use_nesterov=self.use_nesterov) + # ============================================================ + + # ============================================================ + # mom_t = mom * self.momentum - grad * scaled_lr + # mom_t = state_ops.assign(mom, mom_t, use_locking=False) + # if self.use_nesterov: + # var_t = var + mom_t * self.momentum - grad * scaled_lr + # else: + # var_t = var + mom_t + # return state_ops.assign(var, var_t, use_locking=False).op + # ============================================================ + + # Fallback to momentum optimizer for sparse tensors + def _apply_sparse(self, grad, var, apply_state=None): + var_device, var_dtype = var.device, 
var.dtype.base_dtype + coefficients = ((apply_state or {}).get((var_device, var_dtype)) + or self._fallback_apply_state(var_device, var_dtype)) + + mom = self.get_slot(var, "momentum") + return training_ops.sparse_apply_momentum( + var, + mom, + coefficients["learning_rate"], + grad.values, + grad.indices, + self.momentum, + use_locking=False, + use_nesterov=self.use_nesterov) + + def _resource_apply_sparse(self, grad, var, indices, apply_state=None): + var_device, var_dtype = var.device, var.dtype.base_dtype + coefficients = ((apply_state or {}).get((var_device, var_dtype)) + or self._fallback_apply_state(var_device, var_dtype)) + + mom = self.get_slot(var, "momentum") + return training_ops.resource_sparse_apply_keras_momentum( + var.handle, + mom.handle, + coefficients["learning_rate"], + grad, + indices, + self.momentum, + use_locking=False, + use_nesterov=self.use_nesterov) + + def get_config(self): + config = super(LARSOptimizer, self).get_config() + config.update({ + "learning_rate": self._serialize_hyperparameter("learning_rate"), + "momentum": self.momentum, + "weight_decay": self.weight_decay, + "eeta": self.eeta, + "epsilon": self.epsilon, + "use_nesterov": self.use_nesterov, + }) + return config diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/utils/optimizers/keras/lars_util.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/utils/optimizers/keras/lars_util.py new file mode 100644 index 0000000000000000000000000000000000000000..c6f70059d08d3c2175b56a82b9bdaa14b95054c6 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/utils/optimizers/keras/lars_util.py @@ -0,0 +1,183 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# Copyright (C) 2023 Habana Labs, Ltd. 
an Intel Company +# ============================================================================== +"""Enable Layer-wise Adaptive Rate Scaling optimizer in ResNet.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags +import tensorflow as tf +from TensorFlow.computer_vision.Resnets.utils.optimizers.keras import backward_compatibility + +from tensorflow.python.eager import context +from tensorflow.python.framework import ops +from tensorflow.python.ops import math_ops +FLAGS = flags.FLAGS + + +def define_lars_flags(): + """Defines flags needed by LARS optimizer.""" + + flags.DEFINE_float( + 'end_learning_rate', + default=None, + help=('Polynomial decay end learning rate.')) + + flags.DEFINE_float( + 'lars_epsilon', default=0.0, help=('Override autoselected LARS epsilon.')) + + flags.DEFINE_float( + 'warmup_epochs', + default=None, + help=('Override autoselected polynomial decay warmup epochs.')) + + flags.DEFINE_float( + 'momentum', + default=0.9, + help=('Momentum parameter used in the MomentumOptimizer.')) + + flags.DEFINE_float( + 'lars_decay_epochs', + default=None, + help=('Momentum parameter used in the MomentumOptimizer.')) + + +class PolynomialDecayWithWarmup( + tf.keras.optimizers.schedules.LearningRateSchedule): + """A LearningRateSchedule that uses a polynomial decay with warmup.""" + + def __init__(self, + batch_size, + steps_per_epoch, + train_steps, + initial_learning_rate=None, + end_learning_rate=None, + warmup_epochs=None, + compute_lr_on_cpu=False, + name=None, + mlperf_mlloger=None, + mlperf_mllog=None): + """Applies a polynomial decay to the learning rate with warmup.""" + super(PolynomialDecayWithWarmup, self).__init__() + + self.batch_size = batch_size + self.steps_per_epoch = steps_per_epoch + self.train_steps = train_steps + self.name = name + self.learning_rate_ops_cache = {} + self.compute_lr_on_cpu = compute_lr_on_cpu + + if batch_size < 16384: + self.initial_learning_rate = 10.0 + warmup_epochs_ = 5 + elif batch_size < 32768: + self.initial_learning_rate = 25.0 + warmup_epochs_ = 5 + else: + self.initial_learning_rate = 31.2 + warmup_epochs_ = 25 + + # Override default poly learning rate and warmup epochs + if initial_learning_rate: + self.initial_learning_rate = initial_learning_rate + + if end_learning_rate: + self.end_learning_rate = end_learning_rate + else: + self.end_learning_rate = 0.0001 + + if warmup_epochs is not None: + warmup_epochs_ = warmup_epochs + self.warmup_epochs = warmup_epochs_ + + opt_name = FLAGS.optimizer.lower() + mlperf_mlloger.event(key=mlperf_mllog.constants.OPT_NAME, value=opt_name) + + warmup_steps = warmup_epochs_ * steps_per_epoch + self.warmup_steps = tf.cast(warmup_steps, tf.float32) + if (FLAGS.lars_decay_epochs is None): + self.decay_steps = train_steps + else: + self.decay_steps = FLAGS.lars_decay_epochs * steps_per_epoch + self.decay_steps = self.decay_steps - warmup_steps + 1 + + if opt_name == 'lars': + mlperf_mlloger.event(key=mlperf_mllog.constants.LARS_EPSILON, value=FLAGS.lars_epsilon) + mlperf_mlloger.event(key=mlperf_mllog.constants.LARS_OPT_WEIGHT_DECAY, value=FLAGS.weight_decay) + mlperf_mlloger.event(key=mlperf_mllog.constants.LARS_OPT_END_LR, value=self.end_learning_rate) + mlperf_mlloger.event(key=mlperf_mllog.constants.LARS_OPT_LR_DECAY_STEPS, value=int(self.decay_steps)) + mlperf_mlloger.event(key=mlperf_mllog.constants.LARS_OPT_LR_DECAY_POLY_POWER, value=2.0) + mlperf_mlloger.event(key='lars_opt_momentum', 
value=FLAGS.momentum) + elif opt_name == 'sgd': + mlperf_mlloger.event(key=mlperf_mllog.constants.OPT_WEIGHT_DECAY, value=FLAGS.weight_decay) + mlperf_mlloger.event(key='opt_momentum', value=FLAGS.momentum) + else: + print('NOT Supported') + mlperf_mlloger.event(key=opt_name+'_'+mlperf_mllog.constants.OPT_LR_WARMUP_EPOCHS, value=warmup_epochs_) + mlperf_mlloger.event(key=opt_name+'_'+mlperf_mllog.constants.OPT_BASE_LR, value=self.initial_learning_rate) + + self.poly_rate_scheduler = tf.keras.optimizers.schedules.PolynomialDecay( + initial_learning_rate=self.initial_learning_rate, + decay_steps=self.decay_steps, + end_learning_rate=self.end_learning_rate, + power=2.0) + + def __call__(self, step): + if tf.executing_eagerly(): + return self._get_learning_rate(step) + + # In an eager function or graph, the current implementation of optimizer + # repeatedly call and thus create ops for the learning rate schedule. To + # avoid this, we cache the ops if not executing eagerly. + graph = tf.compat.v1.get_default_graph() + if graph not in self.learning_rate_ops_cache: + if self.compute_lr_on_cpu: + with tf.device('/device:CPU:0'): + self.learning_rate_ops_cache[graph] = self._get_learning_rate(step) + else: + self.learning_rate_ops_cache[graph] = self._get_learning_rate(step) + return self.learning_rate_ops_cache[graph] + + def _get_learning_rate(self, step): + with ops.name_scope_v2(self.name or 'PolynomialDecayWithWarmup') as name: + + initial_learning_rate = backward_compatibility.convert_to_tensor_v2( + self.initial_learning_rate, name='initial_learning_rate') + warmup_steps = backward_compatibility.convert_to_tensor_v2( + self.warmup_steps, name='warmup_steps') + + warmup_rate = ( + initial_learning_rate * step / warmup_steps) + + poly_steps = math_ops.maximum(math_ops.subtract(step, warmup_steps), 1) + poly_rate = self.poly_rate_scheduler(poly_steps) + + decay_rate = tf.where(step <= warmup_steps, + warmup_rate, poly_rate, name=name) + return decay_rate + + def get_config(self): + return { + 'batch_size': self.batch_size, + 'steps_per_epoch': self.steps_per_epoch, + 'train_steps': self.train_steps, + 'initial_learning_rate': self.initial_learning_rate, + 'end_learning_rate': self.end_learning_rate, + 'warmup_epochs': self.warmup_epochs, + 'name': self.name, + } diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/__init__.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_benchmark.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..492128647ea080dff12a6036aecc2979460958fd --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_benchmark.py @@ -0,0 +1,109 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Flags for benchmarking models."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl import flags
+
+from TensorFlow.utils.flags._conventions import help_wrap
+
+
+def define_log_steps():
+  flags.DEFINE_integer(
+      name="log_steps", default=100,
+      help="Frequency with which to log timing information with TimeHistory.")
+
+  return []
+
+
+def define_benchmark(benchmark_log_dir=True, bigquery_uploader=True):
+  """Register benchmarking flags.
+
+  Args:
+    benchmark_log_dir: Create a flag to specify location for benchmark logging.
+    bigquery_uploader: Create flags for uploading results to BigQuery.
+
+  Returns:
+    A list of flags for core.py to mark as key flags.
+  """
+
+  key_flags = []
+
+  flags.DEFINE_enum(
+      name="benchmark_logger_type", default="BaseBenchmarkLogger",
+      enum_values=["BaseBenchmarkLogger", "BenchmarkFileLogger",
+                   "BenchmarkBigQueryLogger"],
+      help=help_wrap("The type of benchmark logger to use. Defaults to using "
+                     "BaseBenchmarkLogger which logs to STDOUT. Different "
+                     "loggers will require other flags to be able to work."))
+  flags.DEFINE_string(
+      name="benchmark_test_id", short_name="bti", default=None,
+      help=help_wrap("The unique test ID of the benchmark run. It could be the "
+                     "combination of key parameters. It is hardware "
+                     "independent and could be used to compare the performance "
+                     "between different test runs. 
This flag is designed for " + "human consumption, and does not have any impact within " + "the system.")) + + define_log_steps() + + if benchmark_log_dir: + flags.DEFINE_string( + name="benchmark_log_dir", short_name="bld", default=None, + help=help_wrap("The location of the benchmark logging.") + ) + + if bigquery_uploader: + flags.DEFINE_string( + name="gcp_project", short_name="gp", default=None, + help=help_wrap( + "The GCP project name where the benchmark will be uploaded.")) + + flags.DEFINE_string( + name="bigquery_data_set", short_name="bds", default="test_benchmark", + help=help_wrap( + "The Bigquery dataset name where the benchmark will be uploaded.")) + + flags.DEFINE_string( + name="bigquery_run_table", short_name="brt", default="benchmark_run", + help=help_wrap("The Bigquery table name where the benchmark run " + "information will be uploaded.")) + + flags.DEFINE_string( + name="bigquery_run_status_table", short_name="brst", + default="benchmark_run_status", + help=help_wrap("The Bigquery table name where the benchmark run " + "status information will be uploaded.")) + + flags.DEFINE_string( + name="bigquery_metric_table", short_name="bmt", + default="benchmark_metric", + help=help_wrap("The Bigquery table name where the benchmark metric " + "information will be uploaded.")) + + @flags.multi_flags_validator( + ["benchmark_logger_type", "benchmark_log_dir"], + message="--benchmark_logger_type=BenchmarkFileLogger will require " + "--benchmark_log_dir being set") + def _check_benchmark_log_dir(flags_dict): + benchmark_logger_type = flags_dict["benchmark_logger_type"] + if benchmark_logger_type == "BenchmarkFileLogger": + return flags_dict["benchmark_log_dir"] + return True + + return key_flags diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_conventions.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_conventions.py new file mode 100644 index 0000000000000000000000000000000000000000..81ad21b0c4c9a58fb7aa40402ae91c62a4c51352 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_conventions.py @@ -0,0 +1,54 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Central location for shared argparse convention definitions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import codecs +import functools + +from absl import app as absl_app +from absl import flags + + +# This codifies help string conventions and makes it easy to update them if +# necessary. Currently the only major effect is that help bodies start on the +# line after flags are listed. All flag definitions should wrap the text bodies +# with help wrap when calling DEFINE_*. 
+_help_wrap = functools.partial(flags.text_wrap, length=80, indent="", + firstline_indent="\n") + + +# Pretty formatting causes issues when utf-8 is not installed on a system. +def _stdout_utf8(): + try: + codecs.lookup("utf-8") + except LookupError: + return False + return sys.stdout.encoding == "UTF-8" + + +if _stdout_utf8(): + help_wrap = _help_wrap +else: + def help_wrap(text, *args, **kwargs): + return _help_wrap(text, *args, **kwargs).replace(u"\ufeff", u"") + + +# Replace None with h to also allow -h +absl_app.HelpshortFlag.SHORT_NAME = "h" diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_distribution.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_distribution.py new file mode 100644 index 0000000000000000000000000000000000000000..d96140c7c3d4590fd4003b4695904353a284ba94 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_distribution.py @@ -0,0 +1,54 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flags related to distributed execution.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags +import tensorflow as tf + +from TensorFlow.utils.flags._conventions import help_wrap + + +def define_distribution(worker_hosts=True, task_index=True): + """Register distributed execution flags. + + Args: + worker_hosts: Create a flag for specifying comma-separated list of workers. + task_index: Create a flag for specifying index of task. + + Returns: + A list of flags for core.py to marks as key flags. + """ + key_flags = [] + + if worker_hosts: + flags.DEFINE_string( + name='worker_hosts', default=None, + help=help_wrap( + 'Comma-separated list of worker ip:port pairs for running ' + 'multi-worker models with DistributionStrategy. The user would ' + 'start the program on each host with identical value for this ' + 'flag.')) + + if task_index: + flags.DEFINE_integer( + name='task_index', default=-1, + help=help_wrap('If multi-worker training, the task_index of this ' + 'worker.')) + + return key_flags diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_misc.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..8dc49b436a5ac35e190e9f8ced8039cce6cd521c --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_misc.py @@ -0,0 +1,50 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Misc flags.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import flags + +from TensorFlow.utils.flags._conventions import help_wrap + + +def define_image(data_format=True): + """Register image specific flags. + + Args: + data_format: Create a flag to specify image axis convention. + + Returns: + A list of flags for core.py to marks as key flags. + """ + + key_flags = [] + + if data_format: + flags.DEFINE_enum( + name="data_format", short_name="df", default=None, + enum_values=["channels_first", "channels_last"], + help=help_wrap( + "A flag to override the data format used in the model. " + "channels_first provides a performance boost on GPU but is not " + "always compatible with CPU. If left unspecified, the data format " + "will be chosen automatically based on whether TensorFlow was " + "built for CPU or GPU.")) + key_flags.append("data_format") + + return key_flags diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/__init__.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/metric_hook.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/metric_hook.py new file mode 100644 index 0000000000000000000000000000000000000000..f408e3e95f09bd48373564389f4c9f1c28f698a5 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/metric_hook.py @@ -0,0 +1,97 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Session hook for logging benchmark metric.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf # pylint: disable=g-bad-import-order + + +class LoggingMetricHook(tf.estimator.LoggingTensorHook): + """Hook to log benchmark metric information. 
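The `define_*` helpers above return the names of the flags that should be treated as key flags (for example `define_image` returns `["data_format"]`). The `core.py` module they mention is not part of this diff, so the wiring below is only an assumed sketch of how such a list might be promoted to absl key flags.

```python
# Hedged sketch: promoting the returned flag names to absl "key flags" so they
# show up in the short --help output. The core.py that actually does this is
# not shown in this diff, so this wiring is an assumption for illustration.
from absl import flags


def register_and_mark_key_flags(define_fns):
  key_flags = []
  for define_fn in define_fns:
    key_flags.extend(define_fn())
  for name in key_flags:
    flags.declare_key_flag(name)  # e.g. "data_format" from define_image()
  return key_flags

# Usage, assuming the flags package above is importable on PYTHONPATH:
# from TensorFlow.utils.flags import _misc, _distribution
# register_and_mark_key_flags([_misc.define_image,
#                              _distribution.define_distribution])
```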
+
+  This hook is very similar to tf.train.LoggingTensorHook, which logs given
+  tensors every N local steps, every N seconds, or at the end. The metric
+  information will be logged to the given log_dir or via metric_logger in JSON
+  format, which can be consumed by a data analysis pipeline later.
+
+  Note that if `at_end` is True, `tensors` should not include any tensor
+  whose evaluation produces a side effect such as consuming additional inputs.
+  """
+
+  def __init__(self, tensors, metric_logger=None,
+               every_n_iter=None, every_n_secs=None, at_end=False):
+    """Initializer for LoggingMetricHook.
+
+    Args:
+      tensors: `dict` that maps string-valued tags to tensors/tensor names,
+          or `iterable` of tensors/tensor names.
+      metric_logger: instance of `BenchmarkLogger`, the benchmark logger that
+          the hook should use to write the log.
+      every_n_iter: `int`, print the values of `tensors` once every N local
+          steps taken on the current worker.
+      every_n_secs: `int` or `float`, print the values of `tensors` once every
+          N seconds. Exactly one of `every_n_iter` and `every_n_secs` should
+          be provided.
+      at_end: `bool` specifying whether to print the values of `tensors` at
+          the end of the run.
+
+    Raises:
+      ValueError: if `every_n_iter` is non-positive, if neither or both of
+          `every_n_iter` and `every_n_secs` are provided, or if
+          `metric_logger` is not provided.
+    """
+    super(LoggingMetricHook, self).__init__(
+        tensors=tensors,
+        every_n_iter=every_n_iter,
+        every_n_secs=every_n_secs,
+        at_end=at_end)
+
+    if metric_logger is None:
+      raise ValueError("metric_logger should be provided.")
+    self._logger = metric_logger
+
+  def begin(self):
+    super(LoggingMetricHook, self).begin()
+    self._global_step_tensor = tf.compat.v1.train.get_global_step()
+    if self._global_step_tensor is None:
+      raise RuntimeError(
+          "Global step should be created to use LoggingMetricHook.")
+    if self._global_step_tensor.name not in self._current_tensors:
+      self._current_tensors[self._global_step_tensor.name] = (
+          self._global_step_tensor)
+
+  def after_run(self, unused_run_context, run_values):
+    # `self._should_trigger` is an internal state populated in before_run; it
+    # uses `self._timer` to determine whether logging should trigger on this
+    # step.
+ if self._should_trigger: + self._log_metric(run_values.results) + + self._iter_count += 1 + + def end(self, session): + if self._log_at_end: + values = session.run(self._current_tensors) + self._log_metric(values) + + def _log_metric(self, tensor_values): + self._timer.update_last_triggered_step(self._iter_count) + global_step = tensor_values[self._global_step_tensor.name] + # self._tag_order is populated during the init of LoggingTensorHook + for tag in self._tag_order: + self._logger.log_metric(tag, tensor_values[tag], global_step=global_step) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/__init__.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/distribution_utils.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/distribution_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a3d662f4bc9e499da119b348acefd642e4a0941f --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/distribution_utils.py @@ -0,0 +1,346 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helper functions for running models in a distributed setting.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os +import random +import string +import tensorflow.compat.v2 as tf + +from TensorFlow.utils.misc import tpu_lib + +from habana_frameworks.tensorflow.distribute import HPUStrategy + + +def _collective_communication(all_reduce_alg): + """Return a CollectiveCommunication based on all_reduce_alg. + + Args: + all_reduce_alg: a string specifying which collective communication to pick, + or None. + + Returns: + tf.distribute.experimental.CollectiveCommunication object + + Raises: + ValueError: if `all_reduce_alg` not in [None, 'ring', 'nccl'] + """ + collective_communication_options = { + None: tf.distribute.experimental.CollectiveCommunication.AUTO, + "ring": tf.distribute.experimental.CollectiveCommunication.RING, + "nccl": tf.distribute.experimental.CollectiveCommunication.NCCL + } + if all_reduce_alg not in collective_communication_options: + raise ValueError( + "When used with `multi_worker_mirrored`, valid values for " + "all_reduce_alg are ['ring', 'nccl']. 
Supplied value: {}".format( + all_reduce_alg)) + return collective_communication_options[all_reduce_alg] + + +def _mirrored_cross_device_ops(all_reduce_alg, num_packs): + """Return a CrossDeviceOps based on all_reduce_alg and num_packs. + + Args: + all_reduce_alg: a string specifying which cross device op to pick, or None. + num_packs: an integer specifying number of packs for the cross device op. + + Returns: + tf.distribute.CrossDeviceOps object or None. + + Raises: + ValueError: if `all_reduce_alg` not in [None, 'nccl', 'hierarchical_copy']. + """ + if all_reduce_alg is None: + return None + mirrored_all_reduce_options = { + "nccl": tf.distribute.NcclAllReduce, + "hierarchical_copy": tf.distribute.HierarchicalCopyAllReduce + } + if all_reduce_alg not in mirrored_all_reduce_options: + raise ValueError( + "When used with `mirrored`, valid values for all_reduce_alg are " + "['nccl', 'hierarchical_copy']. Supplied value: {}".format( + all_reduce_alg)) + cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg] + return cross_device_ops_class(num_packs=num_packs) + + +def get_distribution_strategy(distribution_strategy="mirrored", + num_gpus=0, + num_hpus=0, + all_reduce_alg=None, + num_packs=1, + tpu_address=None): + """Return a DistributionStrategy for running the model. + + Args: + distribution_strategy: a string specifying which distribution strategy to + use. Accepted values are 'off', 'one_device', 'mirrored', + 'parameter_server', 'multi_worker_mirrored', and 'tpu' -- case insensitive. + 'off' means not to use Distribution Strategy; 'tpu' means to use + TPUStrategy using `tpu_address`. + num_gpus: Number of GPUs to run this model. + all_reduce_alg: Optional. Specifies which algorithm to use when performing + all-reduce. For `MirroredStrategy`, valid values are "nccl" and + "hierarchical_copy". For `MultiWorkerMirroredStrategy`, valid values are + "ring" and "nccl". If None, DistributionStrategy will choose based on + device topology. + num_packs: Optional. Sets the `num_packs` in `tf.distribute.NcclAllReduce` + or `tf.distribute.HierarchicalCopyAllReduce` for `MirroredStrategy`. + tpu_address: Optional. String that represents TPU to connect to. Must not + be None if `distribution_strategy` is set to `tpu`. + Returns: + tf.distribute.DistibutionStrategy object. + Raises: + ValueError: if `distribution_strategy` is 'off' or 'one_device' and + `num_gpus` is larger than 1; or `num_gpus` is negative or if + `distribution_strategy` is `tpu` but `tpu_address` is not specified. + """ + if num_gpus < 0: + raise ValueError("`num_gpus` can not be negative.") + + distribution_strategy = distribution_strategy.lower() + if distribution_strategy == "off": + if num_gpus > 1: + raise ValueError( + "When {} GPUs are specified, distribution_strategy " + "flag cannot be set to 'off'.".format(num_gpus)) + return None + + if distribution_strategy == "hpu": + return HPUStrategy() + + if distribution_strategy == "tpu": + # When tpu_address is an empty string, we communicate with local TPUs. 
+ cluster_resolver = tpu_lib.tpu_initialize(tpu_address) + return tf.distribute.experimental.TPUStrategy(cluster_resolver) + + if distribution_strategy == "multi_worker_mirrored": + return tf.distribute.experimental.MultiWorkerMirroredStrategy( + communication=_collective_communication(all_reduce_alg)) + + if distribution_strategy == "one_device": + if num_gpus == 0 and num_hpus == 0: + return tf.distribute.OneDeviceStrategy("device:CPU:0") + if num_hpus == 1: + return tf.distribute.OneDeviceStrategy("device:HPU:0") + if num_gpus > 1 or num_hpus > 1: + raise ValueError("`OneDeviceStrategy` can not be used for more than " + "one device.") + return tf.distribute.OneDeviceStrategy("device:GPU:0") + + if distribution_strategy == "mirrored": + if num_gpus == 0: + devices = ["device:CPU:0"] + else: + devices = ["device:GPU:%d" % i for i in range(num_gpus)] + return tf.distribute.MirroredStrategy( + devices=devices, + cross_device_ops=_mirrored_cross_device_ops(all_reduce_alg, num_packs)) + + if distribution_strategy == "parameter_server": + return tf.distribute.experimental.ParameterServerStrategy() + + raise ValueError( + "Unrecognized Distribution Strategy: %r" % distribution_strategy) + + +def per_replica_batch_size(batch_size, num_gpus): + """For multi-gpu, batch-size must be a multiple of the number of GPUs. + + + Note that distribution strategy handles this automatically when used with + Keras. For using with Estimator, we need to get per GPU batch. + + Args: + batch_size: Global batch size to be divided among devices. This should be + equal to num_gpus times the single-GPU batch_size for multi-gpu training. + num_gpus: How many GPUs are used with DistributionStrategies. + + Returns: + Batch size per device. + + Raises: + ValueError: if batch_size is not divisible by number of devices + """ + if num_gpus <= 1: + return batch_size + + remainder = batch_size % num_gpus + if remainder: + err = ('When running with multiple GPUs, batch size ' + 'must be a multiple of the number of available GPUs. Found {} ' + 'GPUs with a batch size of {}; try --batch_size={} instead.' + ).format(num_gpus, batch_size, batch_size - remainder) + raise ValueError(err) + return int(batch_size / num_gpus) + + +# The `SyntheticDataset` is a temporary solution for generating synthetic data +# directly on devices. It is only useful for Keras with Distribution +# Strategies. We will have better support in `tf.data` or Distribution Strategy +# later. +class SyntheticDataset(object): + """A dataset that generates synthetic data on each device.""" + + def __init__(self, dataset, split_by=1): + # dataset.take(1) doesn't have GPU kernel. 
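`get_distribution_strategy` and `per_replica_batch_size` above are normally used together: the flag-selected strategy decides the devices, and the global batch size is split evenly across them. A hedged usage sketch follows; the values are illustrative, the chosen strategy needs matching hardware to be present, and importing this module assumes the Habana TensorFlow packages are installed because of the `HPUStrategy` import.

```python
# Hedged usage sketch for the helpers defined above; values are illustrative.
from TensorFlow.utils.misc import distribution_utils

strategy = distribution_utils.get_distribution_strategy(
    distribution_strategy="mirrored",  # or "hpu", "multi_worker_mirrored", ...
    num_gpus=4,
    all_reduce_alg="nccl",
    num_packs=2)

global_batch_size = 1024
per_device = distribution_utils.per_replica_batch_size(global_batch_size,
                                                       num_gpus=4)
assert per_device == 256

# A non-divisible batch size fails fast with a descriptive error, e.g.
# per_replica_batch_size(1000, num_gpus=3) raises ValueError suggesting 999.
```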
+    with tf.device('device:CPU:0'):
+      tensor = tf.data.experimental.get_single_element(dataset.take(1))
+    flat_tensor = tf.nest.flatten(tensor)
+    variable_data = []
+    initializers = []
+    for t in flat_tensor:
+      rebatched_t = tf.split(t, num_or_size_splits=split_by, axis=0)[0]
+      assert rebatched_t.shape.is_fully_defined(), rebatched_t.shape
+      v = tf.compat.v1.get_local_variable(self._random_name(),
+                                          initializer=rebatched_t)
+      variable_data.append(v)
+      initializers.append(v.initializer)
+    input_data = tf.nest.pack_sequence_as(tensor, variable_data)
+    self._iterator = SyntheticIterator(input_data, initializers)
+
+  def _random_name(self, size=10, chars=string.ascii_uppercase + string.digits):
+    return ''.join(random.choice(chars) for _ in range(size))
+
+  def __iter__(self):
+    return self._iterator
+
+  def make_one_shot_iterator(self):
+    return self._iterator
+
+  def make_initializable_iterator(self):
+    return self._iterator
+
+
+class SyntheticIterator(object):
+  """An iterator that repeatedly returns the same synthetic batch."""
+
+  def __init__(self, input_data, initializers):
+    self._input_data = input_data
+    self._initializers = initializers
+
+  def get_next(self):
+    return self._input_data
+
+  def next(self):
+    return self.__next__()
+
+  def __next__(self):
+    try:
+      return self.get_next()
+    except tf.errors.OutOfRangeError:
+      raise StopIteration
+
+  def initialize(self):
+    if tf.executing_eagerly():
+      return tf.no_op()
+    else:
+      return self._initializers
+
+
+def _monkey_patch_dataset_method(strategy):
+  """Monkey-patch `strategy`'s `make_dataset_iterator` method."""
+  def make_dataset(self, dataset):
+    tf.compat.v1.logging.info('Using pure synthetic data.')
+    with self.scope():
+      if self.extended._global_batch_size:  # pylint: disable=protected-access
+        return SyntheticDataset(dataset, self.num_replicas_in_sync)
+      else:
+        return SyntheticDataset(dataset)
+
+  def make_iterator(self, dataset):
+    dist_dataset = make_dataset(self, dataset)
+    return iter(dist_dataset)
+
+  strategy.orig_make_dataset_iterator = strategy.make_dataset_iterator
+  strategy.make_dataset_iterator = make_iterator
+  strategy.orig_distribute_dataset = strategy.experimental_distribute_dataset
+  strategy.experimental_distribute_dataset = make_dataset
+
+
+def _undo_monkey_patch_dataset_method(strategy):
+  if hasattr(strategy, 'orig_make_dataset_iterator'):
+    strategy.make_dataset_iterator = strategy.orig_make_dataset_iterator
+  if hasattr(strategy, 'orig_distribute_dataset'):
+    strategy.experimental_distribute_dataset = strategy.orig_distribute_dataset
+
+
+def set_up_synthetic_data():
+  _monkey_patch_dataset_method(tf.distribute.OneDeviceStrategy)
+  _monkey_patch_dataset_method(tf.distribute.MirroredStrategy)
+  _monkey_patch_dataset_method(
+      tf.distribute.experimental.MultiWorkerMirroredStrategy)
+
+
+def undo_set_up_synthetic_data():
+  _undo_monkey_patch_dataset_method(tf.distribute.OneDeviceStrategy)
+  _undo_monkey_patch_dataset_method(tf.distribute.MirroredStrategy)
+  _undo_monkey_patch_dataset_method(
+      tf.distribute.experimental.MultiWorkerMirroredStrategy)
+
+
+def configure_cluster(worker_hosts=None, task_index=-1):
+  """Set multi-worker cluster spec in TF_CONFIG environment variable.
+
+  Args:
+    worker_hosts: comma-separated list of worker ip:port pairs.
+    task_index: index of this task among the workers; must be set when more
+        than one worker is specified.
+
+  Returns:
+    Number of workers in the cluster.
+  """
+  tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
+  if tf_config:
+    num_workers = (len(tf_config['cluster'].get('chief', [])) +
+                   len(tf_config['cluster'].get('worker', [])))
+  elif worker_hosts:
+    workers = worker_hosts.split(',')
+    num_workers = len(workers)
+    if num_workers > 1 and task_index < 0:
+      raise ValueError('Must specify task_index when number of workers > 1')
+    task_index = 0 if num_workers == 1 else task_index
+    os.environ['TF_CONFIG'] = json.dumps({
+        'cluster': {
+            'worker': workers
+        },
+        'task': {'type': 'worker', 'index': task_index}
+    })
+  else:
+    num_workers = 1
+  return num_workers
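A short sketch of how the helpers above fit together: `configure_cluster` turns worker host flags into a `TF_CONFIG` entry for multi-worker training, and `set_up_synthetic_data`/`undo_set_up_synthetic_data` bracket a run that should use the on-device synthetic input path. The host addresses below are made up, and the example assumes `TF_CONFIG` is not already set in the environment.

```python
# Illustrative wiring of configure_cluster and the synthetic-data helpers
# defined above; host addresses are made up.
import json
import os

from TensorFlow.utils.misc import distribution_utils

num_workers = distribution_utils.configure_cluster(
    worker_hosts="10.0.0.1:5000,10.0.0.2:5000", task_index=0)
assert num_workers == 2
print(json.loads(os.environ["TF_CONFIG"]))
# -> {'cluster': {'worker': ['10.0.0.1:5000', '10.0.0.2:5000']},
#     'task': {'type': 'worker', 'index': 0}}

# For dry runs without real input data, patch the strategy classes so that
# experimental_distribute_dataset() returns a SyntheticDataset instead:
distribution_utils.set_up_synthetic_data()
try:
  pass  # build the strategy, datasets and run the usual training loop here
finally:
  distribution_utils.undo_set_up_synthetic_data()
```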
+ """ + tf_config = json.loads(os.environ.get('TF_CONFIG', '{}')) + if tf_config: + num_workers = (len(tf_config['cluster'].get('chief', [])) + + len(tf_config['cluster'].get('worker', []))) + elif worker_hosts: + workers = worker_hosts.split(',') + num_workers = len(workers) + if num_workers > 1 and task_index < 0: + raise ValueError('Must specify task_index when number of workers > 1') + task_index = 0 if num_workers == 1 else task_index + os.environ['TF_CONFIG'] = json.dumps({ + 'cluster': { + 'worker': workers + }, + 'task': {'type': 'worker', 'index': task_index} + }) + else: + num_workers = 1 + return num_workers + + +def get_strategy_scope(strategy): + if strategy: + strategy_scope = strategy.scope() + else: + strategy_scope = DummyContextManager() + + return strategy_scope + + +class DummyContextManager(object): + + def __enter__(self): + pass + + def __exit__(self, *args): + pass diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/keras_utils.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/keras_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..afa7d00aee0b13238ecc8432f72ea50a9b8489fb --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/keras_utils.py @@ -0,0 +1,273 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helper functions for the Keras implementations of models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import multiprocessing +import os +import time + +from absl import logging +import tensorflow.compat.v2 as tf +from tensorflow.python import tf2 +from tensorflow.python.profiler import profiler_v2 as profiler + + +class BatchTimestamp(object): + """A structure to store batch time stamp.""" + + def __init__(self, batch_index, timestamp): + self.batch_index = batch_index + self.timestamp = timestamp + + def __repr__(self): + return "'BatchTimestamp'".format( + self.batch_index, self.timestamp) + + +class TimeHistory(tf.keras.callbacks.Callback): + """Callback for Keras models.""" + + def __init__(self, batch_size, log_steps, logdir=None, summary_writer=None, + batch_size_per_node=None): + """Callback for logging performance. + + Args: + batch_size: Total batch size. + log_steps: Interval of steps between logging of batch level stats. + logdir: Optional directory to write TensorBoard summaries. 
+ """ + # TODO(wcromar): remove this parameter and rely on `logs` parameter of + # on_train_batch_end() + self.batch_size = batch_size + self.batch_size_per_node = batch_size_per_node + super(TimeHistory, self).__init__() + self.log_steps = log_steps + self.last_log_step = 0 + self.steps_before_epoch = 0 + self.steps_in_epoch = 0 + self.start_time = None + + if summary_writer is not None: + self.summary_writer = summary_writer + elif logdir: + self.summary_writer = tf.summary.create_file_writer(logdir) + else: + self.summary_writer = None + + # Logs start of step 1 then end of each step based on log_steps interval. + self.timestamp_log = [] + + # Records the time each epoch takes to run from start to finish of epoch. + self.epoch_runtime_log = [] + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + @property + def average_steps_per_second(self): + """The average training steps per second across all epochs.""" + return self.global_steps / sum(self.epoch_runtime_log) + + @property + def average_examples_per_second(self): + """The average number of training examples per second across all epochs.""" + return self.average_steps_per_second * self.batch_size + + def on_train_end(self, logs=None): + self.train_finish_time = time.time() + + if self.summary_writer: + self.summary_writer.flush() + + def on_epoch_begin(self, epoch, logs=None): + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + # Record the timestamp of the first global step + if not self.timestamp_log: + self.timestamp_log.append(BatchTimestamp(self.global_steps, + self.start_time)) + + def on_batch_end(self, batch, logs=None): + """Records elapse time of the batch and calculates examples per second.""" + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + global_examples_per_second = steps_per_second * self.batch_size + if self.batch_size_per_node is not None: + examples_per_second = steps_per_second * self.batch_size_per_node + + self.timestamp_log.append(BatchTimestamp(self.global_steps, now)) + logging.info( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d', elapsed_time, global_examples_per_second, self.last_log_step, + self.global_steps) + + if self.summary_writer: + with self.summary_writer.as_default(): + tf.summary.scalar('global_step/sec', steps_per_second, + self.global_steps) + tf.summary.scalar('global_examples/sec', global_examples_per_second, + self.global_steps) + if examples_per_second: + # for consistency + tf.summary.scalar('examples/sec', examples_per_second, + self.global_steps) + + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time.time() - self.epoch_start + self.epoch_runtime_log.append(epoch_run_time) + + self.steps_before_epoch += self.steps_in_epoch + self.steps_in_epoch = 0 + + +def get_profiler_callback(model_dir, profile_steps, enable_tensorboard, + steps_per_epoch): + """Validate profile_steps flag value and return profiler callback.""" + profile_steps_error_message = ( + 'profile_steps must be a comma separated pair of positive integers, ' + 'specifying 
the first and last steps to be profiled.' + ) + try: + profile_steps = [int(i) for i in profile_steps.split(',')] + except ValueError: + raise ValueError(profile_steps_error_message) + if len(profile_steps) != 2: + raise ValueError(profile_steps_error_message) + start_step, stop_step = profile_steps + if start_step < 0 or start_step > stop_step: + raise ValueError(profile_steps_error_message) + if enable_tensorboard: + logging.warning( + 'Both TensorBoard and profiler callbacks are used. Note that the ' + 'TensorBoard callback profiles the 2nd step (unless otherwise ' + 'specified). Please make sure the steps profiled by the two callbacks ' + 'do not overlap.') + return ProfilerCallback(model_dir, start_step, stop_step, steps_per_epoch) + + +class ProfilerCallback(tf.keras.callbacks.Callback): + """Save profiles in specified step range to log directory.""" + + def __init__(self, log_dir, start_step, stop_step, steps_per_epoch): + super(ProfilerCallback, self).__init__() + self.log_dir = log_dir + self.start_step = start_step + self.stop_step = stop_step + self.start_epoch = start_step // steps_per_epoch + self.stop_epoch = stop_step // steps_per_epoch + self.start_step_in_epoch = start_step % steps_per_epoch + self.stop_step_in_epoch = stop_step % steps_per_epoch + self.should_start = False + self.should_stop = False + + def on_epoch_begin(self, epoch, logs=None): + if epoch == self.start_epoch: + self.should_start = True + if epoch == self.stop_epoch: + self.should_stop = True + + def on_batch_begin(self, batch, logs=None): + if batch == self.start_step_in_epoch and self.should_start: + self.should_start = False + profiler.start(self.log_dir) + logging.info('Profiler started at Step %s', self.start_step) + + def on_batch_end(self, batch, logs=None): + if batch == self.stop_step_in_epoch and self.should_stop: + self.should_stop = False + profiler.stop() + logging.info('Profiler saved profiles for steps between %s and %s to %s', + self.start_step, self.stop_step, self.log_dir) + + +def set_session_config(enable_eager=False, + enable_xla=False): + """Sets the session config.""" + if is_v2_0(): + set_config_v2(enable_xla=enable_xla) + else: + config = get_config_proto_v1(enable_xla=enable_xla) + if enable_eager: + tf.compat.v1.enable_eager_execution(config=config) + else: + sess = tf.compat.v1.Session(config=config) + tf.compat.v1.keras.backend.set_session(sess) + + +def get_config_proto_v1(enable_xla=False): + """Return config proto according to flag settings, or None to use default.""" + config = None + if enable_xla: + config = tf.compat.v1.ConfigProto() + config.graph_options.optimizer_options.global_jit_level = ( + tf.OptimizerOptions.ON_2) + return config + + +def set_config_v2(enable_xla=False): + """Config eager context according to flag values using TF 2.0 API.""" + if enable_xla: + tf.config.optimizer.set_jit(True) + + +def is_v2_0(): + """Returns true if using tf 2.0.""" + return tf2.enabled() + + +def set_gpu_thread_mode_and_count(gpu_thread_mode, + datasets_num_private_threads, + num_gpus, per_gpu_thread_count): + """Set GPU thread mode and count, and adjust dataset threads count.""" + cpu_count = multiprocessing.cpu_count() + logging.info('Logical CPU cores: %s', cpu_count) + + # Allocate private thread pool for each GPU to schedule and launch kernels + per_gpu_thread_count = per_gpu_thread_count or 2 + os.environ['TF_GPU_THREAD_MODE'] = gpu_thread_mode + os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count) + logging.info('TF_GPU_THREAD_COUNT: %s', + 
os.environ['TF_GPU_THREAD_COUNT']) + logging.info('TF_GPU_THREAD_MODE: %s', + os.environ['TF_GPU_THREAD_MODE']) + + # Limit data preprocessing threadpool to CPU cores minus number of total GPU + # private threads and memory copy threads. + total_gpu_thread_count = per_gpu_thread_count * num_gpus + num_runtime_threads = num_gpus + if not datasets_num_private_threads: + datasets_num_private_threads = min( + cpu_count - total_gpu_thread_count - num_runtime_threads, + num_gpus * 8) + logging.info('Set datasets_num_private_threads to %s', + datasets_num_private_threads) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/model_helpers.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/model_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..c112bacd4200aa7ec555933be6bedd33ed94df75 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/model_helpers.py @@ -0,0 +1,93 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Miscellaneous functions that can be called by models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numbers + +import tensorflow as tf +from tensorflow.python.util import nest + + +def past_stop_threshold(stop_threshold, eval_metric): + """Return a boolean representing whether a model should be stopped. + + Args: + stop_threshold: float, the threshold above which a model should stop + training. + eval_metric: float, the current value of the relevant metric to check. + + Returns: + True if training should stop, False otherwise. + + Raises: + ValueError: if either stop_threshold or eval_metric is not a number + """ + if stop_threshold is None: + return False + + if not isinstance(stop_threshold, numbers.Number): + raise ValueError("Threshold for checking stop conditions must be a number.") + if not isinstance(eval_metric, numbers.Number): + raise ValueError("Eval metric being checked against stop conditions " + "must be a number.") + + if eval_metric >= stop_threshold: + tf.compat.v1.logging.info( + "Stop threshold of {} was passed with metric value {}.".format( + stop_threshold, eval_metric)) + return True + + return False + + +def generate_synthetic_data( + input_shape, input_value=0, input_dtype=None, label_shape=None, + label_value=0, label_dtype=None): + """Create a repeating dataset with constant values. + + Args: + input_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape of + the input data. + input_value: Value of each input element. + input_dtype: Input dtype. If None, will be inferred by the input value. + label_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape of + the label data. 
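`past_stop_threshold` above gives training loops a uniform early-stopping check against a target metric value. The following is a hedged sketch of an evaluation loop using it; the threshold and accuracy values are illustrative only.

```python
# Illustrative early-stopping check using past_stop_threshold defined above.
from TensorFlow.utils.misc import model_helpers

stop_threshold = 0.759  # e.g. a target top-1 accuracy
for epoch, eval_accuracy in enumerate([0.41, 0.63, 0.72, 0.76]):
  if model_helpers.past_stop_threshold(stop_threshold, eval_accuracy):
    print("Target reached after epoch", epoch)
    break
# past_stop_threshold(None, x) always returns False, so passing
# stop_threshold=None disables the check entirely.
```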
+ label_value: Value of each input element. + label_dtype: Input dtype. If None, will be inferred by the target value. + + Returns: + Dataset of tensors or tuples of tensors (if label_shape is set). + """ + # TODO(kathywu): Replace with SyntheticDataset once it is in contrib. + element = input_element = nest.map_structure( + lambda s: tf.constant(input_value, input_dtype, s), input_shape) + + if label_shape: + label_element = nest.map_structure( + lambda s: tf.constant(label_value, label_dtype, s), label_shape) + element = (input_element, label_element) + + return tf.data.Dataset.from_tensors(element).repeat() + + +def apply_clean(flags_obj): + if flags_obj.clean and tf.io.gfile.exists(flags_obj.model_dir): + tf.compat.v1.logging.info("--clean flag set. Removing existing model dir:" + " {}".format(flags_obj.model_dir)) + tf.io.gfile.rmtree(flags_obj.model_dir) diff --git a/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/tpu_lib.py b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/tpu_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..4d4cddb1c6b015091ed2da57df49277e3008c252 --- /dev/null +++ b/docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/misc/tpu_lib.py @@ -0,0 +1,34 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Initializes TPU system for TF 2.0.""" + +import tensorflow as tf + + +def tpu_initialize(tpu_address): + """Initializes TPU for TF 2.0 training. + + Args: + tpu_address: string, bns address of master TPU worker. + + Returns: + A TPUClusterResolver. + """ + cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver( + tpu=tpu_address) + if tpu_address not in ('', 'local'): + tf.config.experimental_connect_to_cluster(cluster_resolver) + tf.tpu.experimental.initialize_tpu_system(cluster_resolver) + return cluster_resolver
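`generate_synthetic_data` above produces an endlessly repeating dataset of constant tensors, which is useful for benchmarking without a real input pipeline. For example, ImageNet-style inputs and labels can be faked as follows; the shapes and values are illustrative.

```python
# Example: constant-valued synthetic ImageNet-style inputs and labels.
import tensorflow as tf

from TensorFlow.utils.misc import model_helpers

dataset = model_helpers.generate_synthetic_data(
    input_shape=tf.TensorShape([224, 224, 3]),
    input_value=127,
    input_dtype=tf.float32,
    label_shape=tf.TensorShape([]),
    label_value=1,
    label_dtype=tf.int32)

dataset = dataset.batch(32)
images, labels = next(iter(dataset))
print(images.shape, labels.shape)  # (32, 224, 224, 3) (32,)
```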