applied-ai-018 committed
Commit 14a7d24 · verified · 1 Parent(s): eb49b41

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. docker/bloom13b/Model-References/.github/PULL_REQUEST_TEMPLATE.md +31 -0
  2. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/README.md +725 -0
  3. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/LICENSE +30 -0
  4. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/mlp_log.py +60 -0
  5. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/mlperf_variable_map.json +163 -0
  6. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/optimizer.py +59 -0
  7. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/resnet.py +369 -0
  8. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/ops_bf16_Resnet.txt +11 -0
  9. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/ops_fp32_Resnet.txt +5 -0
  10. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/requirements.txt +4 -0
  11. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/train.py +815 -0
  12. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/utils.py +273 -0
  13. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/batch_256.cfg +20 -0
  14. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/launch_resnet.sh +203 -0
  15. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/mlp_log.py +57 -0
  16. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/requirements.txt +7 -0
  17. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/resnet_model.py +323 -0
  18. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/resnet_runnable.py +545 -0
  19. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/__init__.py +0 -0
  20. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/common/imagenet_preprocessing.py +680 -0
  21. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/__init__.py +0 -0
  22. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_base.py +168 -0
  23. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_device.py +85 -0
  24. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_performance.py +289 -0
  25. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/core.py +133 -0
  26. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/cloud_lib.py +34 -0
  27. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/hooks.py +130 -0
  28. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/hooks_helper.py +172 -0
  29. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/logger.py +423 -0
  30. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/scripts/launch_keras_resnet_hvd.sh +618 -0
  31. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/scripts/run.sh +168 -0
  32. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/scripts/unpack_imagenet.sh +164 -0
  33. docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N16-PT.json +35 -0
  34. docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N2-PT.json +35 -0
  35. docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N32-PT.json +35 -0
  36. docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N4-PT.json +35 -0
  37. docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N48-PT.json +35 -0
  38. docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N8-PT.json +35 -0
  39. docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-PT.json +35 -0
  40. docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-TF.json +35 -0
  41. docker/bloom13b/Model-References/TensorFlow/common/debug.py +134 -0
  42. docker/bloom13b/Model-References/TensorFlow/common/tb_utils.py +474 -0
  43. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/pack_sequences_ops.cc +168 -0
  44. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/pack_sequences_ops_test.py +78 -0
  45. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/subword_text_encoder.cc +127 -0
  46. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/subword_text_encoder.h +44 -0
  47. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/subword_text_encoder_ops.cc +66 -0
  48. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/subword_text_encoder_ops_test.py +40 -0
  49. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/subword_text_encoder_test.cc +44 -0
  50. docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/testdata/subwords +31 -0
docker/bloom13b/Model-References/.github/PULL_REQUEST_TEMPLATE.md ADDED
@@ -0,0 +1,31 @@
# Description

> :memo: Please include a summary of the changes.
>
> * List any dependencies that are required for the changes.

## Type of changes

Please specify the type of changes, and delete the options that are not relevant.

- [ ] Documentation update
- [ ] Bug fix (changes which fix an issue)
- [ ] Others (please specify)

## Tests

> :memo: Please describe the tests that you ran to verify your changes.
>
> * Provide the instructions so that we can reproduce.
> * Please also list any relevant details for your test configuration.

## Checklist

- [ ] I agree with the [Developer Certificate of Origin](https://developercertificate.org/).
- [ ] My code conforms to the following coding guidelines:
  - [ ] Use Python 3
  - [ ] Python code follows [PEP 8 Coding Styles](https://www.python.org/dev/peps/pep-0008/)
  - [ ] For TensorFlow models, use TensorFlow 2 high-level APIs
- [ ] I have performed a self code review.
- [ ] I have made corresponding changes to the documentation.
- [ ] I have added tests that prove my fix is effective or that my feature works.
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/README.md ADDED
@@ -0,0 +1,725 @@
# Running Habana MLPerf™ Benchmarks

This directory provides instructions to reproduce Habana's results for [MLPerf Training v3.1](https://habana.ai/since-habanas-last-mlperf-submission/) **on configurations of 1 to 48 servers, each with 8 Gaudi2 cards.**

For more information on training deep learning models using Gaudi, refer to [developer.habana.ai](https://developer.habana.ai/resources/).

MLPerf™ is a trademark and service mark of MLCommons Association in the United States and other countries. All rights reserved. Unauthorized use is strictly prohibited.

- [Running Habana MLPerf™ Benchmarks](#running-habana-mlperf-benchmarks)
  - [Setup](#setup)
    - [Prepare MLPerf Directory](#prepare-mlperf-directory)
    - [Build and Deploy HabanaLabs MLPerf Training 3.1 Container](#build-and-deploy-habanalabs-mlperf-training-31-container)
    - [Training Data for TensorFlow BERT](#training-data-for-tensorflow-bert)
    - [Training Data for PyTorch BERT](#training-data-for-pytorch-bert)
    - [Training Data for ResNet50](#training-data-for-resnet50)
    - [Training Data for GPT3-175B](#training-data-for-gpt3-175b)
    - [Dataset Preparation for PyTorch Stable Diffusion](#dataset-preparation-for-pytorch-stable-diffusion)
  - [Training BERT](#training-bert)
  - [Training ResNet50](#training-resnet50)
  - [Training GPT3-175B](#training-gpt3-175b)
  - [Training PyTorch Stable Diffusion](#training-pytorch-stable-diffusion)
  - [Supported Configurations](#supported-configurations)
  - [Changelog](#changelog)

## Setup

### Prepare MLPerf Directory

On each compute node, perform the following:

1. Follow the instructions provided in the [Gaudi Installation
Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html) to set up the
environment, including the `$PYTHON` environment variable.
The guide will walk you through the process of setting up your system to run the benchmarks on Gaudi.

2. Create directories for scratch and dataset folders:
```
export MLPERF_ROOT=/path/to/mlperf/root
export SCRATCH_DIR=$MLPERF_ROOT/scratch
export DATASETS_DIR=$MLPERF_ROOT/datasets
mkdir -p $SCRATCH_DIR
mkdir -p $DATASETS_DIR
```

**Note:** If training is to be conducted on multiple nodes, it is essential to place `$DATASETS_DIR` on a shared filesystem that is accessible by all the nodes. This allows dataset preparation to be performed only once in the `Training Data for <configuration>` sections, enabling all nodes to access the prepared dataset during training.

3. Clone the Model-References repository and switch to the branch that matches your SynapseAI version. You can run the
[`hl-smi`](https://docs.habana.ai/en/latest/Management_and_Monitoring/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options)
utility to determine the SynapseAI version.

```bash
cd $MLPERF_ROOT
git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References
export MLPERF_DIR=$MLPERF_ROOT/Model-References/MLPERF3.1/Training
```

### Build and Deploy HabanaLabs MLPerf Training 3.1 Container

To build the MLPerf training 3.1 container, perform the following:

1. Copy SSH keys to `/root/.ssh/` to enable passwordless SSH.
2. Set the environment variables for the docker command.
* To find a docker image, go to [gaudi-docker](https://vault.habana.ai/ui/repos/tree/General/gaudi-docker).
* Open the gaudi-docker directory, and select the folder that matches the SynapseAI version (determined by running [`hl-smi`](https://docs.habana.ai/en/latest/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options)).
* Navigate to the subdirectories and choose the system and framework version.
* Choose the docker build version. Most often 'latest' will be used.
* Navigate to the "Docker Info" tab and note the "Title" string.
* Set `DOCKER_IMAGE` to the "Title" string prefixed with `vault.habana.ai/gaudi-docker/`. See the examples below.
* Example for the TensorFlow container:
```bash
# NOTE: The below is only an example value. Replace [SynapseAI version] and [TF version] to match your setup and Supported Configuration.
export DOCKER_IMAGE=vault.habana.ai/gaudi-docker/[SynapseAI version]/ubuntu20.04/habanalabs/tensorflow-installer-tf-cpu-[TF version]:latest
export CONTAINER_NAME=mlperf3_1
```
* Example for the PyTorch container:
```bash
# NOTE: The below is only an example value. Replace [SynapseAI version] and [PT version] to match your setup and Supported Configuration.
export DOCKER_IMAGE=vault.habana.ai/gaudi-docker/[SynapseAI version]/ubuntu20.04/habanalabs/pytorch-installer-[PT version]:latest
export CONTAINER_NAME=mlperf3_1
```

3. Create the `mlperf3.1` container by running the following command.

```bash
docker run --privileged --security-opt seccomp=unconfined \
  --name $CONTAINER_NAME -td \
  -v /dev:/dev \
  --device=/dev:/dev \
  -e LOG_LEVEL_ALL=6 \
  -v /sys/kernel/debug:/sys/kernel/debug \
  -v /tmp:/tmp \
  -v $MLPERF_DIR:/root/MLPERF \
  -v $DATASETS_DIR:/root/datasets \
  -v $SCRATCH_DIR:/root/scratch \
  --cap-add=sys_nice --cap-add=SYS_PTRACE \
  --user root --workdir=/root --net=host \
  --ulimit memlock=-1:-1 ${DOCKER_IMAGE}
```

4. Start the docker container.

```bash
docker exec $CONTAINER_NAME bash -c "service ssh start"
docker exec -it $CONTAINER_NAME bash
```

**Note:** The following two steps are only necessary for training on multiple nodes.

5. In the docker container, create a `/root/shared/hosts` file that contains a list of all host IPs in the cluster. Add one IP per line. Below is an example for 4 nodes (32 devices).
```
mkdir /root/shared
echo '10.10.100.101' > /root/shared/hosts
echo '10.10.100.102' >> /root/shared/hosts
echo '10.10.100.103' >> /root/shared/hosts
echo '10.10.100.104' >> /root/shared/hosts
```

6. SSH is used to spawn local and remote processes. To allow communication between machines, passwordless SSH must be configured and a default connection port set. This has to be done on all of the machines:
```
mkdir .ssh
printf 'Host *\n StrictHostKeyChecking no\nPort 3022' >> .ssh/config
```
It may also be necessary to set up SSH keys (for example, with `ssh-keygen` and `ssh-copy-id`) and add them to `~/.ssh/authorized_keys`.

### Training Data for TensorFlow BERT

1. Log into the mlperf3.1 TensorFlow container and install the requirements:
<!-- DATASET download_mlperf_bert_tensorflow -->
<!-- DATASET process_mlperf_bert_tensorflow -->
```bash
export BERT_PATH=/root/MLPERF/benchmarks/bert/implementations/TensorFlow/nlp/bert
cd $BERT_PATH
pip install -r requirements.txt
```
<!-- /DATASET process_mlperf_bert_tensorflow -->
<!-- /DATASET download_mlperf_bert_tensorflow -->

2. Download the required files from Google Drive.
<!-- DATASET download_mlperf_bert_tensorflow -->
```bash
export TENSORFLOW_BERT_DATA=/root/datasets/tensorflow_bert
bash pretraining/prepare_dataset.sh \
  --data-path $TENSORFLOW_BERT_DATA \
  --only-download
```
<!-- /DATASET download_mlperf_bert_tensorflow -->

After completing this step, there should be a `$TENSORFLOW_BERT_DATA/input` folder containing the following files:
```
bert_config.json
model.ckpt-28252.data-00000-of-00001
model.ckpt-28252.index
model.ckpt-28252.meta
results_text.tar.gz
vocab.txt
```

3. Prepare the packed dataset by running the command below:
<!-- DATASET process_mlperf_bert_tensorflow -->
```bash
bash pretraining/prepare_dataset.sh \
  --scripts-path $BERT_PATH \
  --data-path $TENSORFLOW_BERT_DATA \
  --only-preprocessing \
  --jobs-limit 25
```
<!-- /DATASET process_mlperf_bert_tensorflow -->

This step will take multiple hours to complete.
The exact time depends on the machine setup and the speed of the storage that contains the dataset.
The `--jobs-limit` option limits the number of parallel processes for converting and packing tfrecords.
This step is resource-intensive,
and the machine running it must have a minimum of 32 CPUs and 755GB of RAM to ensure proper functioning.

4. `$TENSORFLOW_BERT_DATA` should now contain the following folders:
```
checkpoint
eval_dataset
input
packed_data_500
unpacked_data
```

The `input` folder can be removed if the preprocessing has been successfully completed.
By default, TensorFlow BERT uses only packed data for training,
as described in the scenario [here](#training-tensorflow-bert).
In such cases, the `unpacked_data` folder is unnecessary and can be deleted.

### Training Data for PyTorch BERT

#### Dataset Preparation

Log into the mlperf3.1 PyTorch container and run:
<!-- DATASET download_mlperf_bert_pytorch -->
<!-- DATASET process_mlperf_bert_pytorch -->
```bash
cd /root/MLPERF/benchmarks/bert/implementations/PyTorch
pip install -r requirements.txt
export PYTORCH_BERT_DATA=/root/datasets/pytorch_bert
```
<!-- /DATASET process_mlperf_bert_pytorch -->
```bash
bash input_preprocessing/prepare_data.sh -o $PYTORCH_BERT_DATA
```
<!-- /DATASET download_mlperf_bert_pytorch -->

At this stage, the `$PYTORCH_BERT_DATA/phase1` checkpoint and `$PYTORCH_BERT_DATA/hdf5/eval_varlength` evaluation data are ready, while the `$PYTORCH_BERT_DATA/hdf5/training_4320/hdf5_4320_shards_uncompressed` training data requires packing as described in the following section.

#### Training Data Packing

Once the training data is ready, pack it using code similar to that described in [GraphCore for v1.0 Submission](https://github.com/mlcommons/training_results_v1.0/tree/master/Graphcore/benchmarks/bert/implementations/popart/bert_data).

<!-- DATASET process_mlperf_bert_pytorch -->
```bash
mkdir $PYTORCH_BERT_DATA/packed
python3 pack_pretraining_data_pytorch.py \
  --input_dir=$PYTORCH_BERT_DATA/hdf5/training-4320/hdf5_4320_shards_uncompressed \
  --output_dir=$PYTORCH_BERT_DATA/packed \
  --max_predictions_per_seq=76
```
<!-- /DATASET process_mlperf_bert_pytorch -->

For further details, refer to [Packing: Towards 2x NLP BERT Acceleration](https://arxiv.org/abs/2107.02027).

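As a rough illustration of the packing idea only (this is not the actual `pack_pretraining_data_pytorch.py` implementation, and the function name below is hypothetical), sequences can be grouped greedily so that the combined length of each group stays within the maximum sequence length, reducing padding:

```python
# Toy sketch of greedy (first-fit decreasing) sequence packing, illustrative only.
# The real script also handles masked-LM labels, attention masks and HDF5 shards.
from typing import List

def pack_greedy(seq_lengths: List[int], max_seq_len: int = 512) -> List[List[int]]:
    """Group sequence lengths into packs whose total length fits max_seq_len."""
    packs: List[List[int]] = []   # each pack is a list of sequence lengths
    remaining: List[int] = []     # free space left in each open pack
    for length in sorted(seq_lengths, reverse=True):
        for i, free in enumerate(remaining):
            if length <= free:
                packs[i].append(length)
                remaining[i] -= length
                break
        else:
            packs.append([length])
            remaining.append(max_seq_len - length)
    return packs

print(pack_greedy([512, 200, 300, 76, 128], max_seq_len=512))
# e.g. [[512], [300, 200], [128, 76]] -> far fewer padded positions than one sequence per sample
```
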
### Training Data for ResNet50

The instructions for the ImageNet dataset are applicable to both PyTorch and TensorFlow ResNet50.

1. Sign up with [image-net.org](http://image-net.org/download-images) and acquire the rights to download the original images.
2. Follow the link to the 2012 ILSVRC and download ILSVRC2012_img_val.tar and ILSVRC2012_img_train.tar.
Place the files in a folder that will be mapped into the mlperf3.1 container (for example, `$DATASETS_DIR`).
3. Run the script below in the mlperf3.1 container (PyTorch or TensorFlow) to unpack the dataset:

```
bash /root/MLPERF/benchmarks/resnet/scripts/unpack_imagenet.sh \
  --train-archive /path/to/ILSVRC2012_img_train.tar \
  --validation-archive /path/to/ILSVRC2012_img_val.tar \
  --output-path /root/datasets/imagenet \
  --jobs-number 16
```

The script unpacks the training and validation packages in parallel.
In addition, when unpacking subarchives from ILSVRC2012_img_train.tar,
`--jobs-number` defines the number of parallel processes allocated for the task.
The script's runtime depends largely on the data access speed of the storage where `$DATASETS_DIR` is located.

### Training Data for GPT3-175B

#### Dataset Preparation for GPT3-175B

Dataset preparation should be done in the following docker container:

```
docker run --ipc=host -it -v $DATASETS_DIR:/root/datasets -v $MLPERF_DIR:/root/MLPERF nvcr.io/nvidia/pytorch:22.11-py3 bash
```

MLPerf GPT3 is trained using the C4/en/3.0.1 dataset, which can be downloaded from https://huggingface.co/datasets/allenai/c4. The commands below download precisely the files that are required.

```
apt-get update
apt-get install git-lfs
mkdir -p /root/datasets/gpt3
cd /root/datasets/gpt3
GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/datasets/allenai/c4
cd c4
git lfs pull --include "en/*"
```

Out of all the files, only 256 will be required for training, and 8 for validation.
You can merge them into three .json.gz files using the following commands, which are taken from https://github.com/mlcommons/training/blob/master/large_language_model/megatron-lm/README.md.

```
# create softlinks to store each shard before merging
mkdir -p softlinks
for shard in {6..7}; do
  start=$((shard * 128))
  end=$((shard * 128 + 127))
  mkdir -p softlinks/en_$shard
  for ind in $(seq -f "%05g" $start $end); do
    ln -s ../../en/c4-train.${ind}-of-01024.json.gz softlinks/en_${shard}/c4-train.${ind}-of-01024.json.gz
  done
done

# merge
mkdir -p en_merge
for shard in {6..7}; do
  cat softlinks/en_${shard}/*gz > en_merge/c4-train.en_${shard}.json.gz
done
cat en/c4-validation.0000* > en_merge/c4-validation.json.gz
```

To tokenize the prepared files, you need to download the tokenizer model, vocab_c4_en_301_5Mexp2_spm.model, and the vocabulary file, vocab_c4_en_301_5Mexp2_spm.vocab, from the following location:
https://console.cloud.google.com/storage/browser/mlperf-llm-public2;tab=objects?prefix=&forceOnObjectsSortingFiltering=false.
Please note that registration is required to access these files. Tokenization can be performed using the following commands.
Please be aware that this conversion process may take several hours.

```
git clone https://github.com/NVIDIA/NeMo.git
cd NeMo && git checkout f3ad584b94170bc3ea197df29eb9ef9c96061730 && bash ./reinstall.sh && cd ..

mkdir -p preprocessed_c4_spm
for shard in {6..7}; do
  python3 NeMo/scripts/nlp_language_modeling/preprocess_data_for_megatron.py \
    --input en_merge/c4-train.en_${shard}.json.gz \
    --tokenizer-library sentencepiece \
    --tokenizer-model vocab_c4_en_301_5Mexp2_spm.model \
    --output-prefix preprocessed_c4_spm/c4_en_${shard}_c4_spm \
    --dataset-impl mmap \
    --workers 128
done

python3 NeMo/scripts/nlp_language_modeling/preprocess_data_for_megatron.py \
  --input en_merge/c4-validation.json.gz \
  --tokenizer-library sentencepiece \
  --tokenizer-model vocab_c4_en_301_5Mexp2_spm.model \
  --output-prefix preprocessed_c4_spm/c4_en_validation_c4_spm \
  --dataset-impl mmap \
  --workers 128
```

The resulting files to be used during training are as follows:
* `preprocessed_c4_spm/c4_en_6_c4_spm_text_document.bin`
* `preprocessed_c4_spm/c4_en_6_c4_spm_text_document.idx`
* `preprocessed_c4_spm/c4_en_7_c4_spm_text_document.bin`
* `preprocessed_c4_spm/c4_en_7_c4_spm_text_document.idx`
* `preprocessed_c4_spm/c4_en_validation_c4_spm_text_document.bin`
* `preprocessed_c4_spm/c4_en_validation_c4_spm_text_document.idx`

In addition to the dataset, the GPT3 implementation requires the https://huggingface.co/gpt2/resolve/main/vocab.json and https://huggingface.co/gpt2/resolve/main/merges.txt files:

```
wget "https://huggingface.co/gpt2/resolve/main/vocab.json" -P preprocessed_c4_spm
wget "https://huggingface.co/gpt2/resolve/main/merges.txt" -P preprocessed_c4_spm
```

In order to exclude graph compilation time from Time To Train, you need to prepare a synthetic dataset for device warmup:
```
python3 /root/MLPERF/benchmarks/gpt3/tools/create_synthetic_dataset.py \
  --valid_files_path preprocessed_c4_spm/c4_en_validation_c4_spm_text_document \
  --output_path preprocessed_c4_spm/
```

The command line above will create the synthetic files:
* `preprocessed_c4_spm/synthetic_text_document.bin`
* `preprocessed_c4_spm/synthetic_text_document.idx`

#### Checkpoint Preparation for GPT3-175B

Log into the mlperf3.1 PyTorch container. Install DeepSpeed and other requirements:
```
pip install git+https://github.com/HabanaAI/DeepSpeed.git
pip install -r /root/MLPERF/benchmarks/gpt3/requirements.txt
```

The checkpoint for MLPerf GPT3 in the paxml format can be downloaded from
[gs://mlperf-llm-public2/gpt3_spmd1x64x24_tpuv4-3072_v84_20221101/checkpoints/checkpoint_00004000](gs://mlperf-llm-public2/gpt3_spmd1x64x24_tpuv4-3072_v84_20221101/checkpoints/checkpoint_00004000).
The common_bf16.json file can be downloaded from: https://github.com/ShriyaPalsamudram/training/tree/LLM-NVIDIA-reference-draft/large_language_model/megatron-lm/scripts.
At one stage, there will be a merged directory and a universal directory, each requiring 2 TB of disk space for the 96-layer (96L) model. Therefore, to complete all the steps, it is necessary to have over 4 TB of free disk space.
Additionally, the machine must have a minimum of 32 CPUs and 755GB of RAM to ensure proper functioning.
Before the checkpoint can be used, it must be converted by following the steps below:

1. Convert the paxml checkpoint to the Megatron distributed format using /root/MLPERF/benchmarks/gpt3/tools/convert_checkpoint/convert_paxml_optimizer.py

```
python3 /root/MLPERF/benchmarks/gpt3/tools/convert_checkpoint/convert_paxml_optimizer.py \
  --google_ckpts checkpoint_00004000/ \
  --output_dir megatron_merged_ckpt \
  --num_layers 96 \
  --params_file common_bf16.json \
  --pool 1
```

2. Convert the Megatron merged checkpoint to a DeepSpeed universal checkpoint.

To generate the mp-rank files required by megatron_optim_merged_to_ds_universal_convert.py, the user needs to run GPT-3, which will generate these files based on the configuration used in the run.
This can be obtained by running a single step of GPT-3 and saving the checkpoint.
Please note that only this particular step of checkpoint preparation must be done using 8 HLS2 machines. The remaining steps can be performed on a CPU-only machine.
Please make sure the /root/shared/hosts file contains a list of 8 IPs for the HLS2 machines and that SSH communication is properly configured.
For further details, refer to points 5 and 6 [here](#build-and-deploy-habanalabs-mlperf-training-31-container).
Once the setup is ready, proceed to run the single step for GPT3 as follows:
```
mkdir checkpoint_with_mp_rank_files
bash /root/MLPERF/benchmarks/gpt3/run_gpt.sh --hosts /root/shared/hosts --data-dir /root/datasets/ --output-dir /root/scratch --num-nodes 8 --data-parallel-size 1 --start-from-ckpt false --save-checkpoints-dir checkpoint_with_mp_rank_files --exit-interval 1 --global-batch-size 2048
```

Run megatron_optim_merged_to_ds_universal_convert.py to create the universal checkpoint:

```
mkdir -p /root/datasets/gpt3/universal-checkpoint
python3 /root/MLPERF/benchmarks/gpt3/tools/convert_checkpoint/megatron_optim_merged_to_ds_universal_convert.py \
  --o /root/datasets/gpt3/universal-checkpoint/ --ds-mp-rank-files-dir checkpoint_with_mp_rank_files --megatron-lm-merged-input-dir megatron_merged_ckpt \
  --tp 8 --pp 8 --nl 96 --iteration 3000 --global-batch-size 2048 --seq_length 2048 --lr-decay-samples 166809600 --lr-warmup-samples 407040 \
  --pool 64 --model-parallel-same-config False --update-only-mp-rank-files False
```

### Dataset Preparation for PyTorch Stable Diffusion

The instructions for preparing the dataset are based on the original MLCommons instructions.
For more details, please follow the instructions under the following link:
https://github.com/mlcommons/training/tree/master/stable_diffusion

#### Generate training dataset (preprocessed moments dataset)

Log into the mlperf3.1 PyTorch container:
```
export DATASET_PATH=/root/datasets/stable_diffusion/datasets/laion-400m/webdataset-moments-filtered

bash /root/MLPERF/benchmarks/stable_diffusion/scripts/datasets/laion400m-filtered-download-moments.sh --output-dir $DATASET_PATH
```

#### Generate validation dataset

Log into the mlperf3.1 PyTorch container:
```
export DATASET_DIR=/root/datasets/stable_diffusion/datasets/coco2014

bash /root/MLPERF/benchmarks/stable_diffusion/scripts/datasets/coco2014-validation-download-prompts.sh --output-dir $DATASET_DIR
```
##### Set ANNOTATION_FILE to the downloaded annotation file

```
export ANNOTATION_FILE=$DATASET_DIR/val2014_30k.tsv
```

```
bash /root/MLPERF/benchmarks/stable_diffusion/scripts/datasets/coco2014-validation-download-stats.sh --output-dir $DATASET_DIR
```

##### Set FID_GT_PATH to the downloaded npz file used for inception
```
export FID_GT_PATH=$DATASET_DIR/val2014_30k_stats.npz
```

#### Download the base checkpoint for Stable Diffusion: UNet, VAE, and OpenCLIP text embedder
Reference: https://github.com/mlcommons/training/tree/master/stable_diffusion#downloading-the-checkpoints

Log into the mlperf3.1 PyTorch container:
```
export DATASET_DIR=/root/datasets/stable_diffusion/datasets/checkpoints/sd

bash /root/MLPERF/benchmarks/stable_diffusion/scripts/checkpoints/download_sd.sh --output-dir $DATASET_DIR

export BASE_CKPT=$DATASET_DIR/512-base-ema.ckpt
```

#### Generate the synthetic dataset for Stable Diffusion (warmup)

Uncompress any one data tar file from the training data (for example, $DATASET_PATH/00001.tar) and keep it in the input directory path. Set the environment variables for the input and output paths, then run the script below to generate the synthetic data in the output directory.

Log into the mlperf3.1 PyTorch container:
```
cp /root/datasets/stable_diffusion/datasets/laion-400m/webdataset-moments-filtered/00001.tar /root/datasets/stable_diffusion/datasets/input_uncompressed_file/

cd /root/datasets/stable_diffusion/datasets/input_uncompressed_file/

tar -xvf 00001.tar; cd -;

export DATASET_PATH_UNCOMPRESSED=/root/datasets/stable_diffusion/datasets/input_uncompressed_file
export DATASET_PATH_OUTPUT=/root/datasets/stable_diffusion/datasets/

cd /root/MLPERF/benchmarks/stable_diffusion/scripts;
bash prepare_synthetic_data.sh; cd -;
```

After synthetic data preparation, copy the generated SD_synthetic_data_10001.tar file to the path referenced by WARMUP_FILE during training:

```
export WARMUP_FILE=$DATASET_PATH_OUTPUT/SD_synthetic_data_10001.tar
```

## Training BERT

### Training TensorFlow BERT

1. Inside the mlperf3.1 TensorFlow container, install the BERT requirements.
```bash
export BERT_IMPLEMENTATIONS=/root/MLPERF/benchmarks/bert/implementations
pip install -r $BERT_IMPLEMENTATIONS/TensorFlow/nlp/bert/requirements.txt
```

2. Run the training.
```bash
cd $BERT_IMPLEMENTATIONS/HLS-Gaudi2-TF
./launch_bert_hvd.sh --config defaults.cfg
```

### Training PyTorch BERT

1. Inside the mlperf3.1 PyTorch container, install the BERT requirements.
```bash
export BERT_IMPLEMENTATIONS=/root/MLPERF/benchmarks/bert/implementations
pip install -r $BERT_IMPLEMENTATIONS/PyTorch/requirements.txt
```

2. Run the training.
```bash
export PYTORCH_BERT_DATA=/root/datasets/pytorch_bert
cd $BERT_IMPLEMENTATIONS/HLS-Gaudi2-PT
./launch_bert_pytorch.sh --data-dir $PYTORCH_BERT_DATA
```

### TTT (Time to Train) Calculation for BERT

Results can be found in the following output files:
* /tmp/bert_pretrain/phase_2/result_rank_0.txt for TensorFlow BERT
* /tmp/BERT_PRETRAINING/results/checkpoints/result_rank_0.txt for PyTorch BERT

To get the TTT from the training script output, run the following command:

```bash
grep 'run_start\|run_stop' /path/to/output/file | grep worker0 | awk '{print $5}' | tr -d ',' | paste -sd " " - | awk '{print ($2 - $1) / 1000 / 60}'
```

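The same calculation can also be done by parsing the MLLOG payloads directly, which avoids relying on fixed `awk` field positions. This is only an illustrative sketch, assuming the standard `:::MLLOG {json}` line format with `key` and `time_ms` fields, and it applies equally to the other `result_rank_0.txt` files in this document:

```python
# Hedged alternative to the grep/awk one-liner: compute TTT in minutes
# from an MLPerf result file containing ":::MLLOG {json}" lines.
import json
import sys

def time_to_train_minutes(result_file: str) -> float:
    times = {}
    with open(result_file) as f:
        for line in f:
            if ":::MLLOG" not in line:
                continue
            entry = json.loads(line.split(":::MLLOG", 1)[1])
            if entry.get("key") in ("run_start", "run_stop"):
                times[entry["key"]] = entry["time_ms"]
    return (times["run_stop"] - times["run_start"]) / 1000 / 60

if __name__ == "__main__":
    print(time_to_train_minutes(sys.argv[1]))  # e.g. result_rank_0.txt
```
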
## Training ResNet50

### Training TensorFlow ResNet50

1. Inside the mlperf3.1 TensorFlow container, install the ResNet50 requirements.
```bash
export RESNET_IMPLEMENTATIONS=/root/MLPERF/benchmarks/resnet/implementations
pip install -r $RESNET_IMPLEMENTATIONS/TensorFlow/computer_vision/Resnets/resnet_keras/requirements.txt
```

2. Run the training.
```bash
cd $RESNET_IMPLEMENTATIONS/HLS-Gaudi2-TF
./launch_keras_resnet_hvd.sh --config $(pwd)/batch_256.cfg --jpeg-data-dir /root/datasets/imagenet --log_dir /tmp/resnet_log
```

### Training PyTorch ResNet50

1. Inside the mlperf3.1 PyTorch container, install the ResNet50 requirements.
```bash
export RESNET_IMPLEMENTATIONS=/root/MLPERF/benchmarks/resnet/implementations
pip install -r $RESNET_IMPLEMENTATIONS/HLS-Gaudi2-PT/PyTorch/requirements.txt
```

2. Run the training.
```bash
cd $RESNET_IMPLEMENTATIONS/HLS-Gaudi2-PT
./launch_resnet.sh --config batch_256.cfg --data-dir /root/datasets/imagenet
```

### TTT (Time to Train) Calculation for ResNet50

To get the TTT from the training script output, run the following command:

```bash
grep 'run_start\|run_stop' /tmp/resnet_log/result_rank_0.txt | grep worker0 | awk '{print $5}' | tr -d ',' | paste -sd " " - | awk '{print ($2 - $1) / 1000 / 60}'
```

## Training GPT3-175B

All the training steps for GPT3-175B should be performed in the mlperf3.1 PyTorch container.

### Installing Requirements

The following requirements need to be installed on all machines participating in the training:
```
pip install git+https://github.com/HabanaAI/DeepSpeed.git
pip install -r /root/MLPERF/benchmarks/gpt3/requirements.txt
```

### Run and time

The latest Intel-HabanaLabs software supports 8-bit floating-point precision (FP8) training for the GPT3 model, and the MLPerf 3.1 submissions for GPT3 were conducted using FP8 precision.
Running the GPT3 model requires multiple machines, for example, 32 HLS2 machines (`HLS-Gaudi2-N32-PT` system) or 48 HLS2 machines (`HLS-Gaudi2-N48-PT` system).

Please set the paths for the dataset and the universal checkpoint, which should be created during the [setup phase](#training-data-for-gpt3-175b).
```
export DATASET_DIR=/root/datasets/gpt3/c4/preprocessed_c4_spm
export CHECKPOINT_DIR=/root/datasets/gpt3/universal-checkpoint
```

Please make sure the /root/shared/hosts file contains a list of IPs for the HLS2 machines, and that SSH communication is properly configured.
For further details, refer to points 5 and 6 [here](#build-and-deploy-habanalabs-mlperf-training-31-container).

#### Running GPT3 on HLS-Gaudi2-N32-PT System
```
bash /root/MLPERF/benchmarks/gpt3/run_gpt.sh --data-dir $DATASET_DIR/ --universal-ckpt-path $CHECKPOINT_DIR/ \
  --hosts /root/shared/hosts --output-dir /root/scratch --num-nodes 32 --data-parallel-size 4 --save-checkpoints false --mllog-output-path /root/scratch/result.txt --train-samples 6782976 --use-fp8-transformer-engine --global-batch-size 2048 --micro-batch-size 2 --eval-interval 12 --device-warmup true --device-warmup-dataset-path $DATASET_DIR/synthetic_text_document
```

#### Running GPT3 on HLS-Gaudi2-N48-PT System
```
bash /root/MLPERF/benchmarks/gpt3/run_gpt.sh --data-dir $DATASET_DIR/ --universal-ckpt-path $CHECKPOINT_DIR/ \
  --hosts /root/shared/hosts --output-dir /root/scratch --num-nodes 48 --data-parallel-size 8 --pipeline-model-parallel-size 6 --save-checkpoints false --mllog-output-path /root/scratch/result.txt --train-samples 6782976 --global-batch-size 2048 --micro-batch-size 2 --eval-interval 12 --device-warmup true --device-warmup-dataset-path $DATASET_DIR/synthetic_text_document --use-fp8-transformer-engine
```

Training results will be stored in the `/root/scratch` folder.

`--save-checkpoints` is set to `false`, as 96-layer checkpoints take a lot of disk space. To save the checkpoint after the run, or to save it periodically, use `--save-checkpoints true` and adjust the `--save-interval` parameter.
The script starts from the universal checkpoint and trains for up to 312 steps, or until the validation log perplexity drops below 2.69. Based on the convergence point of GPT3 on the HLS system, it should take approximately 288 steps to reach a validation log perplexity of 2.69. To reduce the number of steps, use the `--exit-interval` parameter or reduce the number of training samples with the `--train-samples` parameter.

### TTT (Time to Train) Calculation for GPT3-175B

To get the TTT from the training script output, run the following command:

```bash
grep 'run_start\|run_stop' /root/scratch/result.txt | awk '{print $5}' | tr -d ',' | paste -sd " " - | awk '{print ($2 - $1) / 1000 / 60}'
```

## Training PyTorch Stable Diffusion

#### Run the training and validation steps

The following environment variables must be set before training:

```
DATASET_PATH: path where the preprocessed data is located
ANNOTATION_FILE: annotation file used for validation
FID_GT_PATH: path to the npz file used for inception
RESULTS_DIR: path where the results and checkpoints will be saved
POSTFIX_LOG_DIR: postfix for the log directory
WARMUP_FILE: file used only in the warmup of the training
BASE_CKPT: the base checkpoint
```

```
# For example:
export DATASET_PATH="/root/datasets/stable_diffusion/datasets/laion-400m/webdataset-moments-filtered/{00000..00831}.tar"
export ANNOTATION_FILE="/root/datasets/stable_diffusion/datasets/coco2014/val2014_30k.tsv"
export FID_GT_PATH="/root/datasets/stable_diffusion/datasets/coco2014/val2014_30k_stats.npz"
export RESULTS_DIR="/tmp/"
export POSTFIX_LOG_DIR="64x_run"
export WARMUP_FILE="/root/datasets/stable_diffusion/datasets/SD_synthetic_data_10001.tar"
export BASE_CKPT="/root/datasets/stable_diffusion/datasets/checkpoints/sd/512-base-ema.ckpt"
```

### Running Stable Diffusion training on HLS-Gaudi2-N8-PT System

#### Follow the steps below in sequence on each worker:

#### Step 1: Installing Requirements
Log into the mlperf3.1 PyTorch container and install the requirements:
```
pip install -r /root/MLPERF/benchmarks/stable_diffusion/scripts/requirements.txt
```

#### Step 2: Initialization to Clear Cache and Initialize MLLogging
```
bash /root/MLPERF/benchmarks/stable_diffusion/scripts/run_init.sh
```

#### Step 3: Training
Each worker runs a training command like the following:

```
# For example:
MASTER_PORT=${MASTER_PORT} MASTER_ADDR=${MASTER_ADDR} NODE_RANK={NODE_RANK} python3 -u /root/MLPERF/benchmarks/stable_diffusion/main.py \
  lightning.trainer.num_nodes=8 data.params.train.params.urls=${DATASET_PATH} lightning.modelcheckpoint.params.every_n_train_steps=1000 \
  data.params.validation.params.annotations_file=${ANNOTATION_FILE} \
  lightning.trainer.max_steps=5000 lightning.trainer.val_check_interval=<Greater_than_max_steps_to_avoid_online_val> \
  lightning.modelcheckpoint.params.save_last=False model.params.hpu_graph=True -m train --ckpt {BASE_CKPT} \
  -b configs/train_08x08x08.yaml -l {RESULTS_DIR} --autocast --warmup {WARMUP_FILE} --async_checkpoint -n {POSTFIX_LOG_DIR}
```

#### Step 4: Offline validation for the generated checkpoints
```
# For example:
MASTER_PORT=${MASTER_PORT} MASTER_ADDR=${MASTER_ADDR} NODE_RANK={NODE_RANK} python3 -u /root/MLPERF/benchmarks/stable_diffusion/main.py \
  lightning.trainer.num_nodes=8 data.params.validation.params.annotations_file=${ANNOTATION_FILE} \
  model.params.validation_config.fid.gt_path=${FID_GT_PATH} model.params.load_unet=True -m validate \
  --ckpt {RESULTS_DIR}/checkpoints/'epoch=000000-step=00000x000.ckpt' -b {BASE_CKPT} -b configs/train_08x08x08.yaml \
  --current_validation_iter {Current_validation_iteration_number} --validation_iters {Total_validation_iteration_numbers}
```

## Supported Configurations

### TensorFlow ResNet-50, PyTorch BERT, PyTorch ResNet-50, PyTorch GPT3-175B

| Validated on | SynapseAI Version | Framework Version(s) | Mode |
| :----------: | :---------------: | :------------------: | :------: |
| Gaudi2 | 1.14.0 | TensorFlow 2.15.0 | Training |
| Gaudi2 | 1.14.0 | PyTorch 2.1.1 | Training |

### TensorFlow BERT, PyTorch Stable Diffusion

| Validated on | SynapseAI Version | Framework Version(s) | Mode |
| :----------: | :---------------: | :------------------: | :------: |
| Gaudi2 | 1.13.0 | TensorFlow 2.13.1 | Training |
| Gaudi2 | 1.13.0 | PyTorch 2.1.0 | Training |

## Changelog
### 1.14.0
- Updated scripts to enable dynamic shapes support for topologies:
  - PyTorch BERT
### 1.13.0
- Updated scripts to cover MLPerf 3.1 submission, including but not limited to:
  - Optimized GPT3 code by:
    - FP8 support;
    - Sequence Parallelism support;
    - Fused Scaled Dot Product Attention;
    - Device warmup.
  - Added new benchmark: Stable Diffusion;
  - Enabled using HPU Graphs by default for PyTorch ResNet50;
  - Removed UNet3D and Bert 64xcards from submission.
### 1.12.0
- Removed the setting of the PT_HPU_LAZY_MODE environment variable in the script for Bert and ResNet50.
- Removed unused PT_HPU_ENABLE_SYNC_OUTPUT_HOST environment variable.
### 1.11.0
- Updated scripts to cover MLPerf 3.0 submission.
- Switched UNet3D, Bert, ResNet50 from HMP to Autocast.
- Added script for ImageNet unpacking.
- Reworked scripts and instructions for TensorFlow BERT data preprocessing.
- Added clearing of deepspeed_config to force DeepSpeed to take the config from args.deepspeed_configuration at initialize().
### 1.10.0
- Updated scripts to cover MLPerf 3.0 submission.
### 1.9.0
- Disabled auto dynamic shape support for Habana devices for PyTorch ResNet50.
### 1.8.0
- Prepared new scripts for PyTorch BERT data preprocessing.
- Moved data preprocessing instructions to docker environment.
### 1.7.0
- Updated scripts to cover MLPerf 2.1 submission.
### 1.6.0
- Removed obsolete files from TensorFlow/nlp/bert.
### 1.5.0
- Updated scripts to cover MLPerf 2.0 submission.
- Cleaned up ResNet requirements compared to the originally submitted ones.
- Removed run_bert_docker.sh and run_resnet50_docker.sh scripts.
### 1.4.0
- Switched from the deprecated TF_ENABLE_BF16_CONVERSION to TF_BF16_CONVERSION.
- Added TF_ENABLE_DYNAMIC_SHAPES to MLPerf launchers.
### 1.3.0
- Updated requirements.txt file for BERT and ResNet.
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/LICENSE ADDED
@@ -0,0 +1,30 @@
BSD 3-Clause License

Copyright (c) 2021 Habana Labs, Ltd. an Intel Company
Copyright (c) Soumith Chintala 2016,
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/mlp_log.py ADDED
@@ -0,0 +1,60 @@
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###############################################################################
# Copyright (C) 2022 Habana Labs, Ltd. an Intel Company
###############################################################################
# List of changes:
# - Added pytorch worker ranks using utils

"""Convenience function for logging compliance tags to stdout.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


import inspect
import json
import logging
import os
import re
import sys
import time
import utils


def get_mllog_mlloger(output_dir=None):
    from mlperf_logging import mllog

    # Use the distributed worker rank to build a per-rank result file name.
    if utils.get_rank() is not None:
        str_worker_rank = str(utils.get_rank())
    else:
        str_worker_rank = "0"

    mllogger = mllog.get_mllogger()
    mllogger.propagate = False
    mllog.propagate = False
    if output_dir is None:
        output_dir = './log'
    filenames = os.path.normpath(output_dir) + "/result_rank_" + str_worker_rank + ".txt"
    mllog.config(filename=filenames)
    workername = "worker" + str_worker_rank
    mllog.config(
        default_namespace=workername,
        default_stack_offset=1,
        default_clear_line=False,
        root_dir=os.path.normpath(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..")))

    return mllogger, mllog
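For context, a minimal usage sketch (not part of the file above) showing how this helper can emit the `run_start`/`run_stop` markers that the TTT commands in the README look for; the output path is an assumed example:

```python
# Illustrative sketch: emit run_start/run_stop compliance events with mlp_log.
from mlp_log import get_mllog_mlloger

mllogger, mllog = get_mllog_mlloger(output_dir="/tmp/resnet_log")
mllogger.start(key=mllog.constants.RUN_START)
# ... training would run here ...
mllogger.end(key=mllog.constants.RUN_STOP, metadata={"status": "success"})
```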
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/mlperf_variable_map.json ADDED
@@ -0,0 +1,163 @@
1
+ {
2
+ "conv1.weight": "conv0_weight",
3
+ "bn1.weight": "bn0_gamma",
4
+ "bn1.bias": "bn0_beta",
5
+ "layer1.0.conv1.weight": "stage1_unit1_conv1_weight",
6
+ "layer1.0.bn1.weight": "stage1_unit1_bn1_gamma",
7
+ "layer1.0.bn1.bias": "stage1_unit1_bn1_beta",
8
+ "layer1.0.conv2.weight": "stage1_unit1_conv2_weight",
9
+ "layer1.0.bn2.weight": "stage1_unit1_bn2_gamma",
10
+ "layer1.0.bn2.bias": "stage1_unit1_bn2_beta",
11
+ "layer1.0.conv3.weight": "stage1_unit1_conv3_weight",
12
+ "layer1.0.bn3.weight": "stage1_unit1_bn3_gamma",
13
+ "layer1.0.bn3.bias": "stage1_unit1_bn3_beta",
14
+ "layer1.0.downsample.0.weight": "stage1_unit1_conv1sc_weight",
15
+ "layer1.0.downsample.1.weight": "stage1_unit1_bnsc_gamma",
16
+ "layer1.0.downsample.1.bias": "stage1_unit1_bnsc_beta",
17
+ "layer1.1.conv1.weight": "stage1_unit2_conv1_weight",
18
+ "layer1.1.bn1.weight": "stage1_unit2_bn1_gamma",
19
+ "layer1.1.bn1.bias": "stage1_unit2_bn1_beta",
20
+ "layer1.1.conv2.weight": "stage1_unit2_conv2_weight",
21
+ "layer1.1.bn2.weight": "stage1_unit2_bn2_gamma",
22
+ "layer1.1.bn2.bias": "stage1_unit2_bn2_beta",
23
+ "layer1.1.conv3.weight": "stage1_unit2_conv3_weight",
24
+ "layer1.1.bn3.weight": "stage1_unit2_bn3_gamma",
25
+ "layer1.1.bn3.bias": "stage1_unit2_bn3_beta",
26
+ "layer1.2.conv1.weight": "stage1_unit3_conv1_weight",
27
+ "layer1.2.bn1.weight": "stage1_unit3_bn1_gamma",
28
+ "layer1.2.bn1.bias": "stage1_unit3_bn1_beta",
29
+ "layer1.2.conv2.weight": "stage1_unit3_conv2_weight",
30
+ "layer1.2.bn2.weight": "stage1_unit3_bn2_gamma",
31
+ "layer1.2.bn2.bias": "stage1_unit3_bn2_beta",
32
+ "layer1.2.conv3.weight": "stage1_unit3_conv3_weight",
33
+ "layer1.2.bn3.weight": "stage1_unit3_bn3_gamma",
34
+ "layer1.2.bn3.bias": "stage1_unit3_bn3_beta",
35
+ "layer2.0.conv1.weight": "stage2_unit1_conv1_weight",
36
+ "layer2.0.bn1.weight": "stage2_unit1_bn1_gamma",
37
+ "layer2.0.bn1.bias": "stage2_unit1_bn1_beta",
38
+ "layer2.0.conv2.weight": "stage2_unit1_conv2_weight",
39
+ "layer2.0.bn2.weight": "stage2_unit1_bn2_gamma",
40
+ "layer2.0.bn2.bias": "stage2_unit1_bn2_beta",
41
+ "layer2.0.conv3.weight": "stage2_unit1_conv3_weight",
42
+ "layer2.0.bn3.weight": "stage2_unit1_bn3_gamma",
43
+ "layer2.0.bn3.bias": "stage2_unit1_bn3_beta",
44
+ "layer2.0.downsample.0.weight": "stage2_unit1_conv1sc_weight",
45
+ "layer2.0.downsample.1.weight": "stage2_unit1_bnsc_gamma",
46
+ "layer2.0.downsample.1.bias": "stage2_unit1_bnsc_beta",
47
+ "layer2.1.conv1.weight": "stage2_unit2_conv1_weight",
48
+ "layer2.1.bn1.weight": "stage2_unit2_bn1_gamma",
49
+ "layer2.1.bn1.bias": "stage2_unit2_bn1_beta",
50
+ "layer2.1.conv2.weight": "stage2_unit2_conv2_weight",
51
+ "layer2.1.bn2.weight": "stage2_unit2_bn2_gamma",
52
+ "layer2.1.bn2.bias": "stage2_unit2_bn2_beta",
53
+ "layer2.1.conv3.weight": "stage2_unit2_conv3_weight",
54
+ "layer2.1.bn3.weight": "stage2_unit2_bn3_gamma",
55
+ "layer2.1.bn3.bias": "stage2_unit2_bn3_beta",
56
+ "layer2.2.conv1.weight": "stage2_unit3_conv1_weight",
57
+ "layer2.2.bn1.weight": "stage2_unit3_bn1_gamma",
58
+ "layer2.2.bn1.bias": "stage2_unit3_bn1_beta",
59
+ "layer2.2.conv2.weight": "stage2_unit3_conv2_weight",
60
+ "layer2.2.bn2.weight": "stage2_unit3_bn2_gamma",
61
+ "layer2.2.bn2.bias": "stage2_unit3_bn2_beta",
62
+ "layer2.2.conv3.weight": "stage2_unit3_conv3_weight",
63
+ "layer2.2.bn3.weight": "stage2_unit3_bn3_gamma",
64
+ "layer2.2.bn3.bias": "stage2_unit3_bn3_beta",
65
+ "layer2.3.conv1.weight": "stage2_unit4_conv1_weight",
66
+ "layer2.3.bn1.weight": "stage2_unit4_bn1_gamma",
67
+ "layer2.3.bn1.bias": "stage2_unit4_bn1_beta",
68
+ "layer2.3.conv2.weight": "stage2_unit4_conv2_weight",
69
+ "layer2.3.bn2.weight": "stage2_unit4_bn2_gamma",
70
+ "layer2.3.bn2.bias": "stage2_unit4_bn2_beta",
71
+ "layer2.3.conv3.weight": "stage2_unit4_conv3_weight",
72
+ "layer2.3.bn3.weight": "stage2_unit4_bn3_gamma",
73
+ "layer2.3.bn3.bias": "stage2_unit4_bn3_beta",
74
+ "layer3.0.conv1.weight": "stage3_unit1_conv1_weight",
75
+ "layer3.0.bn1.weight": "stage3_unit1_bn1_gamma",
76
+ "layer3.0.bn1.bias": "stage3_unit1_bn1_beta",
77
+ "layer3.0.conv2.weight": "stage3_unit1_conv2_weight",
78
+ "layer3.0.bn2.weight": "stage3_unit1_bn2_gamma",
79
+ "layer3.0.bn2.bias": "stage3_unit1_bn2_beta",
80
+ "layer3.0.conv3.weight": "stage3_unit1_conv3_weight",
81
+ "layer3.0.bn3.weight": "stage3_unit1_bn3_gamma",
82
+ "layer3.0.bn3.bias": "stage3_unit1_bn3_beta",
83
+ "layer3.0.downsample.0.weight": "stage3_unit1_conv1sc_weight",
84
+ "layer3.0.downsample.1.weight": "stage3_unit1_bnsc_gamma",
85
+ "layer3.0.downsample.1.bias": "stage3_unit1_bnsc_beta",
86
+ "layer3.1.conv1.weight": "stage3_unit2_conv1_weight",
87
+ "layer3.1.bn1.weight": "stage3_unit2_bn1_gamma",
88
+ "layer3.1.bn1.bias": "stage3_unit2_bn1_beta",
89
+ "layer3.1.conv2.weight": "stage3_unit2_conv2_weight",
90
+ "layer3.1.bn2.weight": "stage3_unit2_bn2_gamma",
91
+ "layer3.1.bn2.bias": "stage3_unit2_bn2_beta",
92
+ "layer3.1.conv3.weight": "stage3_unit2_conv3_weight",
93
+ "layer3.1.bn3.weight": "stage3_unit2_bn3_gamma",
94
+ "layer3.1.bn3.bias": "stage3_unit2_bn3_beta",
95
+ "layer3.2.conv1.weight": "stage3_unit3_conv1_weight",
96
+ "layer3.2.bn1.weight": "stage3_unit3_bn1_gamma",
97
+ "layer3.2.bn1.bias": "stage3_unit3_bn1_beta",
98
+ "layer3.2.conv2.weight": "stage3_unit3_conv2_weight",
99
+ "layer3.2.bn2.weight": "stage3_unit3_bn2_gamma",
100
+ "layer3.2.bn2.bias": "stage3_unit3_bn2_beta",
101
+ "layer3.2.conv3.weight": "stage3_unit3_conv3_weight",
102
+ "layer3.2.bn3.weight": "stage3_unit3_bn3_gamma",
103
+ "layer3.2.bn3.bias": "stage3_unit3_bn3_beta",
104
+ "layer3.3.conv1.weight": "stage3_unit4_conv1_weight",
105
+ "layer3.3.bn1.weight": "stage3_unit4_bn1_gamma",
106
+ "layer3.3.bn1.bias": "stage3_unit4_bn1_beta",
107
+ "layer3.3.conv2.weight": "stage3_unit4_conv2_weight",
108
+ "layer3.3.bn2.weight": "stage3_unit4_bn2_gamma",
109
+ "layer3.3.bn2.bias": "stage3_unit4_bn2_beta",
110
+ "layer3.3.conv3.weight": "stage3_unit4_conv3_weight",
111
+ "layer3.3.bn3.weight": "stage3_unit4_bn3_gamma",
112
+ "layer3.3.bn3.bias": "stage3_unit4_bn3_beta",
113
+ "layer3.4.conv1.weight": "stage3_unit5_conv1_weight",
114
+ "layer3.4.bn1.weight": "stage3_unit5_bn1_gamma",
115
+ "layer3.4.bn1.bias": "stage3_unit5_bn1_beta",
116
+ "layer3.4.conv2.weight": "stage3_unit5_conv2_weight",
117
+ "layer3.4.bn2.weight": "stage3_unit5_bn2_gamma",
118
+ "layer3.4.bn2.bias": "stage3_unit5_bn2_beta",
119
+ "layer3.4.conv3.weight": "stage3_unit5_conv3_weight",
120
+ "layer3.4.bn3.weight": "stage3_unit5_bn3_gamma",
121
+ "layer3.4.bn3.bias": "stage3_unit5_bn3_beta",
122
+ "layer3.5.conv1.weight": "stage3_unit6_conv1_weight",
123
+ "layer3.5.bn1.weight": "stage3_unit6_bn1_gamma",
124
+ "layer3.5.bn1.bias": "stage3_unit6_bn1_beta",
125
+ "layer3.5.conv2.weight": "stage3_unit6_conv2_weight",
126
+ "layer3.5.bn2.weight": "stage3_unit6_bn2_gamma",
127
+ "layer3.5.bn2.bias": "stage3_unit6_bn2_beta",
128
+ "layer3.5.conv3.weight": "stage3_unit6_conv3_weight",
129
+ "layer3.5.bn3.weight": "stage3_unit6_bn3_gamma",
130
+ "layer3.5.bn3.bias": "stage3_unit6_bn3_beta",
131
+ "layer4.0.conv1.weight": "stage4_unit1_conv1_weight",
132
+ "layer4.0.bn1.weight": "stage4_unit1_bn1_gamma",
133
+ "layer4.0.bn1.bias": "stage4_unit1_bn1_beta",
134
+ "layer4.0.conv2.weight": "stage4_unit1_conv2_weight",
135
+ "layer4.0.bn2.weight": "stage4_unit1_bn2_gamma",
136
+ "layer4.0.bn2.bias": "stage4_unit1_bn2_beta",
137
+ "layer4.0.conv3.weight": "stage4_unit1_conv3_weight",
138
+ "layer4.0.bn3.weight": "stage4_unit1_bn3_gamma",
139
+ "layer4.0.bn3.bias": "stage4_unit1_bn3_beta",
140
+ "layer4.0.downsample.0.weight": "stage4_unit1_conv1sc_weight",
141
+ "layer4.0.downsample.1.weight": "stage4_unit1_bnsc_gamma",
142
+ "layer4.0.downsample.1.bias": "stage4_unit1_bnsc_beta",
143
+ "layer4.1.conv1.weight": "stage4_unit2_conv1_weight",
144
+ "layer4.1.bn1.weight": "stage4_unit2_bn1_gamma",
145
+ "layer4.1.bn1.bias": "stage4_unit2_bn1_beta",
146
+ "layer4.1.conv2.weight": "stage4_unit2_conv2_weight",
147
+ "layer4.1.bn2.weight": "stage4_unit2_bn2_gamma",
148
+ "layer4.1.bn2.bias": "stage4_unit2_bn2_beta",
149
+ "layer4.1.conv3.weight": "stage4_unit2_conv3_weight",
150
+ "layer4.1.bn3.weight": "stage4_unit2_bn3_gamma",
151
+ "layer4.1.bn3.bias": "stage4_unit2_bn3_beta",
152
+ "layer4.2.conv1.weight": "stage4_unit3_conv1_weight",
153
+ "layer4.2.bn1.weight": "stage4_unit3_bn1_gamma",
154
+ "layer4.2.bn1.bias": "stage4_unit3_bn1_beta",
155
+ "layer4.2.conv2.weight": "stage4_unit3_conv2_weight",
156
+ "layer4.2.bn2.weight": "stage4_unit3_bn2_gamma",
157
+ "layer4.2.bn2.bias": "stage4_unit3_bn2_beta",
158
+ "layer4.2.conv3.weight": "stage4_unit3_conv3_weight",
159
+ "layer4.2.bn3.weight": "stage4_unit3_bn3_gamma",
160
+ "layer4.2.bn3.bias": "stage4_unit3_bn3_beta",
161
+ "fc.weight": "fc1_weight",
162
+ "fc.bias": "fc1_bias"
163
+ }
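The mapping above translates the local PyTorch ResNet parameter names into the tensor names expected by the MLPerf reference logging (`weights_initialization` events). A minimal sketch of loading and querying it, assuming the JSON file sits next to the consuming script as it does in this directory:

```python
import json
from pathlib import Path

# Minimal sketch (not part of the submission code): load the PyTorch-to-MLPerf
# name map and translate a single parameter name. The relative path is an
# assumption based on this directory layout.
map_path = Path(__file__).resolve().parent / "mlperf_variable_map.json"
with open(map_path, "r") as fh:
    mlperf_map = json.load(fh)

print(mlperf_map["fc.weight"])   # -> "fc1_weight"
```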
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/optimizer.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright (C) 2022 Habana Labs, Ltd. an Intel Company
2
+
3
+ import torch
4
+ from torch.optim.lr_scheduler import _LRScheduler
5
+
6
+ class PolynomialDecayWithWarmup(_LRScheduler):
7
+ """Polynomial learning rate decay until step reach to max_decay_step
8
+
9
+ Args:
10
+ optimizer (Optimizer): Wrapped optimizer.
11
+ max_decay_steps: after this step, we stop decreasing learning rate
12
+ end_learning_rate: final learning rate; decay stops once this value is reached
13
+ power: The power of the polynomial.
14
+ """
15
+
16
+ def __init__(self, optimizer, batch_size, steps_per_epoch, train_steps, initial_learning_rate=9.0, warmup_epochs=3,
17
+ end_learning_rate=0.0001, power=2.0, lars_decay_epochs=36, mlperf_mlloger=None, mlperf_mllog=None, opt_name=None):
18
+ self.last_step = 0
19
+ self.steps_per_epoch = steps_per_epoch
20
+ self.train_steps = train_steps
21
+ self.initial_learning_rate = initial_learning_rate
22
+ self.warmup_epochs = warmup_epochs
23
+ self.end_learning_rate = end_learning_rate
24
+ self.power = power
25
+ self.warmup_steps = warmup_epochs * (steps_per_epoch - 1)
26
+ self.decay_steps = lars_decay_epochs * (steps_per_epoch - 1) - self.warmup_steps + 1
27
+ self.opt_name = opt_name.lower()
28
+
29
+ mlperf_mlloger.event(key=mlperf_mllog.constants.LARS_OPT_END_LR, value=self.end_learning_rate)
30
+ mlperf_mlloger.event(key=mlperf_mllog.constants.LARS_OPT_LR_DECAY_STEPS, value=int(self.decay_steps))
31
+ mlperf_mlloger.event(key=mlperf_mllog.constants.LARS_OPT_LR_DECAY_POLY_POWER, value=power)
32
+ mlperf_mlloger.event(key=self.opt_name+'_'+mlperf_mllog.constants.OPT_LR_WARMUP_EPOCHS, value=float(self.warmup_epochs))
33
+ mlperf_mlloger.event(key=self.opt_name+'_'+mlperf_mllog.constants.OPT_BASE_LR, value=self.initial_learning_rate)
34
+
35
+ super().__init__(optimizer)
36
+
37
+ def get_lr(self):
38
+ warmup_steps = self.warmup_steps
39
+ warmup_rate = (
40
+ self.initial_learning_rate * self.last_step / warmup_steps)
41
+
42
+ poly_steps = self.last_step - warmup_steps
43
+ poly_steps = poly_steps if poly_steps > 1 else 1
44
+
45
+ poly_rate = (self.initial_learning_rate - self.end_learning_rate) * \
46
+ ((1 - poly_steps / self.decay_steps) **
47
+ (self.power)) + self.end_learning_rate
48
+
49
+ decay_rate = warmup_rate if self.last_step <= warmup_steps else poly_rate
50
+ return decay_rate
51
+
52
+ def step(self, step=None):
53
+ if step is None:
54
+ step = self.last_step + 1
55
+ self.last_step = step if step != 0 else 1
56
+ if self.last_step <= self.decay_steps + self.warmup_steps:
57
+ decay_lrs = [self.get_lr()]
58
+ for param_group, lr in zip(self.optimizer.param_groups, decay_lrs):
59
+ param_group['lr'] = lr
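`PolynomialDecayWithWarmup` above ramps the learning rate linearly for `warmup_epochs`, then decays it with a power-2 polynomial toward `end_learning_rate`, and `step()` stops updating it once the decay window ends. A hedged, standalone sketch of the same arithmetic (the step counts and hyperparameters below are illustrative assumptions, not fixed by this file):

```python
# Hedged sketch: reproduces the warmup + polynomial-decay arithmetic of
# PolynomialDecayWithWarmup without the optimizer or MLLog plumbing.
# base LR 9.0, 3 warmup epochs, 36 decay epochs and 626 steps/epoch are
# assumptions chosen only to show the shape of the schedule.

def schedule(step, steps_per_epoch=626, base_lr=9.0, end_lr=1e-4,
             warmup_epochs=3, decay_epochs=36, power=2.0):
    warmup_steps = warmup_epochs * (steps_per_epoch - 1)
    decay_steps = decay_epochs * (steps_per_epoch - 1) - warmup_steps + 1
    if step <= warmup_steps:
        return base_lr * step / warmup_steps                      # linear warmup
    poly_steps = max(step - warmup_steps, 1)                      # polynomial decay
    return (base_lr - end_lr) * (1 - poly_steps / decay_steps) ** power + end_lr

for s in (1, 1875, 5000, 10000, 21876):
    print(s, round(schedule(s), 4))
```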
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/model/resnet.py ADDED
@@ -0,0 +1,369 @@
1
+ # Copyright (c) 2022, Habana Labs Ltd. All rights reserved.
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ from .utils import load_state_dict_from_url
6
+
7
+
8
+ __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
9
+ 'resnet152', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_32x8d',
10
+ 'wide_resnet50_2', 'wide_resnet101_2']
11
+
12
+
13
+ model_urls = {
14
+ 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
15
+ 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
16
+ 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
17
+ 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
18
+ 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
19
+ 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
20
+ 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
21
+ 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
22
+ 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
23
+ }
24
+
25
+
26
+ def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
27
+ """3x3 convolution with padding"""
28
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
29
+ padding=dilation, groups=groups, bias=False, dilation=dilation)
30
+
31
+
32
+ def conv1x1(in_planes, out_planes, stride=1):
33
+ """1x1 convolution"""
34
+ return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
35
+
36
+
37
+ class BasicBlock(nn.Module):
38
+ expansion = 1
39
+ __constants__ = ['downsample']
40
+
41
+ def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
42
+ base_width=64, dilation=1, norm_layer=None):
43
+ super(BasicBlock, self).__init__()
44
+ if norm_layer is None:
45
+ norm_layer = nn.BatchNorm2d
46
+ if groups != 1 or base_width != 64:
47
+ raise ValueError('BasicBlock only supports groups=1 and base_width=64')
48
+ if dilation > 1:
49
+ raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
50
+ # Both self.conv1 and self.downsample layers downsample the input when stride != 1
51
+ self.conv1 = conv3x3(inplanes, planes, stride)
52
+ self.bn1 = norm_layer(planes)
53
+ self.relu = nn.ReLU(inplace=True)
54
+ self.conv2 = conv3x3(planes, planes)
55
+ self.bn2 = norm_layer(planes)
56
+ self.downsample = downsample
57
+ self.stride = stride
58
+
59
+ def forward(self, x):
60
+ identity = x
61
+
62
+ out = self.conv1(x)
63
+ out = self.bn1(out)
64
+ out = self.relu(out)
65
+
66
+ out = self.conv2(out)
67
+ out = self.bn2(out)
68
+
69
+ if self.downsample is not None:
70
+ identity = self.downsample(x)
71
+
72
+ out = out + identity
73
+ out = self.relu(out)
74
+
75
+ return out
76
+
77
+
78
+ class Bottleneck(nn.Module):
79
+ expansion = 4
80
+ __constants__ = ['downsample']
81
+
82
+ def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
83
+ base_width=64, dilation=1, norm_layer=None):
84
+ super(Bottleneck, self).__init__()
85
+ if norm_layer is None:
86
+ norm_layer = nn.BatchNorm2d
87
+ width = int(planes * (base_width / 64.)) * groups
88
+ # Both self.conv2 and self.downsample layers downsample the input when stride != 1
89
+ self.conv1 = conv1x1(inplanes, width)
90
+ self.bn1 = norm_layer(width)
91
+ self.conv2 = conv3x3(width, width, stride, groups, dilation)
92
+ self.bn2 = norm_layer(width)
93
+ self.conv3 = conv1x1(width, planes * self.expansion)
94
+ self.bn3 = norm_layer(planes * self.expansion)
95
+ self.relu = nn.ReLU(inplace=True)
96
+ self.downsample = downsample
97
+ self.stride = stride
98
+
99
+ def forward(self, x):
100
+ identity = x
101
+
102
+ out = self.conv1(x)
103
+ out = self.bn1(out)
104
+ out = self.relu(out)
105
+
106
+ out = self.conv2(out)
107
+ out = self.bn2(out)
108
+ out = self.relu(out)
109
+
110
+ out = self.conv3(out)
111
+ out = self.bn3(out)
112
+
113
+ if self.downsample is not None:
114
+ identity = self.downsample(x)
115
+
116
+ out = out + identity
117
+ out = self.relu(out)
118
+
119
+ return out
120
+
121
+
122
+ class ResNet(nn.Module):
123
+
124
+ def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
125
+ groups=1, width_per_group=64, replace_stride_with_dilation=None,
126
+ norm_layer=None):
127
+ super(ResNet, self).__init__()
128
+ if norm_layer is None:
129
+ norm_layer = nn.BatchNorm2d
130
+ self._norm_layer = norm_layer
131
+
132
+ self.inplanes = 64
133
+ self.dilation = 1
134
+ if replace_stride_with_dilation is None:
135
+ # each element in the tuple indicates if we should replace
136
+ # the 2x2 stride with a dilated convolution instead
137
+ replace_stride_with_dilation = [False, False, False]
138
+ if len(replace_stride_with_dilation) != 3:
139
+ raise ValueError("replace_stride_with_dilation should be None "
140
+ "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
141
+ self.groups = groups
142
+ self.base_width = width_per_group
143
+ self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
144
+ bias=False)
145
+ self.bn1 = norm_layer(self.inplanes)
146
+ self.relu = nn.ReLU(inplace=True)
147
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
148
+ self.layer1 = self._make_layer(block, 64, layers[0])
149
+ self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
150
+ dilate=replace_stride_with_dilation[0])
151
+ self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
152
+ dilate=replace_stride_with_dilation[1])
153
+ self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
154
+ dilate=replace_stride_with_dilation[2])
155
+ self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
156
+
157
+ self.fc = nn.Linear(512 * block.expansion, num_classes)
158
+
159
+ for m in self.modules():
160
+ if isinstance(m, nn.Conv2d):
161
+ nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
162
+ elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
163
+ nn.init.constant_(m.weight, 1)
164
+ nn.init.constant_(m.bias, 0)
165
+
166
+ # Zero-initialize the last BN in each residual branch,
167
+ # so that the residual branch starts with zeros, and each residual block behaves like an identity.
168
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
169
+ if zero_init_residual:
170
+ for m in self.modules():
171
+ if isinstance(m, Bottleneck):
172
+ nn.init.constant_(m.bn3.weight, 0)
173
+ elif isinstance(m, BasicBlock):
174
+ nn.init.constant_(m.bn2.weight, 0)
175
+
176
+ def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
177
+ norm_layer = self._norm_layer
178
+ downsample = None
179
+ previous_dilation = self.dilation
180
+ if dilate:
181
+ self.dilation *= stride
182
+ stride = 1
183
+ if stride != 1 or self.inplanes != planes * block.expansion:
184
+ downsample = nn.Sequential(
185
+ conv1x1(self.inplanes, planes * block.expansion, stride),
186
+ norm_layer(planes * block.expansion),
187
+ )
188
+
189
+ layers = []
190
+ layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
191
+ self.base_width, previous_dilation, norm_layer))
192
+ self.inplanes = planes * block.expansion
193
+ for _ in range(1, blocks):
194
+ layers.append(block(self.inplanes, planes, groups=self.groups,
195
+ base_width=self.base_width, dilation=self.dilation,
196
+ norm_layer=norm_layer))
197
+
198
+ return nn.Sequential(*layers)
199
+
200
+ def _forward_impl(self, x):
201
+ # See note [TorchScript super()]
202
+ x = self.conv1(x)
203
+ x = self.bn1(x)
204
+ x = self.relu(x)
205
+ x = self.maxpool(x)
206
+
207
+ x = self.layer1(x)
208
+ x = self.layer2(x)
209
+ x = self.layer3(x)
210
+ x = self.layer4(x)
211
+
212
+ x = self.avgpool(x)
213
+ x = torch.flatten(x, 1)
214
+ x = self.fc(x)
215
+
216
+ return x
217
+
218
+ def forward(self, x):
219
+ return self._forward_impl(x)
220
+
221
+
222
+ def _resnet(arch, block, layers, pretrained, progress, **kwargs):
223
+ model = ResNet(block, layers, **kwargs)
224
+ if pretrained:
225
+ state_dict = load_state_dict_from_url(model_urls[arch],
226
+ progress=progress)
227
+ model.load_state_dict(state_dict)
228
+ return model
229
+
230
+
231
+ def resnet18(pretrained=False, progress=True, **kwargs):
232
+ r"""ResNet-18 model from
233
+ `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
234
+
235
+ Args:
236
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
237
+ progress (bool): If True, displays a progress bar of the download to stderr
238
+ """
239
+ return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
240
+ **kwargs)
241
+
242
+
243
+ def resnet34(pretrained=False, progress=True, **kwargs):
244
+ r"""ResNet-34 model from
245
+ `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
246
+
247
+ Args:
248
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
249
+ progress (bool): If True, displays a progress bar of the download to stderr
250
+ """
251
+ return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
252
+ **kwargs)
253
+
254
+
255
+ def resnet50(pretrained=False, progress=True, **kwargs):
256
+ r"""ResNet-50 model from
257
+ `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
258
+
259
+ Args:
260
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
261
+ progress (bool): If True, displays a progress bar of the download to stderr
262
+ """
263
+ return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
264
+ **kwargs)
265
+
266
+
267
+ def resnet101(pretrained=False, progress=True, **kwargs):
268
+ r"""ResNet-101 model from
269
+ `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
270
+
271
+ Args:
272
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
273
+ progress (bool): If True, displays a progress bar of the download to stderr
274
+ """
275
+ return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
276
+ **kwargs)
277
+
278
+
279
+ def resnet152(pretrained=False, progress=True, **kwargs):
280
+ r"""ResNet-152 model from
281
+ `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
282
+
283
+ Args:
284
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
285
+ progress (bool): If True, displays a progress bar of the download to stderr
286
+ """
287
+ return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
288
+ **kwargs)
289
+
290
+
291
+ def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
292
+ r"""ResNeXt-50 32x4d model from
293
+ `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
294
+
295
+ Args:
296
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
297
+ progress (bool): If True, displays a progress bar of the download to stderr
298
+ """
299
+ kwargs['groups'] = 32
300
+ kwargs['width_per_group'] = 4
301
+ return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
302
+ pretrained, progress, **kwargs)
303
+
304
+
305
+ def resnext101_32x4d(pretrained=False, progress=True, **kwargs):
306
+ r"""ResNeXt-101 32x4d model from
307
+ `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
308
+
309
+ Args:
310
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
311
+ progress (bool): If True, displays a progress bar of the download to stderr
312
+ """
313
+ if pretrained:
314
+ raise AssertionError("Pretrained models are not available for resnext101_32x4d")
315
+
316
+ kwargs['groups'] = 32
317
+ kwargs['width_per_group'] = 4
318
+ return _resnet('resnext101_32x4d', Bottleneck, [3, 4, 23, 3],
319
+ pretrained, progress, **kwargs)
320
+
321
+
322
+ def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
323
+ r"""ResNeXt-101 32x8d model from
324
+ `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
325
+
326
+ Args:
327
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
328
+ progress (bool): If True, displays a progress bar of the download to stderr
329
+ """
330
+ kwargs['groups'] = 32
331
+ kwargs['width_per_group'] = 8
332
+ return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
333
+ pretrained, progress, **kwargs)
334
+
335
+
336
+ def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
337
+ r"""Wide ResNet-50-2 model from
338
+ `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
339
+
340
+ The model is the same as ResNet except for the bottleneck number of channels
341
+ which is twice larger in every block. The number of channels in outer 1x1
342
+ convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
343
+ channels, and in Wide ResNet-50-2 has 2048-1024-2048.
344
+
345
+ Args:
346
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
347
+ progress (bool): If True, displays a progress bar of the download to stderr
348
+ """
349
+ kwargs['width_per_group'] = 64 * 2
350
+ return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
351
+ pretrained, progress, **kwargs)
352
+
353
+
354
+ def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
355
+ r"""Wide ResNet-101-2 model from
356
+ `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
357
+
358
+ The model is the same as ResNet except for the bottleneck number of channels
359
+ which is twice larger in every block. The number of channels in outer 1x1
360
+ convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
361
+ channels, and in Wide ResNet-50-2 has 2048-1024-2048.
362
+
363
+ Args:
364
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
365
+ progress (bool): If True, displays a progress bar of the download to stderr
366
+ """
367
+ kwargs['width_per_group'] = 64 * 2
368
+ return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
369
+ pretrained, progress, **kwargs)
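This local copy of the torchvision ResNet family exists mainly to provide `resnext101_32x4d`, which the standard torchvision package does not ship. A hedged usage sketch, assuming this `PyTorch` directory is on `sys.path` so that `import model` resolves the way `train.py` expects:

```python
import torch
# Hedged usage sketch: build the local resnext101_32x4d variant the same way
# train.py does ("import model as resnet_models"); assumes this PyTorch
# directory is the working directory or on sys.path.
import model as resnet_models

net = resnet_models.resnext101_32x4d(pretrained=False)
x = torch.randn(2, 3, 224, 224)             # NCHW ImageNet-sized batch
with torch.no_grad():
    y = net(x)
print(y.shape)                               # torch.Size([2, 1000])
print(sum(p.numel() for p in net.parameters()) / 1e6, "M parameters")
```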
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/ops_bf16_Resnet.txt ADDED
@@ -0,0 +1,11 @@
1
+ addmm
2
+ avg_pool2d
3
+ bmm
4
+ conv2d
5
+ dot
6
+ max_pool2d
7
+ mm
8
+ mv
9
+ relu
10
+ t
11
+ linear
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/ops_fp32_Resnet.txt ADDED
@@ -0,0 +1,5 @@
1
+ cross_entropy_loss
2
+ log_softmax
3
+ nll_loss
4
+ softmax
5
+ topk
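The two lists above split ResNet ops between bf16 and fp32 for Habana mixed-precision runs. How the runtime picks these files up is outside this snippet; as a hedged sanity check, the lists can be loaded and verified to be disjoint:

```python
# Hedged sketch: read the two precision lists and make sure no op appears in
# both. The file names are taken from this directory; how the Habana runtime
# consumes them is not shown here.
def read_ops(path):
    with open(path) as fh:
        return {line.strip() for line in fh if line.strip()}

bf16_ops = read_ops("ops_bf16_Resnet.txt")
fp32_ops = read_ops("ops_fp32_Resnet.txt")
assert not (bf16_ops & fp32_ops), "an op is listed in both precisions"
print(len(bf16_ops), "bf16 ops;", len(fp32_ops), "fp32 ops")
```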
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/requirements.txt ADDED
@@ -0,0 +1,4 @@
1
+ mpi4py>=3.0.3
2
+ scipy>=1.7.1
3
+ colorlog==6.6.0
4
+ git+https://github.com/mlperf/[email protected]
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/train.py ADDED
@@ -0,0 +1,815 @@
1
+ # Copyright (c) 2021-2023, Habana Labs Ltd. All rights reserved.
2
+
3
+
4
+ from __future__ import print_function
5
+ import copy
6
+ from math import ceil
7
+ import shutil
8
+ import uuid
9
+
10
+ # Import local copy of the model only for ResNext101_32x4d
11
+ # which is not part of standard torchvision package.
12
+ import model as resnet_models
13
+ import datetime
14
+ import os
15
+ import time
16
+ import sys
17
+ import json
18
+
19
+ import torch
20
+ import torch.utils.data
21
+ from torch import nn
22
+ import torchvision
23
+ from torchvision import transforms
24
+ import random
25
+ import utils
26
+ import habana_frameworks.torch.core as htcore
27
+ import habana_dataloader
28
+ from mlp_log import get_mllog_mlloger
29
+
30
+ try:
31
+ from apex import amp
32
+ except ImportError:
33
+ amp = None
34
+
35
+ DEFAULT_IMAGE_SIZE = 224
36
+ NUM_CHANNELS = 3
37
+
38
+ def get_mlperf_variable_map():
39
+ try:
40
+ script_path = os.path.realpath(__file__)
41
+ head_tail = os.path.split(script_path)
42
+ mlperf_map_file = head_tail[0] + '/mlperf_variable_map.json'
43
+ with open(mlperf_map_file, mode='r') as file_handle:
44
+ json_content = file_handle.read()
45
+ mlperf_map = json.loads(json_content)
46
+ except IOError:
47
+ raise IOError(f"MLPerf variable map file: {mlperf_map_file} not accesible")
48
+ return mlperf_map
49
+
50
+
51
+ def train_one_epoch(lr_scheduler, model, criterion, optimizer, data_loader, device, epoch,
52
+ print_freq, args, apex=False, warmup=False):
53
+ model.train()
54
+ metric_logger = utils.MetricLogger(delimiter=" ", device=device)
55
+ metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}'))
56
+ metric_logger.add_meter('img/s', utils.SmoothedValue(window_size=10, fmt='{value}'))
57
+
58
+ header = f'Warmup epoch: [{epoch}]' if warmup else f'Epoch: [{epoch}]'
59
+ step_count = 0
60
+ last_print_time = time.time()
61
+
62
+ profiler = None
63
+ if args.profile_steps is not None and not warmup:
64
+ profile_steps = [int(i) for i in args.profile_steps.split(',')]
65
+ profiling_duration = profile_steps[1] - profile_steps[0]
66
+ profiler = torch.profiler.profile(
67
+ activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.HPU],
68
+ schedule=torch.profiler.schedule(wait=0, warmup=profile_steps[0], active=profiling_duration, repeat=1),
69
+ on_trace_ready=torch.profiler.tensorboard_trace_handler(args.output_dir,
70
+ worker_name=f"worker_{utils.get_rank()}",
71
+ use_gzip=True),
72
+ record_shapes=True,
73
+ with_stack=True)
74
+ profiler.start()
75
+
76
+ for image, target in metric_logger.log_every(data_loader, print_freq, header):
77
+ image, target = image.to(device, non_blocking=True), target.to(device, non_blocking=True)
78
+ dl_ex_start_time = time.time()
79
+ if args.channels_last:
80
+ image = image.contiguous(memory_format=torch.channels_last)
81
+
82
+ with torch.autocast(device_type="hpu", dtype=torch.bfloat16, enabled=args.use_autocast):
83
+ output = model(image)
84
+ loss = criterion(output, target)
85
+ loss = loss / image.shape[0]
86
+ optimizer.zero_grad(set_to_none=True)
87
+
88
+ if apex:
89
+ with amp.scale_loss(loss, optimizer) as scaled_loss:
90
+ scaled_loss.backward()
91
+ else:
92
+ loss.backward()
93
+
94
+ if args.run_lazy_mode and not args.use_torch_compile:
95
+ htcore.mark_step()
96
+
97
+ optimizer.step()
98
+
99
+ if args.run_lazy_mode and not args.use_torch_compile:
100
+ htcore.mark_step()
101
+
102
+ if step_count % print_freq == 0:
103
+ output_cpu = output.detach().to('cpu')
104
+ acc1, acc5 = utils.accuracy(output_cpu, target, topk=(1, 5))
105
+ batch_size = image.shape[0]
106
+ metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
107
+ metric_logger.meters['acc1'].update(acc1.item(), n=batch_size * print_freq)
108
+ metric_logger.meters['acc5'].update(acc5.item(), n=batch_size * print_freq)
109
+ current_time = time.time()
110
+ last_print_time = dl_ex_start_time if args.dl_time_exclude else last_print_time
111
+ images_processed = batch_size * print_freq if step_count != 0 else batch_size
112
+ metric_logger.meters['img/s'].update(images_processed / (current_time - last_print_time))
113
+ last_print_time = time.time()
114
+
115
+ step_count = step_count + 1
116
+ if profiler is not None:
117
+ profiler.step()
118
+
119
+ if step_count >= args.num_train_steps:
120
+ break
121
+
122
+ if lr_scheduler is not None:
123
+ lr_scheduler.step()
124
+
125
+ if profiler is not None:
126
+ profiler.stop()
127
+
128
+
129
+ def evaluate(model, criterion, data_loader, device, print_freq=100, warmup=False):
130
+ model.eval()
131
+ metric_logger = utils.MetricLogger(delimiter=" ", device=device)
132
+ header = 'Warmup test:' if warmup else 'Test:'
133
+ step_count = 0
134
+ with torch.no_grad():
135
+ for image, target in metric_logger.log_every(data_loader, print_freq, header):
136
+ image = image.to(device, non_blocking=True)
137
+
138
+ if args.channels_last:
139
+ image = image.contiguous(memory_format=torch.channels_last)
140
+
141
+ target = target.to(device, non_blocking=True)
142
+ with torch.autocast(device_type="hpu", dtype=torch.bfloat16, enabled=args.use_autocast):
143
+ output = model(image)
144
+ loss = criterion(output, target)
145
+ loss = loss / image.shape[0]
146
+
147
+ acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
148
+ # FIXME need to take into account that the datasets
149
+ # could have been padded in distributed setup
150
+ batch_size = image.shape[0]
151
+ loss_cpu = loss.to('cpu').detach()
152
+ metric_logger.update(loss=loss_cpu.item())
153
+ metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
154
+ metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
155
+ step_count = step_count + 1
156
+ if step_count >= args.num_eval_steps:
157
+ break
158
+ # gather the stats from all processes
159
+ metric_logger.synchronize_between_processes()
160
+
161
+ # Return from here if evaluation phase does not go through any iterations.(eg, The data set is so small that
162
+ # there is only one eval batch, but that was skipped in data loader due to drop_last=True)
163
+ if len(metric_logger.meters) == 0:
164
+ return
165
+
166
+ print(' * Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f}'
167
+ .format(top1=metric_logger.acc1, top5=metric_logger.acc5))
168
+ return metric_logger.acc1.global_avg
169
+
170
+
171
+ def _get_cache_path(filepath):
172
+ import hashlib
173
+ h = hashlib.sha1(filepath.encode()).hexdigest()
174
+ cache_path = os.path.join("~", ".torch", "vision", "datasets", "imagefolder", h[:10] + ".pt")
175
+ cache_path = os.path.expanduser(cache_path)
176
+ return cache_path
177
+
178
+ def warmup(model_for_train, model_for_eval, device, criterion, optimizer, args, data_loader_type, pin_memory_device, pin_memory):
179
+ state_backup = copy.deepcopy(optimizer.optim.state)
180
+ dataset, dataset_test, train_sampler, test_sampler = load_data(f'{args.output_dir}/resnet_synth_data/train', f'{args.output_dir}/resnet_synth_data/val', args=args, synthetic=True)
181
+ data_loader = data_loader_type(
182
+ dataset, args.batch_size, sampler=train_sampler,
183
+ num_workers=args.workers, pin_memory=pin_memory, pin_memory_device=pin_memory_device)
184
+ data_loader_test = data_loader_type(
185
+ dataset_test, args.batch_size, sampler=test_sampler,
186
+ num_workers=args.workers, pin_memory=pin_memory, pin_memory_device=pin_memory_device)
187
+ train_one_epoch(None, model_for_train, criterion, optimizer, data_loader, device, 0,
188
+ 1, args, apex=args.apex, warmup=True)
189
+ evaluate(model_for_eval, criterion, data_loader_test, device, print_freq=1, warmup=True)
190
+ optimizer.zero_grad(True)
191
+ optimizer.optim.state = state_backup
192
+ optimizer.state = optimizer.optim.__getstate__()['state']
193
+
194
+ def load_data(traindir, valdir, args, manifest=None, synthetic=False):
195
+ # Data loading code
196
+ print("Loading data")
197
+ normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
198
+ std=[0.229, 0.224, 0.225])
199
+ dataset_transforms = transforms.Compose([
200
+ transforms.RandomResizedCrop(224),
201
+ transforms.RandomHorizontalFlip(),
202
+ transforms.ToTensor(),
203
+ normalize,
204
+ ])
205
+ dataset_test_transforms = transforms.Compose([
206
+ transforms.Resize(256),
207
+ transforms.CenterCrop(224),
208
+ transforms.ToTensor(),
209
+ normalize,
210
+ ])
211
+ dataset_loader_train = habana_dataloader.habana_dataset.ImageFolderWithManifest if args.dl_worker_type == "HABANA" and not synthetic else torchvision.datasets.ImageFolder
212
+ dataset_loader_eval = torchvision.datasets.ImageFolder
213
+ loader_params = {'root': traindir, 'transform': dataset_transforms}
214
+ loader_test_params = {'root': valdir, 'transform': dataset_test_transforms}
215
+ if args.dl_worker_type == "HABANA" and not synthetic:
216
+ loader_params['manifest'] = manifest
217
+ if (synthetic):
218
+ steps = 4
219
+ size = steps*args.batch_size
220
+ img_shape = (NUM_CHANNELS, DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE)
221
+ all_images_shape = (size, NUM_CHANNELS, DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE)
222
+ chunks = torch.ones(all_images_shape, dtype=torch.uint8).chunk(size)
223
+ images = [img.reshape(img_shape) for img in chunks]
224
+ for i, image in enumerate(images):
225
+ if (i % args.batch_size == 0):
226
+ batch_class_name = uuid.uuid4()
227
+ utils.mkdir(f'{traindir}/{batch_class_name}')
228
+ utils.mkdir(f'{valdir}/{batch_class_name}')
229
+ torchvision.io.write_jpeg(image, f'{traindir}/{batch_class_name}/{i}.JPEG')
230
+ torchvision.io.write_jpeg(image, f'{valdir}/{batch_class_name}/{i}.JPEG')
231
+
232
+ print("Loading training data")
233
+ st = time.time()
234
+ cache_path = _get_cache_path(traindir)
235
+ cache_dataset = args.cache_dataset and not synthetic
236
+ if cache_dataset and os.path.exists(cache_path):
237
+ # Attention, as the transforms are also cached!
238
+ print("Loading dataset_train from {}".format(cache_path))
239
+ dataset, _ = torch.load(cache_path)
240
+ else:
241
+ dataset = dataset_loader_train(**loader_params)
242
+ if cache_dataset:
243
+ print("Saving dataset_train to {}".format(cache_path))
244
+ utils.mkdir(os.path.dirname(cache_path))
245
+ utils.save_on_master((dataset, traindir), cache_path)
246
+ print("Took", time.time() - st)
247
+
248
+ print("Loading validation data")
249
+ cache_path = _get_cache_path(valdir)
250
+ if cache_dataset and os.path.exists(cache_path):
251
+ # Attention, as the transforms are also cached!
252
+ print("Loading dataset_test from {}".format(cache_path))
253
+ dataset_test, _ = torch.load(cache_path)
254
+ else:
255
+ dataset_test = dataset_loader_eval(**loader_test_params)
256
+ if cache_dataset:
257
+ print("Saving dataset_test to {}".format(cache_path))
258
+ utils.mkdir(os.path.dirname(cache_path))
259
+ utils.save_on_master((dataset_test, valdir), cache_path)
260
+
261
+ print("Creating data loaders")
262
+ if args.distributed:
263
+ train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
264
+ test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
265
+ else:
266
+ train_sampler = torch.utils.data.RandomSampler(dataset)
267
+ test_sampler = torch.utils.data.SequentialSampler(dataset_test)
268
+
269
+ return dataset, dataset_test, train_sampler, test_sampler
270
+
271
+
272
+ def lr_vec_fcn(values, milestones):
273
+ lr_vec = []
274
+ for n in range(len(milestones) - 1):
275
+ lr_vec += [values[n]] * (milestones[n + 1] - milestones[n])
276
+ return lr_vec
277
+
278
+
279
+ def adjust_learning_rate(optimizer, epoch, lr_vec):
280
+ """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
281
+ lr = lr_vec[epoch]
282
+ for param_group in optimizer.param_groups:
283
+ param_group['lr'] = lr
284
+
285
+
286
+ def main(args):
287
+
288
+ if args.eval_offset_epochs < 0:
289
+ assert False, "Eval offset has to be 0 or bigger"
290
+
291
+ if args.epochs_between_evals < 1:
292
+ assert False, "Epochs between evaluations has to be 1 or bigger"
293
+
294
+ if args.dl_worker_type == "MP":
295
+ try:
296
+ # Default 'fork' doesn't work with synapse. Use 'forkserver' or 'spawn'
297
+ torch.multiprocessing.set_start_method('spawn')
298
+ except RuntimeError:
299
+ pass
300
+ elif args.dl_worker_type == "HABANA":
301
+ try:
302
+ import habana_dataloader
303
+ except ImportError:
304
+ assert False, "Could Not import habana dataloader package"
305
+
306
+ if args.device == 'hpu':
307
+ if args.run_lazy_mode:
308
+ assert os.getenv('PT_HPU_LAZY_MODE') == '1', f"run-lazy-mode == True, but PT_HPU_LAZY_MODE={os.getenv('PT_HPU_LAZY_MODE')}"
309
+ else:
310
+ assert os.getenv('PT_HPU_LAZY_MODE') == '0' or os.getenv('PT_HPU_LAZY_MODE')== '2', f"args.use_lazy_mode == False, but PT_HPU_LAZY_MODE={os.getenv('PT_HPU_LAZY_MODE')}"
311
+
312
+ if args.run_lazy_mode and not args.use_torch_compile:
313
+ try:
314
+ import habana_frameworks.torch.hpu as hthpu
315
+ hthpu.enable_dynamic_shape()
316
+ except ImportError:
317
+ logger.info("habana_frameworks could not be loaded")
318
+
319
+ if args.apex:
320
+ if sys.version_info < (3, 0):
321
+ raise RuntimeError(
322
+ "Apex currently only supports Python 3. Aborting.")
323
+ if amp is None:
324
+ raise RuntimeError("Failed to import apex. Please install apex from https://www.github.com/nvidia/apex "
325
+ "to enable mixed-precision training.")
326
+
327
+ utils.init_distributed_mode(args)
328
+ print(args)
329
+
330
+ synth_data_dir = (args.output_dir if args.output_dir else '/tmp') + '/resnet_synth_data'
331
+
332
+ if utils.get_rank() == 0:
333
+ if args.output_dir:
334
+ utils.mkdir(args.output_dir)
335
+ if args.log_dir:
336
+ utils.mkdir(args.log_dir)
337
+
338
+ try:
339
+ shutil.rmtree(synth_data_dir)
340
+ except:
341
+ pass
342
+
343
+ utils.mkdir(f'{synth_data_dir}')
344
+ utils.mkdir(f'{synth_data_dir}/train')
345
+ utils.mkdir(f'{synth_data_dir}/val')
346
+
347
+ if utils.get_world_size() > 1:
348
+ utils.barrier()
349
+
350
+ mlperf_mlloger, mlperf_mllog = get_mllog_mlloger(args.log_dir if args.log_dir else args.output_dir)
351
+ mlperf_mlloger.event(key=mlperf_mllog.constants.CACHE_CLEAR, value=True)
352
+ mlperf_mlloger.start(key=mlperf_mllog.constants.INIT_START, value=None)
353
+ mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_BENCHMARK, value=mlperf_mllog.constants.RESNET)
354
+ mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_ORG, value='Habana')
355
+ mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_DIVISION, value='closed')
356
+ mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_PLATFORM, value='gaudi-{}'.format(args.num_gpus))
357
+ mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_STATUS, value='onprem')
358
+
359
+ device = torch.device(args.device)
360
+
361
+ torch.backends.cudnn.benchmark = True
362
+
363
+ if args.device == 'hpu' and utils.get_world_size() > 0:
364
+ # patch torch cuda functions that are being unconditionally invoked
365
+ # in the multiprocessing data loader
366
+ torch.cuda.current_device = lambda: None
367
+ torch.cuda.set_device = lambda x: None
368
+
369
+ if args.dl_worker_type == "MP":
370
+ data_loader_type = torch.utils.data.DataLoader
371
+ elif args.dl_worker_type == "HABANA":
372
+ data_loader_type = habana_dataloader.HabanaDataLoader
373
+
374
+ pin_memory_device = None
375
+ pin_memory = False
376
+ if args.device == 'cuda' or args.device == 'hpu':
377
+ pin_memory_device = args.device
378
+ pin_memory = True
379
+
380
+ print("Creating model")
381
+ # Import only resnext101_32x4d from a local copy since torchvision
382
+ # package doesn't support resnext101_32x4d variant
383
+ if 'resnext101_32x4d' in args.model:
384
+ model = resnet_models.__dict__[args.model](pretrained=args.pretrained)
385
+ else:
386
+ model = torchvision.models.__dict__[
387
+ args.model](pretrained=args.pretrained)
388
+ model.to(device)
389
+ if args.device=='hpu' and args.run_lazy_mode and args.hpu_graphs:
390
+ import habana_frameworks.torch.hpu.graphs as htgraphs
391
+ htgraphs.ModuleCacher()(model, have_grad_accumulation=True)
392
+ if args.channels_last:
393
+ if(device == torch.device('cuda')):
394
+ print('Converting model to channels_last format on CUDA')
395
+ model.to(memory_format=torch.channels_last)
396
+ elif(args.device == 'hpu'):
397
+ print('Converting model params to channels_last format on Habana')
398
+ # TODO:
399
+ # model.to(device).to(memory_format=torch.channels_last)
400
+ # The above model conversion doesn't change the model params
401
+ # to channels_last for many components - e.g. convolution.
402
+ # So we are forced to rearrange such tensors ourselves.
403
+
404
+ if args.distributed and args.sync_bn:
405
+ model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
406
+
407
+ criterion = nn.CrossEntropyLoss(label_smoothing=args.label_smoothing, reduction='sum')
408
+
409
+ print("************* Running FusedResourceApplyMomentum optimizer ************")
410
+ from habana_frameworks.torch.hpex.optimizers import FusedResourceApplyMomentum
411
+ optimizer = FusedResourceApplyMomentum(
412
+ model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
413
+
414
+ mlperf_mlloger.event(key=mlperf_mllog.constants.LARS_OPT_WEIGHT_DECAY, value=args.weight_decay)
415
+ mlperf_mlloger.event(key='lars_opt_momentum', value=args.momentum)
416
+
417
+ skip_list = ['batch_normalization', 'bias', 'bn', 'downsample.1']
418
+ skip_mask = []
419
+ for n_param, param in zip(model.named_parameters(), model.parameters()):
420
+ assert n_param[1].shape == param.shape
421
+ skip_mask.append(not any(v in n_param[0] for v in skip_list))
422
+
423
+ mlperf_variable_map = get_mlperf_variable_map()
424
+ for model_weight, param in model.named_parameters():
425
+ mlperf_mlloger.event(key=mlperf_mllog.constants.WEIGHTS_INITIALIZATION, metadata={'tensor': mlperf_variable_map[model_weight]})
426
+
427
+ print("************* Running FusedLARS optimizer ************")
428
+ from habana_frameworks.torch.hpex.optimizers import FusedLars
429
+ optimizer = FusedLars(optimizer, skip_mask, eps=0.0)
430
+
431
+ mlperf_mlloger.event(key=mlperf_mllog.constants.OPT_NAME, value='lars')
432
+ mlperf_mlloger.event(key=mlperf_mllog.constants.LARS_EPSILON, value=0.0)
433
+
434
+ if args.apex:
435
+ model, optimizer = amp.initialize(model, optimizer,
436
+ opt_level=args.apex_opt_level
437
+ )
438
+ NUM_IMAGES = {
439
+ 'train': 1281167,
440
+ 'validation': 50000,
441
+ }
442
+ steps_per_epoch = ceil(NUM_IMAGES['train'] / utils.get_world_size() / args.batch_size)
443
+ steps_per_eval = ceil(NUM_IMAGES['validation'] / utils.get_world_size() / args.batch_size)
444
+ train_print_freq = min(args.print_freq, steps_per_epoch - 1)
445
+ eval_print_freq = min(args.print_freq, steps_per_eval - 1)
446
+
447
+ print("************* PolynomialDecayWithWarmup ************")
448
+ from model.optimizer import PolynomialDecayWithWarmup
449
+ train_steps = steps_per_epoch * args.epochs
450
+ lr_scheduler = PolynomialDecayWithWarmup(optimizer,
451
+ batch_size=args.batch_size,
452
+ steps_per_epoch=steps_per_epoch,
453
+ train_steps=train_steps,
454
+ initial_learning_rate=args.base_learning_rate,
455
+ warmup_epochs=args.warmup_epochs,
456
+ end_learning_rate=args.end_learning_rate,
457
+ power=2.0,
458
+ lars_decay_epochs=args.lars_decay_epochs,
459
+ mlperf_mllog=mlperf_mllog,
460
+ mlperf_mlloger=mlperf_mlloger,
461
+ opt_name='lars')
462
+
463
+ model_for_eval = model
464
+
465
+ # TBD: pass the right module for ddp
466
+ model_without_ddp = model
467
+
468
+ if args.distributed:
469
+ if args.device == 'hpu':
470
+ # To improve resnext101 dist performance,
471
+ # decrease number of all_reduce calls to 1 by increasing bucket size to 200
472
+ bucket_size_mb = 200
473
+ is_grad_view = True
474
+ model = torch.nn.parallel.DistributedDataParallel(model,
475
+ bucket_cap_mb=bucket_size_mb,
476
+ broadcast_buffers=False,
477
+ gradient_as_bucket_view=is_grad_view)
478
+ else:
479
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
480
+ model_without_ddp = model.module
481
+
482
+ model_for_train = model
483
+
484
+ if args.resume:
485
+ checkpoint = torch.load(args.resume, map_location='cpu')
486
+ model_without_ddp.load_state_dict(checkpoint['model'])
487
+ optimizer.load_state_dict(checkpoint['optimizer'])
488
+ if lr_scheduler is not None:
489
+ lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
490
+
491
+ args.start_epoch = checkpoint['epoch'] + 1
492
+
493
+ if args.test_only:
494
+ evaluate(model_for_eval, criterion, data_loader_test, device=device,
495
+ print_freq=eval_print_freq)
496
+ return
497
+
498
+ if (args.enable_warmup):
499
+ warmup_start_time = time.time()
500
+ print("Start warmup")
501
+ warmup(model_for_train, model_for_eval, device, criterion, optimizer, args, data_loader_type, pin_memory_device, pin_memory)
502
+ warmup_total_time = time.time() - warmup_start_time
503
+ warmup_total_time_str = str(datetime.timedelta(seconds=int(warmup_total_time)))
504
+ print(f'Warmup time {warmup_total_time_str}')
505
+
506
+ mlperf_mlloger.event(key=mlperf_mllog.constants.GLOBAL_BATCH_SIZE, value=args.batch_size*utils.get_world_size())
507
+ mlperf_mlloger.event(key=mlperf_mllog.constants.TRAIN_SAMPLES, value=NUM_IMAGES['train'])
508
+ mlperf_mlloger.event(key=mlperf_mllog.constants.EVAL_SAMPLES, value=NUM_IMAGES['validation'])
509
+ group_batch_norm = 1
510
+ mlperf_mlloger.event(key=mlperf_mllog.constants.MODEL_BN_SPAN, value= args.batch_size * group_batch_norm)
511
+ mlperf_mlloger.event(key=mlperf_mllog.constants.GRADIENT_ACCUMULATION_STEPS, value=args.num_acc_steps)
512
+
513
+ next_eval_epoch = args.eval_offset_epochs - 1 + args.start_epoch
514
+ if next_eval_epoch < 0:
515
+ next_eval_epoch += args.epochs_between_evals
516
+
517
+ train_dir = os.path.join(args.data_path, 'train')
518
+ val_dir = os.path.join(args.data_path, 'val')
519
+
520
+ if utils.get_rank() == 0:
521
+ try:
522
+ shutil.rmtree(synth_data_dir)
523
+ except:
524
+ pass
525
+
526
+ dataset_manifest = prepare_dataset_manifest(args)
527
+
528
+ print("Start training")
529
+ top1_acc = 0
530
+ start_time = time.time()
531
+ mlperf_mlloger.end(key=mlperf_mllog.constants.INIT_STOP)
532
+
533
+ if utils.get_world_size() > 1:
534
+ utils.barrier()
535
+
536
+ mlperf_mlloger.start(key=mlperf_mllog.constants.RUN_START)
537
+
538
+ dataset, dataset_test, train_sampler, test_sampler = load_data(train_dir, val_dir, args, dataset_manifest)
539
+ data_loader = data_loader_type(
540
+ dataset, batch_size=args.batch_size, sampler=train_sampler,
541
+ num_workers=args.workers, pin_memory=pin_memory, pin_memory_device=pin_memory_device)
542
+
543
+ data_loader_test = data_loader_type(
544
+ dataset_test, batch_size=args.batch_size, sampler=test_sampler,
545
+ num_workers=args.workers, pin_memory=pin_memory, pin_memory_device=pin_memory_device)
546
+
547
+ if args.use_torch_compile:
548
+ model_for_train = torch.compile(model_for_train, backend="aot_hpu_training_backend")
549
+ model_for_eval = torch.compile(model_for_eval, backend="aot_hpu_training_backend")
550
+
551
+ mlperf_mlloger.start(
552
+ key=mlperf_mllog.constants.BLOCK_START,
553
+ value=None,
554
+ metadata={
555
+ 'first_epoch_num': 1,
556
+ 'epoch_count':
557
+ (args.eval_offset_epochs if args.eval_offset_epochs > 0
558
+ else args.epochs_between_evals)
559
+ })
560
+ for epoch in range(args.start_epoch, args.epochs):
561
+ # Setting epoch is done by Habana dataloader internally
562
+ if args.distributed and args.dl_worker_type != "HABANA":
563
+ train_sampler.set_epoch(epoch)
564
+
565
+ train_one_epoch(lr_scheduler, model_for_train, criterion, optimizer, data_loader,
566
+ device, epoch, print_freq=train_print_freq, args=args, apex=args.apex)
567
+
568
+ if epoch == next_eval_epoch:
569
+ mlperf_mlloger.start(
570
+ key=mlperf_mllog.constants.EVAL_START, value=None, metadata={'epoch_num': epoch + 1})
571
+ top1_acc = evaluate(model_for_eval, criterion, data_loader_test, device=device,
572
+ print_freq=eval_print_freq) / 100.
573
+ mlperf_mlloger.end(
574
+ key=mlperf_mllog.constants.EVAL_STOP, value=None, metadata={'epoch_num': epoch + 1})
575
+ mlperf_mlloger.event(
576
+ key=mlperf_mllog.constants.EVAL_ACCURACY, value=top1_acc, metadata={'epoch_num': epoch + 1})
577
+
578
+ first_epoch_num = max(epoch - args.epochs_between_evals + 1, 0)
579
+ epoch_count = args.epochs_between_evals
580
+ if first_epoch_num == 0:
581
+ epoch_count = args.eval_offset_epochs
582
+ if epoch_count == 0:
583
+ epoch_count = args.epochs_between_evals
584
+ mlperf_mlloger.end(
585
+ key=mlperf_mllog.constants.BLOCK_STOP,
586
+ value=None,
587
+ metadata={
588
+ 'first_epoch_num': first_epoch_num + 1,
589
+ 'epoch_count': epoch_count
590
+ })
591
+
592
+ if top1_acc >= args.target_accuracy:
593
+ break
594
+
595
+ next_eval_epoch += args.epochs_between_evals
596
+
597
+ if next_eval_epoch < args.epochs:
598
+ mlperf_mlloger.start(
599
+ key=mlperf_mllog.constants.BLOCK_START,
600
+ value=None,
601
+ metadata={
602
+ 'first_epoch_num': epoch + 2,
603
+ 'epoch_count': args.epochs_between_evals
604
+ })
605
+
606
+
607
+ if (args.output_dir and args.save_checkpoint):
608
+ if args.device == 'hpu':
609
+ checkpoint = {
610
+ 'model': model_without_ddp.state_dict(),
611
+ 'optimizer': optimizer.state_dict(),
612
+ 'lr_scheduler': None if lr_scheduler is None else lr_scheduler.state_dict(),
613
+ 'epoch': epoch,
614
+ 'args': args}
615
+
616
+ utils.save_on_master(
617
+ checkpoint,
618
+ os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))
619
+ utils.save_on_master(
620
+ checkpoint,
621
+ os.path.join(args.output_dir, 'checkpoint.pth'))
622
+
623
+ else:
624
+ checkpoint = {
625
+ 'model': model_without_ddp.state_dict(),
626
+ 'optimizer': optimizer.state_dict(),
627
+ 'lr_scheduler': None if lr_scheduler is None else lr_scheduler.state_dict(),
628
+ 'epoch': epoch,
629
+ 'args': args}
630
+ utils.save_on_master(
631
+ checkpoint,
632
+ os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))
633
+ utils.save_on_master(
634
+ checkpoint,
635
+ os.path.join(args.output_dir, 'checkpoint.pth'))
636
+
637
+ if top1_acc >= args.target_accuracy:
638
+ mlperf_mlloger.end(key=mlperf_mllog.constants.RUN_STOP, value=None, metadata={'status': 'success'})
639
+ else:
640
+ mlperf_mlloger.end(key=mlperf_mllog.constants.RUN_STOP, value=None, metadata={'status': 'fail'})
641
+
642
+ total_time = time.time() - start_time
643
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
644
+ print('Training time {}'.format(total_time_str))
645
+
646
+
647
+ def set_env_params():
648
+ os.environ["MAX_WAIT_ATTEMPTS"] = "50"
649
+ os.environ['HCL_CPU_AFFINITY'] = '1'
650
+
651
+
652
+ def prepare_dataset_manifest(args):
653
+ import glob
654
+ import pathlib
655
+
656
+ if args.data_path is not None:
657
+ # get files list
658
+ dataset_dir = os.path.join(args.data_path, 'train')
659
+
660
+ print(f"dataset dir: {dataset_dir}")
661
+ manifest_data = {}
662
+ manifest_data["file_list"] = sorted(
663
+ glob.glob(dataset_dir + "/*/*.{}".format("JPEG")))
664
+
665
+ # get class list
666
+ data_dir = pathlib.Path(dataset_dir)
667
+ manifest_data["class_list"] = sorted(
668
+ [item.name for item in data_dir.glob('*') if item.is_dir() == True])
669
+
670
+ file_sizes = {}
671
+
672
+ for filename in manifest_data["file_list"]:
673
+ file_sizes[filename] = os.stat(filename).st_size
674
+
675
+ manifest_data['file_sizes'] = file_sizes
676
+
677
+ return manifest_data
678
+
679
+
680
+ def parse_args():
681
+ import argparse
682
+ parser = argparse.ArgumentParser(
683
+ description='PyTorch Classification Training')
684
+
685
+ parser.add_argument('--data-path', default='/root/software/lfs/data/data/pytorch/imagenet/ILSVRC2012/',
686
+ help='dataset')
687
+ parser.add_argument('--dl-time-exclude', default='True', type=lambda x: x.lower() == 'true',
688
+ help='Set to False to include data load time')
689
+ parser.add_argument('--model', default='resnet18',
690
+ help='select ResNet models from resnet18, resnet34, resnet50, resnet101, resnet152, '
691
+ 'resnext50_32x4d, resnext101_32x4d, resnext101_32x8d, wide_resnet50_2, wide_resnet101_2')
692
+ parser.add_argument('--device', default='hpu', help='device')
693
+ parser.add_argument('-b', '--batch-size', default=128, type=int)
694
+ parser.add_argument('--epochs', default=90, type=int, metavar='N',
695
+ help='number of total epochs to run')
696
+ parser.add_argument('-ebe', '--epochs_between_evals', default=4, type=int, metavar='N',
697
+ help='number of epochs to be completed before evaluation (default: 4)')
698
+ parser.add_argument('-eoe', '--eval_offset_epochs', default=0, type=int, metavar='N',
699
+ help='offsets the epoch on which the evaluation starts (default: 0)')
700
+ parser.add_argument('--dl-worker-type', default='HABANA', type=lambda x: x.upper(),
701
+ choices=["MP", "HABANA"], help='select multiprocessing or habana accelerated')
702
+ parser.add_argument('-j', '--workers', default=10, type=int, metavar='N',
703
+ help='number of data loading workers (default: 10)')
704
+ parser.add_argument('--process-per-node', default=8, type=int, metavar='N',
705
+ help='Number of process per node')
706
+ parser.add_argument('--hls_type', default='HLS2', help='Node type')
707
+ parser.add_argument('--lars_decay_epochs', default='36', type=int, help='number of decay epochs')
708
+ parser.add_argument('--warmup_epochs', default='3', type=int, help='number of warmup epochs')
709
+ parser.add_argument('--base_learning_rate', default='9', type=float, help='base learning rate')
710
+ parser.add_argument('--end_learning_rate', default='0.0001', type=float, help='end learning rate')
711
+ parser.add_argument('--lr', default=0.1, type=float, help='initial learning rate')
712
+ parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
713
+ help='momentum')
714
+ parser.add_argument('--wd', '--weight-decay', default=5e-5, type=float,
715
+ metavar='W', help='weight decay (default: 5e-5)',
716
+ dest='weight_decay')
717
+ parser.add_argument('--lr-step-size', default=30, type=int,
718
+ help='decrease lr every step-size epochs')
719
+ parser.add_argument('--custom-lr-values', default=None, metavar='N', type=float, nargs='+',
720
+ help='custom lr values list')
721
+ parser.add_argument('--custom-lr-milestones', default=None, metavar='N', type=int, nargs='+',
722
+ help='custom lr milestones list')
723
+ parser.add_argument('--lr-gamma', default=0.1, type=float,
724
+ help='decrease lr by a factor of lr-gamma')
725
+ parser.add_argument('--label-smoothing', default=0.1, type=float,
726
+ help='Apply label smoothing to the loss. This applies to '
727
+ 'CrossEntropyLoss, when label_smoothing is greater than 0.')
728
+ parser.add_argument('--print-freq', default=1, type=int, help='print frequency')
729
+ parser.add_argument('--output-dir', default='.', help='path where to save')
730
+ parser.add_argument('--log-dir', default='', help='destination path for mllogs')
731
+ parser.add_argument('--profile-steps', default=None,
732
+ help='Profile steps range separated by comma (e.g. `--profile_steps 100,105`)')
733
+
734
+ parser.add_argument('--channels-last', default='False', type=lambda x: x.lower() == 'true',
735
+ help='Whether the input is in channels_last format. '
736
+ 'Any value other than True (case insensitive) disables channels-last')
737
+ parser.add_argument('--resume', default='', help='resume from checkpoint')
738
+ parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
739
+ help='start epoch')
740
+ parser.add_argument('--num_acc_steps', type=int, default=1, help='Number of gradient accumulation steps')
741
+ parser.add_argument('--num_gpus', type=int, default=0, help='Number of used gpus to run the model')
742
+ parser.add_argument('--target_accuracy', default=0.759, type=float, help='Quality target of training')
743
+ parser.add_argument(
744
+ "--cache-dataset",
745
+ dest="cache_dataset",
746
+ help="Cache the datasets for quicker initialization. It also serializes the transforms",
747
+ action="store_true",
748
+ )
749
+ parser.add_argument(
750
+ "--use_torch_compile",
751
+ dest="use_torch_compile",
752
+ help="Use torch.compile feature to run the model",
753
+ action="store_true",
754
+ )
755
+ parser.add_argument(
756
+ "--sync-bn",
757
+ dest="sync_bn",
758
+ help="Use sync batch norm",
759
+ action="store_true",
760
+ )
761
+ parser.add_argument(
762
+ "--test-only",
763
+ dest="test_only",
764
+ help="Only test the model",
765
+ action="store_true",
766
+ )
767
+ parser.add_argument(
768
+ "--pretrained",
769
+ dest="pretrained",
770
+ help="Use pre-trained models from the modelzoo",
771
+ action="store_true",
772
+ )
773
+ parser.add_argument(
774
+ "--hpu_graphs",
775
+ dest="hpu_graphs",
776
+ help="Use HPU graphs feature to run the model by default",
777
+ default='True', type=lambda x: x.lower() == 'true',
778
+ )
779
+ parser.add_argument('--enable-warmup', default='True', type=lambda x: x.lower() == 'true',
780
+ help='Whether the warmup is enabled')
781
+
782
+ # Mixed precision training parameters
783
+ parser.add_argument('--apex', action='store_true',
784
+ help='Use apex for mixed precision training')
785
+ parser.add_argument('--apex-opt-level', default='O1', type=str,
786
+ help='For apex mixed precision training: '
787
+ 'O0 for FP32 training, O1 for mixed precision training. '
788
+ 'For further detail, see https://github.com/NVIDIA/apex/tree/master/examples/imagenet'
789
+ )
790
+
791
+ # distributed training parameters
792
+ parser.add_argument('--world-size', default=1, type=int,
793
+ help='number of distributed processes')
794
+ parser.add_argument('--dist-url', default='env://',
795
+ help='url used to set up distributed training')
796
+ parser.add_argument('--num-train-steps', type=int, default=sys.maxsize, metavar='T',
797
+ help='number of steps a.k.a iterations to run in training phase')
798
+ parser.add_argument('--num-eval-steps', type=int, default=sys.maxsize, metavar='E',
799
+ help='number of steps a.k.a iterations to run in evaluation phase')
800
+ parser.add_argument('--save-checkpoint', action="store_true",
801
+ help='Whether or not to save model/checkpoint; True to save, False to avoid saving')
802
+ parser.add_argument('--run-lazy-mode', default='True', type=lambda x: x.lower() == 'true',
803
+ help='run model in lazy execution mode (enabled by default). '
804
+ 'Any value other than True (case-insensitive) disables lazy mode')
805
+ parser.add_argument('--use_autocast', action='store_true', help='enable autocast')
806
+
807
+ args = parser.parse_args()
808
+
809
+ return args
810
+
811
+
812
+ if __name__ == "__main__":
813
+ set_env_params()
814
+ args = parse_args()
815
+ main(args)
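Note on the boolean-style flags above (`--channels-last`, `--hpu_graphs`, `--enable-warmup`, `--run-lazy-mode`): they are parsed with `type=lambda x: x.lower() == 'true'`, so only a case-insensitive "true" enables the feature and any other value disables it. A minimal standalone sketch of that convention (illustrative only, not part of train.py):

import argparse

# The same string-to-bool convention used by the flags above.
parser = argparse.ArgumentParser()
parser.add_argument('--run-lazy-mode', default='True',
                    type=lambda x: x.lower() == 'true')

print(parser.parse_args([]).run_lazy_mode)                             # True (string default is converted too)
print(parser.parse_args(['--run-lazy-mode', 'false']).run_lazy_mode)   # False
print(parser.parse_args(['--run-lazy-mode', '0']).run_lazy_mode)       # False ('0' != 'true')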
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/PyTorch/utils.py ADDED
@@ -0,0 +1,273 @@
1
+ # Copyright (c) 2022, Habana Labs Ltd. All rights reserved.
2
+
3
+
4
+ from __future__ import print_function
5
+ from collections import defaultdict, deque
6
+ import datetime
7
+ import time
8
+ import torch
9
+ import torch.distributed as dist
10
+ import errno
11
+ import os
12
+ mpi_comm = None
13
+
14
+
15
+ class SmoothedValue(object):
16
+ """Track a series of values and provide access to smoothed values over a
17
+ window or the global series average.
18
+ """
19
+
20
+ def __init__(self, window_size=20, fmt=None):
21
+ if fmt is None:
22
+ fmt = "{median:.4f} ({global_avg:.4f})"
23
+ self.deque = deque(maxlen=window_size)
24
+ self.total = 0.0
25
+ self.count = 0
26
+ self.fmt = fmt
27
+
28
+ def update(self, value, n=1):
29
+ self.deque.append(value)
30
+ self.count += n
31
+ self.total += value * n
32
+
33
+ def synchronize_between_processes(self, device):
34
+ """
35
+ Warning: does not synchronize the deque!
36
+ """
37
+ if not is_dist_avail_and_initialized():
38
+ return
39
+ if device.type == 'hpu':
40
+ t = torch.tensor([self.count, self.total], dtype=torch.float32).to('hpu')
41
+ else:
42
+ t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
43
+ dist.barrier()
44
+
45
+ dist.all_reduce(t)
46
+ t = t.tolist()
47
+ self.count = int(t[0])
48
+ self.total = t[1]
49
+
50
+ @property
51
+ def median(self):
52
+ d = torch.tensor(list(self.deque))
53
+ return d.median().item()
54
+
55
+ @property
56
+ def avg(self):
57
+ d = torch.tensor(list(self.deque), dtype=torch.float32)
58
+ return d.mean().item()
59
+
60
+ @property
61
+ def global_avg(self):
62
+ return self.total / self.count
63
+
64
+ @property
65
+ def max(self):
66
+ return max(self.deque)
67
+
68
+ @property
69
+ def value(self):
70
+ return self.deque[-1]
71
+
72
+ def __str__(self):
73
+ return self.fmt.format(
74
+ median=self.median,
75
+ avg=self.avg,
76
+ global_avg=self.global_avg,
77
+ max=self.max,
78
+ value=self.value)
79
+
80
+
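A quick illustration of the smoothing behaviour (standalone sketch, not part of utils.py): the deque keeps only the last `window_size` values for `median`/`avg`, while `total`/`count` track the full run for `global_avg`.

sv = SmoothedValue(window_size=3)
for v in [10.0, 1.0, 1.0, 1.0]:
    sv.update(v)
print(sv.median)      # 1.0  -> median over the last 3 values only
print(sv.global_avg)  # 3.25 -> (10 + 1 + 1 + 1) / 4 over the whole run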
81
+ class MetricLogger(object):
82
+ def __init__(self, delimiter="\t", device=torch.device('cuda')):
83
+ self.meters = defaultdict(SmoothedValue)
84
+ self.delimiter = delimiter
85
+ self.device = device
86
+
87
+ def update(self, **kwargs):
88
+ for k, v in kwargs.items():
89
+ if isinstance(v, torch.Tensor):
90
+ v = v.item()
91
+ assert isinstance(v, (float, int))
92
+ self.meters[k].update(v)
93
+
94
+ def __getattr__(self, attr):
95
+ if attr in self.meters:
96
+ return self.meters[attr]
97
+ if attr in self.__dict__:
98
+ return self.__dict__[attr]
99
+ raise AttributeError("'{}' object has no attribute '{}'".format(
100
+ type(self).__name__, attr))
101
+
102
+ def __str__(self):
103
+ loss_str = []
104
+ for name, meter in self.meters.items():
105
+ loss_str.append(
106
+ "{}: {}".format(name, str(meter))
107
+ )
108
+ return self.delimiter.join(loss_str)
109
+
110
+ def synchronize_between_processes(self):
111
+ for meter in self.meters.values():
112
+ meter.synchronize_between_processes(self.device)
113
+
114
+ def add_meter(self, name, meter):
115
+ self.meters[name] = meter
116
+
117
+ def log_every(self, iterable, print_freq, header=None):
118
+ i = 0
119
+ if not header:
120
+ header = ''
121
+ start_time = time.time()
122
+ end = time.time()
123
+ iter_time = SmoothedValue(fmt='{avg:.4f}')
124
+ data_time = SmoothedValue(fmt='{avg:.4f}')
125
+ space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
126
+ if torch.cuda.is_available():
127
+ log_msg = self.delimiter.join([
128
+ header,
129
+ '[{0' + space_fmt + '}/{1}]',
130
+ 'eta: {eta}',
131
+ '{meters}',
132
+ 'time: {time}',
133
+ 'data: {data}',
134
+ 'max mem: {memory:.0f}'
135
+ ])
136
+ else:
137
+ log_msg = self.delimiter.join([
138
+ header,
139
+ '[{0' + space_fmt + '}/{1}]',
140
+ 'eta: {eta}',
141
+ '{meters}',
142
+ 'time: {time}',
143
+ 'data: {data}'
144
+ ])
145
+ MB = 1024.0 * 1024.0
146
+ for obj in iterable:
147
+ data_time.update(time.time() - end)
148
+ yield obj
149
+ iter_time.update(time.time() - end)
150
+ if i % print_freq == 0:
151
+ eta_seconds = iter_time.global_avg * (len(iterable) - i)
152
+ eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
153
+ if torch.cuda.is_available():
154
+ print(log_msg.format(
155
+ i, len(iterable), eta=eta_string,
156
+ meters=str(self),
157
+ time=str(iter_time), data=str(data_time),
158
+ memory=torch.cuda.max_memory_allocated() / MB))
159
+ else:
160
+ print(log_msg.format(
161
+ i, len(iterable), eta=eta_string,
162
+ meters=str(self),
163
+ time=str(iter_time), data=str(data_time)))
164
+ i += 1
165
+ end = time.time()
166
+ total_time = time.time() - start_time
167
+ total_time_str = str(datetime.timedelta(seconds=int(total_time)))
168
+ print('{} Total time: {}'.format(header, total_time_str))
169
+
170
+
171
+ # Modified version of accuracy. target and pred tensors are pytorch Long
172
+ # which is not supported by habana kernels yet. So fall back to CPU for
173
+ # ops involving these (and remain on CPU since this is the last portion of
174
+ # iteration and we need the accuracy values to be printed out on host)
175
+ def accuracy(output, target, topk=(1,)):
176
+ """Computes the accuracy over the k top predictions for the specified values of k"""
177
+ with torch.no_grad():
178
+ maxk = max(topk)
179
+ batch_size = target.size(0)
180
+
181
+ _, pred = output.topk(maxk, 1, True, True)
182
+
183
+ pred = pred.t()
184
+ pred_cpu = pred.to('cpu')
185
+ target_cpu = target.to('cpu')
186
+
187
+ correct = pred_cpu.eq(target_cpu[None])
188
+
189
+ res = []
190
+ for k in topk:
191
+ correct_k = correct[:k].flatten().sum(dtype=torch.float32)
192
+ res.append(correct_k * (100.0 / batch_size))
193
+ return res
194
+
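A small usage sketch for `accuracy` (hypothetical shapes, illustrative only): it moves predictions and targets to the CPU and returns one percentage tensor per requested k.

import torch

logits = torch.randn(8, 1000)           # hypothetical batch of 8 predictions
labels = torch.randint(0, 1000, (8,))
top1, top5 = accuracy(logits, labels, topk=(1, 5))
print(top1.item(), top5.item())         # percentages in [0, 100]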
195
+
196
+ def mkdir(path):
197
+ try:
198
+ os.makedirs(path)
199
+ except OSError as e:
200
+ if e.errno != errno.EEXIST:
201
+ raise
202
+
203
+
204
+ def setup_for_distributed(is_master):
205
+ """
206
+ This function disables printing when not in master process
207
+ """
208
+ import builtins as __builtin__
209
+ builtin_print = __builtin__.print
210
+
211
+ def print(*args, **kwargs):
212
+ force = kwargs.pop('force', False)
213
+ if is_master or force:
214
+ builtin_print(*args, **kwargs)
215
+
216
+ __builtin__.print = print
217
+
218
+
219
+ def is_dist_avail_and_initialized():
220
+ if not dist.is_available():
221
+ return False
222
+ if not dist.is_initialized():
223
+ return False
224
+ return True
225
+
226
+
227
+ def get_world_size():
228
+ if not is_dist_avail_and_initialized():
229
+ return 1
230
+ return dist.get_world_size()
231
+
232
+
233
+ def get_rank():
234
+ if not is_dist_avail_and_initialized():
235
+ return 0
236
+ return dist.get_rank()
237
+
238
+
239
+ def is_main_process():
240
+ return get_rank() == 0
241
+
242
+
243
+ def save_on_master(*args, **kwargs):
244
+ if is_main_process():
245
+ torch.save(*args, **kwargs)
246
+
247
+
248
+ def barrier():
249
+ dist.barrier()
250
+
251
+
252
+ def init_distributed_mode(args):
253
+ from habana_frameworks.torch.distributed.hccl import initialize_distributed_hpu
254
+ args.world_size, args.rank, args.local_rank = initialize_distributed_hpu()
255
+ if args.world_size == 1:
256
+ args.distributed = False
257
+ return
258
+
259
+ args.distributed = True
260
+ print('| distributed init (rank {}): {}'.format(
261
+ args.rank, args.dist_url), flush=True)
262
+
263
+ if args.device == 'hpu':
264
+ args.dist_backend = 'hccl'
265
+ dist._DEFAULT_FIRST_BUCKET_BYTES = 200 * 1024 * 1024 # 200MB
266
+ dist.init_process_group(args.dist_backend, rank=args.rank, world_size=args.world_size)
267
+ else:
268
+ torch.cuda.set_device(args.gpu)
269
+ args.dist_backend = 'nccl'
270
+ torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
271
+ world_size=args.world_size, rank=args.rank)
272
+
273
+ setup_for_distributed(args.rank == 0)
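One behaviour worth noting (illustrative sketch): after `setup_for_distributed` runs on a non-master rank, plain `print` calls are suppressed, but output can still be forced through the extra `force` keyword consumed by the wrapper.

setup_for_distributed(is_master=False)      # e.g. on a worker rank
print('regular message')                    # suppressed on non-master ranks
print('per-rank diagnostics', force=True)   # still printed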
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/batch_256.cfg ADDED
@@ -0,0 +1,20 @@
1
+ #!/bin/bash
2
+
3
+ # general param
4
+ export NUM_WORKERS_PER_HLS=8
5
+ export EVAL_OFFSET_EPOCHS=3
6
+ export EPOCHS_BETWEEN_EVALS=4
7
+ export DISPLAY_STEPS=1000
8
+
9
+ # hp param
10
+ export NUM_WORKERS=8
11
+ export BATCH_SIZE=256
12
+ export TRAIN_EPOCHS=35
13
+ export LARS_DECAY_EPOCHS=36
14
+ export WARMUP_EPOCHS=3
15
+ export BASE_LEARNING_RATE=9
16
+ export END_LEARNING_RATE=0.0001
17
+ export WEIGHT_DECAY=0.00005
18
+ export LR_MOMENTUM=0.9
19
+ export LABEL_SMOOTH=0.1
20
+ export STOP_THRESHOLD=0.759
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/HLS-Gaudi2-PT/launch_resnet.sh ADDED
@@ -0,0 +1,203 @@
1
+ #!/bin/bash
2
+
3
+ function print_synopsis()
4
+ {
5
+ cat << EOF
6
+ NAME
7
+ `basename $0`
8
+
9
+ SYNOPSIS
10
+ `basename $0` [-c <config>] [-ld <log-dir>] [-wd <work-dir>] [-dd <data-dir>] [-ut <bool>] [-h]
11
+
12
+ DESCRIPTION
13
+ Runs 8-gaudi local MLPerf Resnet training on PyTorch.
14
+
15
+ -c <config-file>, --config <config-file>
16
+ configuration file containing a series of "export VAR_NAME=value" commands
17
+ which override the default settings for Resnet training
18
+
19
+ -ld <log-dir>, --log-dir <log-dir>
20
+ specify the logging directory, used to store mllogs and outputs from all MPI processes
21
+
22
+ -wd <work-dir>, --work-dir <work-dir>
23
+ specify the work directory, used to store temporary files during the training
24
+
25
+ -dd <data-dir>
26
+ specify the data directory, containing the ImageNet dataset
27
+
28
+ -ut <bool>, --use-torch-compile <bool>
29
+ turn on the torch compile, default is false
30
+
31
+ -h, --help
32
+ print this help message
33
+
34
+ EXAMPLES
35
+ `basename $0` -dd /data/imagenet
36
+ MLPerf Resnet training on dataset stored in /data/imagenet
37
+
38
+ EOF
39
+ }
40
+
41
+ function parse_config()
42
+ {
43
+ while [ -n "$1" ]; do
44
+ case "$1" in
45
+ -c | --config )
46
+ CONFIG_FILE=$2
47
+ if [[ -f ${CONFIG_FILE} ]]; then
48
+ source $CONFIG_FILE
49
+ return
50
+ else
51
+ echo "Could not find ${CONFIG_FILE}"
52
+ exit 1
53
+ fi
54
+ ;;
55
+ * )
56
+ shift
57
+ ;;
58
+ esac
59
+ done
60
+ }
61
+
62
+ function parse_args()
63
+ {
64
+ while [ -n "$1" ]; do
65
+ case "$1" in
66
+ -c | --config )
67
+ shift 2
68
+ ;;
69
+ -ld | --log-dir )
70
+ LOG_DIR=$2
71
+ shift 2
72
+ ;;
73
+ -wd | --work-dir )
74
+ WORK_DIR=$2
75
+ shift 2
76
+ ;;
77
+ -dd | --data-dir )
78
+ DATA_DIR=$2
79
+ shift 2
80
+ ;;
81
+ -ut | --use-torch-compile )
82
+ USE_TORCH_COMPILE=$2
83
+ shift 2
84
+ ;;
85
+ -h | --help )
86
+ print_synopsis
87
+ exit 0
88
+ ;;
89
+ * )
90
+ echo "error: invalid parameter: $1"
91
+ print_synopsis
92
+ exit 1
93
+ ;;
94
+ esac
95
+ done
96
+ }
97
+
98
+ # Default settings for PyTorch Resnet training
99
+
100
+ NUM_WORKERS_PER_HLS=8
101
+ EVAL_OFFSET_EPOCHS=3
102
+ EPOCHS_BETWEEN_EVALS=4
103
+ DISPLAY_STEPS=1000
104
+
105
+ NUM_WORKERS=8
106
+ BATCH_SIZE=256
107
+ TRAIN_EPOCHS=35
108
+ LARS_DECAY_EPOCHS=36
109
+ WARMUP_EPOCHS=3
110
+ BASE_LEARNING_RATE=9
111
+ END_LEARNING_RATE=0.0001
112
+ WEIGHT_DECAY=0.00005
113
+ LR_MOMENTUM=0.9
114
+ LABEL_SMOOTH=0.1
115
+ STOP_THRESHOLD=0.759
116
+ USE_TORCH_COMPILE=false
117
+
118
+ DATA_DIR=/mnt/weka/data/pytorch/imagenet/ILSVRC2012/
119
+
120
+ WORK_DIR=/tmp/resnet50
121
+ LOG_DIR=/tmp/resnet_log
122
+ SCRIPT_DIR=$(dirname "${BASH_SOURCE[0]}")
123
+
124
+ # Default MPI settings
125
+ MPI_HOSTS=localhost:8
126
+ MPI_OUTPUT=/tmp/resnet_log
127
+ MPI_PATH=/opt/amazon/openmpi
128
+ SSH_PORT=3022
129
+
130
+ # MASTER_ADDR and MASTER_PORT are consumed by PyTorch c10d to establish a distributed group
131
+ export MASTER_ADDR=${MASTER_ADDR:-127.0.0.1}
132
+ export MASTER_PORT=${MASTER_PORT:-12345}
133
+
134
+ # apply optional config, overwriting default settings
135
+ parse_config "$@"
136
+
137
+ # optional command line arguments overwrite both default and config settings
138
+ parse_args "$@"
139
+
140
+ # Use torch compile
141
+ if [ "$USE_TORCH_COMPILE" == "true" ]; then
142
+ echo "torch.compile enabled"
143
+ TORCH_COMPILE_FLAGS="--use_torch_compile --run-lazy-mode false"
144
+ else
145
+ TORCH_COMPILE_FLAGS=""
146
+ fi
147
+
148
+ # Clear caches
149
+ PROC_FS=${PROC_FS:-"/proc"}
150
+ sync && echo 3 > $PROC_FS/sys/vm/drop_caches
151
+
152
+ # determine the number of available cores for each process
153
+ MPI_MAP_BY_PE=`lscpu | grep "^CPU(s):"| awk -v NUM=${NUM_WORKERS_PER_HLS} '{print int($2/NUM/2)}'`
154
+
155
+ # prepare directories
156
+ rm -rf $LOG_DIR
157
+ mkdir -p $WORK_DIR
158
+ mkdir -p $LOG_DIR
159
+
160
+ # run Pytorch Resnet training
161
+ mpirun \
162
+ --allow-run-as-root \
163
+ --np $NUM_WORKERS \
164
+ --bind-to core \
165
+ --rank-by core \
166
+ --map-by socket:PE=$MPI_MAP_BY_PE \
167
+ -H $MPI_HOSTS \
168
+ --report-bindings \
169
+ --tag-output \
170
+ --merge-stderr-to-stdout \
171
+ --output-filename $LOG_DIR \
172
+ --prefix $MPI_PATH \
173
+ -x PT_HPU_AUTOCAST_LOWER_PRECISION_OPS_LIST=$SCRIPT_DIR/PyTorch/ops_bf16_Resnet.txt \
174
+ -x PT_HPU_AUTOCAST_FP32_OPS_LIST=$SCRIPT_DIR/PyTorch/ops_fp32_Resnet.txt \
175
+ python3 $SCRIPT_DIR/PyTorch/train.py \
176
+ --model resnet50 \
177
+ --device hpu \
178
+ --print-freq $DISPLAY_STEPS \
179
+ --channels-last False \
180
+ --dl-time-exclude False \
181
+ --output-dir $WORK_DIR \
182
+ --log-dir $LOG_DIR \
183
+ --data-path $DATA_DIR \
184
+ --eval_offset_epochs $EVAL_OFFSET_EPOCHS \
185
+ --epochs_between_evals $EPOCHS_BETWEEN_EVALS \
186
+ --workers $NUM_WORKERS_PER_HLS \
187
+ --batch-size $BATCH_SIZE \
188
+ --epochs $TRAIN_EPOCHS \
189
+ --lars_decay_epochs $LARS_DECAY_EPOCHS \
190
+ --warmup_epochs $WARMUP_EPOCHS \
191
+ --base_learning_rate $BASE_LEARNING_RATE \
192
+ --end_learning_rate $END_LEARNING_RATE \
193
+ --weight-decay $WEIGHT_DECAY \
194
+ --momentum $LR_MOMENTUM \
195
+ --label-smoothing $LABEL_SMOOTH \
196
+ --target_accuracy $STOP_THRESHOLD \
197
+ --use_autocast \
198
+ $TORCH_COMPILE_FLAGS \
199
+ --dl-worker-type HABANA
200
+
201
+ # finalize LOG_DIR folder
202
+ chmod -R 777 ${LOG_DIR}
203
+ exit 0
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/mlp_log.py ADDED
@@ -0,0 +1,57 @@
1
+ # Copyright 2018 MLBenchmark Group. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Convenience function for logging compliance tags to stdout.
16
+ """
17
+
18
+ from __future__ import absolute_import
19
+ from __future__ import division
20
+ from __future__ import print_function
21
+
22
+
23
+ import inspect
24
+ import json
25
+ import logging
26
+ import os
27
+ import re
28
+ import sys
29
+ import time
30
+
31
+ try:
32
+ import horovod.tensorflow as hvd
33
+ except ImportError:
34
+ hvd = None
35
+
36
+ def get_mllog_mlloger(output_dir=None):
37
+ from mlperf_logging import mllog
38
+
39
+ if hvd is not None and hvd.is_initialized():
40
+ str_hvd_rank = str(hvd.rank())
41
+ else:
42
+ str_hvd_rank = "0"
43
+ mllogger = mllog.get_mllogger()
44
+ mllogger.propagate = False
45
+ mllog.propagate=False
46
+ if output_dir is None: output_dir='./log'
47
+ filenames = os.path.normpath(output_dir) + "/result_rank_" + str_hvd_rank + ".txt"
48
+ mllog.config(filename=filenames)
49
+ workername = "worker" + str_hvd_rank
50
+ mllog.config(
51
+ default_namespace = workername,
52
+ default_stack_offset = 1,
53
+ default_clear_line = False,
54
+ root_dir = os.path.normpath(
55
+ os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..")))
56
+
57
+ return mllogger, mllog
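A minimal usage sketch (assuming the `mlperf-logging` package is installed and `./log` is writable): the function returns both the configured logger and the `mllog` module, so callers can reference `mllog.constants` when emitting events, as the ResNet runnable does.

mllogger, mllog = get_mllog_mlloger(output_dir='./log')
mllogger.event(key=mllog.constants.SUBMISSION_BENCHMARK, value='resnet')
mllogger.start(key=mllog.constants.RUN_START)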
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/requirements.txt ADDED
@@ -0,0 +1,7 @@
1
+ absl_py==1.0.0
2
+ cloudpickle==1.6.0
3
+ psutil==5.8.0
4
+ PyYAML==6.0.0
5
+ requests==2.25.1
6
+ tensorflow_model_optimization==0.7.2
7
+ git+https://github.com/mlperf/[email protected]
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/resnet_model.py ADDED
@@ -0,0 +1,323 @@
1
+ # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """ResNet50 model for Keras.
16
+ Adapted from tf.keras.applications.resnet50.ResNet50().
17
+ This is ResNet model version 1.5.
18
+ Related papers/blogs:
19
+ - https://arxiv.org/abs/1512.03385
20
+ - https://arxiv.org/pdf/1603.05027v2.pdf
21
+ - http://torch.ch/blog/2016/02/04/resnets.html
22
+ """
23
+ from __future__ import absolute_import
24
+ from __future__ import division
25
+ from __future__ import print_function
26
+
27
+ from absl import flags
28
+ import tensorflow as tf
29
+ from TensorFlow.computer_vision.common import imagenet_preprocessing
30
+
31
+ FLAGS = flags.FLAGS
32
+ flags.DEFINE_float(
33
+ 'weight_decay',
34
+ default=1e-4,
35
+ help=('Weight decay coefficiant for l2 regularization.'))
36
+
37
+ layers = tf.keras.layers
38
+
39
+
40
+ def _gen_l2_regularizer(use_l2_regularizer=True):
41
+ return tf.keras.regularizers.L2(
42
+ FLAGS.weight_decay) if use_l2_regularizer else None
43
+
44
+
45
+ def identity_block(input_tensor,
46
+ kernel_size,
47
+ filters,
48
+ stage,
49
+ block,
50
+ use_l2_regularizer=True,
51
+ batch_norm_decay=0.9,
52
+ batch_norm_epsilon=1e-5):
53
+ """The identity block is the block that has no conv layer at shortcut.
54
+ Args:
55
+ input_tensor: input tensor
56
+ kernel_size: default 3, the kernel size of middle conv layer at main path
57
+ filters: list of integers, the filters of 3 conv layer at main path
58
+ stage: integer, current stage label, used for generating layer names
59
+ block: 'a','b'..., current block label, used for generating layer names
60
+ use_l2_regularizer: whether to use L2 regularizer on Conv layer.
61
+ batch_norm_decay: Moment of batch norm layers.
62
+ batch_norm_epsilon: Epsilon of batch borm layers.
63
+ Returns:
64
+ Output tensor for the block.
65
+ """
66
+ filters1, filters2, filters3 = filters
67
+ if tf.keras.backend.image_data_format() == 'channels_last':
68
+ bn_axis = 3
69
+ else:
70
+ bn_axis = 1
71
+ conv_name_base = 'res' + str(stage) + block + '_branch'
72
+ bn_name_base = 'bn' + str(stage) + block + '_branch'
73
+
74
+ x = layers.Conv2D(
75
+ filters1, (1, 1),
76
+ use_bias=False,
77
+ kernel_initializer='he_normal',
78
+ kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
79
+ name=conv_name_base + '2a')(
80
+ input_tensor)
81
+ x = layers.BatchNormalization(
82
+ axis=bn_axis,
83
+ momentum=batch_norm_decay,
84
+ epsilon=batch_norm_epsilon,
85
+ name=bn_name_base + '2a')(
86
+ x)
87
+ x = layers.Activation('relu')(x)
88
+
89
+ x = layers.Conv2D(
90
+ filters2,
91
+ kernel_size,
92
+ padding='same',
93
+ use_bias=False,
94
+ kernel_initializer='he_normal',
95
+ kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
96
+ name=conv_name_base + '2b')(
97
+ x)
98
+ x = layers.BatchNormalization(
99
+ axis=bn_axis,
100
+ momentum=batch_norm_decay,
101
+ epsilon=batch_norm_epsilon,
102
+ name=bn_name_base + '2b')(
103
+ x)
104
+ x = layers.Activation('relu')(x)
105
+
106
+ x = layers.Conv2D(
107
+ filters3, (1, 1),
108
+ use_bias=False,
109
+ kernel_initializer='he_normal',
110
+ kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
111
+ name=conv_name_base + '2c')(
112
+ x)
113
+ x = layers.BatchNormalization(
114
+ axis=bn_axis,
115
+ momentum=batch_norm_decay,
116
+ epsilon=batch_norm_epsilon,
117
+ name=bn_name_base + '2c')(
118
+ x)
119
+
120
+ x = layers.add([x, input_tensor])
121
+ x = layers.Activation('relu')(x)
122
+ return x
123
+
124
+
125
+ def conv_block(input_tensor,
126
+ kernel_size,
127
+ filters,
128
+ stage,
129
+ block,
130
+ strides=(2, 2),
131
+ use_l2_regularizer=True,
132
+ batch_norm_decay=0.9,
133
+ batch_norm_epsilon=1e-5):
134
+ """A block that has a conv layer at shortcut.
135
+ Note that from stage 3,
136
+ the second conv layer at main path is with strides=(2, 2)
137
+ And the shortcut should have strides=(2, 2) as well
138
+ Args:
139
+ input_tensor: input tensor
140
+ kernel_size: default 3, the kernel size of middle conv layer at main path
141
+ filters: list of integers, the filters of 3 conv layer at main path
142
+ stage: integer, current stage label, used for generating layer names
143
+ block: 'a','b'..., current block label, used for generating layer names
144
+ strides: Strides for the second conv layer in the block.
145
+ use_l2_regularizer: whether to use L2 regularizer on Conv layer.
146
+ batch_norm_decay: Moment of batch norm layers.
147
+ batch_norm_epsilon: Epsilon of batch borm layers.
148
+ Returns:
149
+ Output tensor for the block.
150
+ """
151
+ filters1, filters2, filters3 = filters
152
+ if tf.keras.backend.image_data_format() == 'channels_last':
153
+ bn_axis = 3
154
+ else:
155
+ bn_axis = 1
156
+ conv_name_base = 'res' + str(stage) + block + '_branch'
157
+ bn_name_base = 'bn' + str(stage) + block + '_branch'
158
+
159
+ x = layers.Conv2D(
160
+ filters1, (1, 1),
161
+ use_bias=False,
162
+ kernel_initializer='he_normal',
163
+ kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
164
+ name=conv_name_base + '2a')(
165
+ input_tensor)
166
+ x = layers.BatchNormalization(
167
+ axis=bn_axis,
168
+ momentum=batch_norm_decay,
169
+ epsilon=batch_norm_epsilon,
170
+ name=bn_name_base + '2a')(
171
+ x)
172
+ x = layers.Activation('relu')(x)
173
+
174
+ x = layers.Conv2D(
175
+ filters2,
176
+ kernel_size,
177
+ strides=strides,
178
+ padding='same',
179
+ use_bias=False,
180
+ kernel_initializer='he_normal',
181
+ kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
182
+ name=conv_name_base + '2b')(
183
+ x)
184
+ x = layers.BatchNormalization(
185
+ axis=bn_axis,
186
+ momentum=batch_norm_decay,
187
+ epsilon=batch_norm_epsilon,
188
+ name=bn_name_base + '2b')(
189
+ x)
190
+ x = layers.Activation('relu')(x)
191
+
192
+ x = layers.Conv2D(
193
+ filters3, (1, 1),
194
+ use_bias=False,
195
+ kernel_initializer='he_normal',
196
+ kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
197
+ name=conv_name_base + '2c')(
198
+ x)
199
+ x = layers.BatchNormalization(
200
+ axis=bn_axis,
201
+ momentum=batch_norm_decay,
202
+ epsilon=batch_norm_epsilon,
203
+ name=bn_name_base + '2c')(
204
+ x)
205
+
206
+ shortcut = layers.Conv2D(
207
+ filters3, (1, 1),
208
+ strides=strides,
209
+ use_bias=False,
210
+ kernel_initializer='he_normal',
211
+ kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
212
+ name=conv_name_base + '1')(
213
+ input_tensor)
214
+ shortcut = layers.BatchNormalization(
215
+ axis=bn_axis,
216
+ momentum=batch_norm_decay,
217
+ epsilon=batch_norm_epsilon,
218
+ name=bn_name_base + '1')(
219
+ shortcut)
220
+
221
+ x = layers.add([x, shortcut])
222
+ x = layers.Activation('relu')(x)
223
+ return x
224
+
225
+
226
+ def resnet50(num_classes,
227
+ batch_size=None,
228
+ use_l2_regularizer=True,
229
+ rescale_inputs=False,
230
+ batch_norm_decay=0.9,
231
+ batch_norm_epsilon=1e-5):
232
+ """Instantiates the ResNet50 architecture.
233
+ Args:
234
+ num_classes: `int` number of classes for image classification.
235
+ batch_size: Size of the batches for each step.
236
+ use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layer.
237
+ rescale_inputs: whether to rescale inputs from 0 to 1.
238
+ batch_norm_decay: Moment of batch norm layers.
239
+ batch_norm_epsilon: Epsilon of batch borm layers.
240
+ Returns:
241
+ A Keras model instance.
242
+ """
243
+ input_shape = (224, 224, 3)
244
+ img_input = layers.Input(shape=input_shape, batch_size=batch_size)
245
+ if rescale_inputs:
246
+ # Hub image modules expect inputs in the range [0, 1]. This rescales these
247
+ # inputs to the range expected by the trained model.
248
+ x = layers.Lambda(
249
+ lambda x: x * 255.0 - tf.keras.backend.constant( # pylint: disable=g-long-lambda
250
+ imagenet_preprocessing.CHANNEL_MEANS,
251
+ shape=[1, 1, 3],
252
+ dtype=x.dtype),
253
+ name='rescale')(
254
+ img_input)
255
+ else:
256
+ x = img_input
257
+
258
+ if tf.keras.backend.image_data_format() == 'channels_first':
259
+ x = layers.Permute((3, 1, 2))(x)
260
+ bn_axis = 1
261
+ else: # channels_last
262
+ bn_axis = 3
263
+
264
+ block_config = dict(
265
+ use_l2_regularizer=use_l2_regularizer,
266
+ batch_norm_decay=batch_norm_decay,
267
+ batch_norm_epsilon=batch_norm_epsilon)
268
+ x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
269
+ x = layers.Conv2D(
270
+ 64, (7, 7),
271
+ strides=(2, 2),
272
+ padding='valid',
273
+ use_bias=False,
274
+ kernel_initializer='he_normal',
275
+ kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
276
+ name='conv1')(
277
+ x)
278
+ x = layers.BatchNormalization(
279
+ axis=bn_axis,
280
+ momentum=batch_norm_decay,
281
+ epsilon=batch_norm_epsilon,
282
+ name='bn_conv1')(
283
+ x)
284
+ x = layers.Activation('relu')(x)
285
+ x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
286
+
287
+ x = conv_block(
288
+ x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), **block_config)
289
+ x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', **block_config)
290
+ x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', **block_config)
291
+
292
+ x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', **block_config)
293
+ x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', **block_config)
294
+ x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', **block_config)
295
+ x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', **block_config)
296
+
297
+ x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', **block_config)
298
+ x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', **block_config)
299
+ x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', **block_config)
300
+ x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', **block_config)
301
+ x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', **block_config)
302
+ x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', **block_config)
303
+
304
+ x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', **block_config)
305
+ x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', **block_config)
306
+ x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', **block_config)
307
+
308
+ x = layers.GlobalAveragePooling2D()(x)
309
+ x = layers.Dense(
310
+ num_classes,
311
+ kernel_initializer=tf.compat.v1.keras.initializers.random_normal(
312
+ stddev=0.01),
313
+ kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
314
+ bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),
315
+ name='fc1000')(
316
+ x)
317
+
318
+ # A softmax that is followed by the model loss must be done cannot be done
319
+ # in float16 due to numeric issues. So we pass dtype=float32.
320
+ x = layers.Activation('softmax', dtype='float32')(x)
321
+
322
+ # Create model.
323
+ return tf.keras.Model(img_input, x, name='resnet50')
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/Resnets/resnet_keras/resnet_runnable.py ADDED
@@ -0,0 +1,545 @@
1
+ # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ # List of changes:
16
+ # - added profiling callbacks support
17
+
18
+ # Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
19
+
20
+ """Runs a ResNet model on the ImageNet dataset using custom training loops."""
21
+
22
+ from __future__ import absolute_import
23
+ from __future__ import division
24
+ from __future__ import print_function
25
+
26
+ import json
27
+ import os
28
+ from typing import Dict, Optional, Text
29
+
30
+ import tensorflow as tf
31
+ from TensorFlow.common.modeling import performance
32
+ from TensorFlow.common.training import grad_utils
33
+ from TensorFlow.common.training import standard_runnable
34
+ from TensorFlow.common.training import utils
35
+ from TensorFlow.utils.flags import core as flags_core
36
+ from TensorFlow.computer_vision.common import imagenet_preprocessing
37
+ from TensorFlow.computer_vision.Resnets.resnet_keras import common
38
+ from TensorFlow.computer_vision.Resnets.resnet_keras import resnet_model
39
+ from TensorFlow.computer_vision.Resnets.resnet_keras.common import get_global_batch_size
40
+
41
+
42
+ try:
43
+ import horovod.tensorflow as hvd
44
+ except ImportError:
45
+ hvd = None
46
+ class ResnetRunnable(standard_runnable.StandardTrainable,
47
+ standard_runnable.StandardEvaluable):
48
+ """Implements the training and evaluation APIs for Resnet model."""
49
+
50
+ def __init__(self, flags_obj, time_callback, train_steps, epoch_steps, profiler_callback,mlperf_mlloger,mlperf_mllog):
51
+ standard_runnable.StandardTrainable.__init__(self,
52
+ flags_obj.use_tf_while_loop,
53
+ flags_obj.use_tf_function)
54
+ standard_runnable.StandardEvaluable.__init__(self,
55
+ flags_obj.use_tf_function)
56
+
57
+ self.strategy = tf.distribute.get_strategy()
58
+ self.flags_obj = flags_obj
59
+ self.dtype = flags_core.get_tf_dtype(flags_obj)
60
+ self.time_callback = time_callback
61
+ self.profiler_callback = profiler_callback
62
+ self.first_step = True
63
+ self.warmup_train_dataset = None
64
+ self.warmup_train_iter = None
65
+ self.warmup_eval_dataset = None
66
+ self.warmup_eval_iter = None
67
+
68
+ self.mlperf_mlloger, self.mlperf_mllog = mlperf_mlloger, mlperf_mllog
69
+ # Input pipeline related
70
+ batch_size = flags_obj.batch_size
71
+ if batch_size % self.strategy.num_replicas_in_sync != 0:
72
+ raise ValueError(
73
+ 'Batch size must be divisible by number of replicas : {}'.format(
74
+ self.strategy.num_replicas_in_sync))
75
+
76
+ # As auto rebatching is not supported in
77
+ # `experimental_distribute_datasets_from_function()` API, which is
78
+ # required when cloning dataset to multiple workers in eager mode,
79
+ # we use per-replica batch size.
80
+ self.batch_size = int(batch_size / self.strategy.num_replicas_in_sync)
81
+
82
+ if self.flags_obj.use_synthetic_data:
83
+ self.input_fn = self.get_synth_input_fn(True)
84
+ else:
85
+ self.input_fn = imagenet_preprocessing.input_fn
86
+
87
+ self.model = resnet_model.resnet50(
88
+ num_classes=imagenet_preprocessing.NUM_CLASSES,
89
+ batch_size=flags_obj.batch_size,
90
+ use_l2_regularizer=not flags_obj.single_l2_loss_op)
91
+
92
+ mlperf_variable_map = self.get_mlperf_variable_map()
93
+ for weight in self.model.weights:
94
+ if ('moving_mean' not in weight.name) and ('moving_variance' not in weight.name):
95
+ mlperf_mlloger.event(key=mlperf_mllog.constants.WEIGHTS_INITIALIZATION, metadata={'tensor': mlperf_variable_map[weight.name.split(':')[0]]})
96
+
97
+ self.use_lars_optimizer = self.flags_obj.optimizer == 'LARS'
98
+
99
+ self.optimizer = common.get_optimizer(flags_obj,
100
+ get_global_batch_size(flags_obj.batch_size),
101
+ train_steps,mlperf_mlloger,mlperf_mllog)
102
+ # Make sure iterations variable is created inside scope.
103
+ self.global_step = self.optimizer.iterations
104
+ self.train_steps = train_steps
105
+
106
+ self.one_hot = False
107
+ self.label_smoothing = flags_obj.label_smoothing
108
+ if self.label_smoothing and self.label_smoothing > 0:
109
+ self.one_hot = True
110
+
111
+ use_graph_rewrite = flags_obj.fp16_implementation == 'graph_rewrite'
112
+ if use_graph_rewrite and not flags_obj.use_tf_function:
113
+ raise ValueError('--fp16_implementation=graph_rewrite requires '
114
+ '--use_tf_function to be true')
115
+ self.optimizer = performance.configure_optimizer(
116
+ self.optimizer,
117
+ use_float16=self.dtype == tf.float16,
118
+ use_graph_rewrite=use_graph_rewrite,
119
+ loss_scale=flags_core.get_loss_scale(flags_obj, default_for_fp16=128))
120
+
121
+ if self.flags_obj.report_accuracy_metrics:
122
+ self.train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
123
+ if self.one_hot:
124
+ self.train_accuracy = tf.keras.metrics.CategoricalAccuracy(
125
+ 'train_accuracy', dtype=tf.float32)
126
+ else:
127
+ self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
128
+ 'train_accuracy', dtype=tf.float32)
129
+ self.test_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32)
130
+ else:
131
+ self.train_loss = None
132
+ self.train_accuracy = None
133
+ self.test_loss = None
134
+
135
+ self.dist_eval = flags_obj.dist_eval
136
+ self.profile = flags_obj.profile
137
+
138
+ if self.one_hot:
139
+ self.test_accuracy = tf.keras.metrics.CategoricalAccuracy(
140
+ 'test_accuracy', dtype=tf.float32)
141
+ else:
142
+ self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
143
+ 'test_accuracy', dtype=tf.float32)
144
+ self.eval_accuracy = 0
145
+
146
+ self.checkpoint = tf.train.Checkpoint(
147
+ model=self.model, optimizer=self.optimizer)
148
+
149
+ self.local_loss_mean = tf.keras.metrics.Mean("local_loss_min", dtype=tf.float32)
150
+
151
+ # Handling epochs.
152
+ self.epoch_steps = epoch_steps
153
+ self.epoch_helper = utils.EpochHelper(epoch_steps, self.global_step)
154
+
155
+ self.num_acc_steps = flags_obj.num_acc_steps
156
+ if self.num_acc_steps > 1:
157
+ self.init_accumulation_variables()
158
+
159
+ self.model_state = None
160
+
161
+ def init_accumulation_variables(self):
162
+ self.cur_acc_step = tf.compat.v1.get_variable(
163
+ name='cur_acc_step',
164
+ shape=(),
165
+ dtype=tf.int32,
166
+ trainable=False,
167
+ initializer=tf.compat.v1.constant_initializer(value=0)
168
+ )
169
+ self.accum_vars = [tf.compat.v1.get_variable(
170
+ name=tvar.name.split(':')[0] + '/accum',
171
+ shape=tvar.shape.as_list(),
172
+ dtype=tf.float32,
173
+ trainable=False,
174
+ initializer=tf.compat.v1.zeros_initializer()) for tvar in self.model.trainable_variables]
175
+ self.loss_acc = tf.compat.v1.get_variable(
176
+ name='loss_acc',
177
+ shape=(),
178
+ dtype=tf.float32,
179
+ trainable=False,
180
+ initializer=tf.compat.v1.constant_initializer(value=0.0)
181
+ )
182
+
183
+ def get_mlperf_variable_map(self):
184
+ try:
185
+ script_path = os.path.realpath(__file__)
186
+ head_tail = os.path.split(script_path)
187
+ mlperf_map_file = head_tail[0] + '/mlperf_variable_map.json'
188
+ with open(mlperf_map_file, mode='r') as file_handle:
189
+ json_content = file_handle.read()
190
+ mlperf_map = json.loads(json_content)
191
+ except IOError:
192
+ raise IOError(f"MLPerf variable map file: {mlperf_map_file} not accesible")
193
+ return mlperf_map
194
+
195
+ def get_synth_input_fn(self, is_training):
196
+ return common.get_synth_input_fn(
197
+ height=imagenet_preprocessing.DEFAULT_IMAGE_SIZE,
198
+ width=imagenet_preprocessing.DEFAULT_IMAGE_SIZE,
199
+ num_channels=imagenet_preprocessing.NUM_CHANNELS,
200
+ num_classes=imagenet_preprocessing.NUM_CLASSES,
201
+ dtype=common.get_dl_type(self.flags_obj),
202
+ drop_remainder=is_training,
203
+ experimental_preloading=self.flags_obj.experimental_preloading)
204
+
205
+ def build_train_dataset(self, synthetic=False, manifest_path=None):
206
+ """See base class."""
207
+ return utils.make_distributed_dataset(
208
+ self.strategy,
209
+ self.input_fn,
210
+ is_training=True,
211
+ data_dir=self.flags_obj.data_dir,
212
+ jpeg_data_dir=self.flags_obj.jpeg_data_dir,
213
+ batch_size=self.batch_size,
214
+ model_dir=self.flags_obj.model_dir,
215
+ parse_record_fn=imagenet_preprocessing.parse_record,
216
+ datasets_num_private_threads=self.flags_obj
217
+ .datasets_num_private_threads,
218
+ dtype=common.get_dl_type(self.flags_obj),
219
+ drop_remainder=True,
220
+ dataset_cache=self.flags_obj.dataset_cache,
221
+ experimental_preloading=self.flags_obj.experimental_preloading,
222
+ num_train_files=self.flags_obj.num_train_files,
223
+ num_eval_files=self.flags_obj.num_eval_files,
224
+ synthetic=synthetic,
225
+ manifest_path=manifest_path)
226
+
227
+ def build_synthetic_train_dataset(self):
228
+ return self.build_train_dataset(synthetic=True)
229
+
230
+ def build_eval_dataset(self, synthetic=False):
231
+ """See base class."""
232
+ return utils.make_distributed_dataset(
233
+ self.strategy,
234
+ self.input_fn,
235
+ is_training=False,
236
+ data_dir=self.flags_obj.data_dir,
237
+ jpeg_data_dir=self.flags_obj.jpeg_data_dir,
238
+ batch_size=self.batch_size,
239
+ model_dir=self.flags_obj.model_dir,
240
+ parse_record_fn=imagenet_preprocessing.parse_record,
241
+ dtype=common.get_dl_type(self.flags_obj),
242
+ dataset_cache=self.flags_obj.dataset_cache,
243
+ experimental_preloading=self.flags_obj.experimental_preloading,
244
+ num_train_files=self.flags_obj.num_train_files,
245
+ num_eval_files=self.flags_obj.num_eval_files,
246
+ synthetic=synthetic)
247
+
248
+ def build_synthetic_eval_dataset(self):
249
+ return self.build_eval_dataset(synthetic=True)
250
+
251
+ def get_prediction_loss(self, labels, logits, training=True):
252
+ if self.one_hot:
253
+ return tf.keras.losses.categorical_crossentropy(
254
+ labels, logits, label_smoothing=self.label_smoothing)
255
+ else:
256
+ return tf.keras.losses.sparse_categorical_crossentropy(labels, logits)
257
+
258
+ def train_loop_begin(self):
259
+ """See base class."""
260
+ # Reset all metrics
261
+ if self.train_loss:
262
+ self.train_loss.reset_states()
263
+ if self.train_accuracy:
264
+ self.train_accuracy.reset_states()
265
+
266
+ self._epoch_begin()
267
+ self.time_callback.on_batch_begin(self.epoch_helper.batch_index)
268
+ if self.profiler_callback is not None:
269
+ self.profiler_callback.on_batch_begin(self.epoch_helper.batch_index)
270
+
271
+ def train_step(self, iterator):
272
+ """See base class."""
273
+
274
+ def step_fn_broadcast():
275
+ if hvd is not None and hvd.is_initialized():
276
+ tf.cond(self.global_step == 1,
277
+ lambda: hvd.broadcast_variables(self.model.variables + self.optimizer.variables(), root_rank=0),
278
+ lambda: tf.constant(True))
279
+
280
+ def step_fn_modeling():
281
+ if self.flags_obj.modeling:
282
+ sess = tf.compat.v1.Session()
283
+ # pbtxt generation
284
+ tf.io.write_graph(sess.graph.as_graph_def(add_shapes=True), self.flags_obj.model_dir, 'graph.pbtxt')
285
+ # meta graph generation
286
+ tf.compat.v1.train.export_meta_graph(filename='checkpoint_model.meta', meta_info_def=None, graph_def=None, saver_def=None, collection_list=None, as_text=False, graph=None, export_scope=None, clear_devices=False, clear_extraneous_savers=False, strip_default_attrs=False, save_debug_info=False)
287
+
288
+ def step_fn_accumulation_steps_enabled(loss, tape):
289
+ grads = tape.gradient(loss, self.model.trainable_variables)
290
+
291
+ if self.cur_acc_step == 0:
292
+ for i in range(len(self.accum_vars)):
293
+ self.accum_vars[i].assign(grads[i])
294
+ else: # self.cur_acc_step > 0
295
+ for i in range(len(self.accum_vars)):
296
+ self.accum_vars[i].assign_add(grads[i])
297
+
298
+ self.loss_acc.assign_add(loss)
299
+ self.cur_acc_step.assign_add(1)
300
+
301
+ if self.cur_acc_step == self.num_acc_steps:
302
+ grads_and_vars = zip(self.accum_vars, self.model.trainable_variables)
303
+ self.optimizer.apply_gradients(grads_and_vars, experimental_aggregate_gradients=False)
304
+
305
+ step_fn_broadcast()
306
+ step_fn_modeling()
307
+
308
+ if self.train_loss:
309
+ self.train_loss.update_state(self.loss_acc)
310
+
311
+ self.cur_acc_step.assign(0)
312
+ self.loss_acc.assign(0.0)
313
+
314
+ def step_fn_accumulation_steps_disabled(loss, tape):
315
+ if hvd is not None and hvd.is_initialized():
316
+ grads = tape.gradient(loss, self.model.trainable_variables)
317
+ grads_and_vars = zip(grads, self.model.trainable_variables)
318
+ self.optimizer.apply_gradients(grads_and_vars, experimental_aggregate_gradients=False)
319
+ else:
320
+ grad_utils.minimize_using_explicit_allreduce(
321
+ tape, self.optimizer, loss, self.model.trainable_variables)
322
+
323
+ step_fn_broadcast()
324
+ step_fn_modeling()
325
+
326
+ if self.train_loss:
327
+ self.train_loss.update_state(loss)
328
+
329
+ def step_fn(inputs):
330
+ """Function to run on the device."""
331
+ images, labels = inputs
332
+ if self.one_hot:
333
+ labels = tf.cast(labels, tf.int32)
334
+ labels = tf.one_hot(labels, 1001)
335
+ labels = tf.squeeze(labels)
336
+
337
+ with tf.GradientTape() as tape:
338
+ logits = self.model(images, training=True)
339
+ prediction_loss = self.get_prediction_loss(labels, logits)
340
+ loss = tf.reduce_sum(prediction_loss) * (1.0 / self.flags_obj.batch_size)
341
+
342
+ if not self.use_lars_optimizer:
343
+ num_replicas = self.strategy.num_replicas_in_sync
344
+
345
+ if self.flags_obj.single_l2_loss_op:
346
+ l2_loss = self.flags_obj.weight_decay * tf.add_n([
347
+ tf.nn.l2_loss(v)
348
+ for v in self.model.trainable_variables
349
+ if ('bn' not in v.name)
350
+ ])
351
+
352
+ loss += (l2_loss / num_replicas)
353
+ else:
354
+ loss += (tf.reduce_sum(self.model.losses) / num_replicas)
355
+
356
+ loss = loss / self.num_acc_steps
357
+
358
+ if hvd is not None and hvd.is_initialized():
359
+ tape = hvd.DistributedGradientTape(tape)
360
+
361
+ if self.num_acc_steps > 1:
362
+ step_fn_accumulation_steps_enabled(loss, tape)
363
+ else:
364
+ step_fn_accumulation_steps_disabled(loss, tape)
365
+
366
+ if self.train_accuracy:
367
+ self.train_accuracy.update_state(labels, logits)
368
+
369
+ self.strategy.run(step_fn, args=(next(iterator),))
370
+
371
+ def train_loop_end(self):
372
+ """See base class."""
373
+ metrics = dict()
374
+ if self.train_loss:
375
+ metrics['train_loss'] = self.train_loss.result()
376
+ if self.train_accuracy:
377
+ metrics['train_accuracy'] = self.train_accuracy.result()
378
+ self.time_callback.on_batch_end(self.epoch_helper.batch_index - 1)
379
+ if self.profiler_callback is not None:
380
+ self.profiler_callback.on_batch_end(self.epoch_helper.batch_index - 1)
381
+ self._epoch_end()
382
+ return metrics
383
+
384
+ def eval_begin(self):
385
+ """See base class."""
386
+ if self.test_loss:
387
+ self.test_loss.reset_states()
388
+ self.test_accuracy.reset_states()
389
+ epoch_num = int(self.epoch_helper.current_epoch)
390
+ self.mlperf_mlloger.start(
391
+ key=self.mlperf_mllog.constants.EVAL_START, value=None, metadata={'epoch_num': epoch_num + 1})
392
+
393
+ def eval_step(self, iterator):
394
+ """See base class."""
395
+
396
+ def step_fn(inputs):
397
+ """Function to run on the device."""
398
+ images, labels = inputs
399
+ if self.one_hot:
400
+ labels = tf.cast(labels, tf.int32)
401
+ labels = tf.one_hot(labels, 1001)
402
+ labels = tf.squeeze(labels)
403
+
404
+ logits = self.model(images, training=False)
405
+ loss = self.get_prediction_loss(labels, logits, training=False)
406
+ loss = tf.reduce_sum(loss) * (1.0 / self.flags_obj.batch_size)
407
+ if self.test_loss:
408
+ self.test_loss.update_state(loss)
409
+ self.test_accuracy.update_state(labels, logits)
410
+
411
+ self.strategy.run(step_fn, args=(next(iterator),))
412
+
413
+ def eval_end(self):
414
+ """See base class."""
415
+ epoch_num = int(self.epoch_helper.current_epoch)
416
+ self.mlperf_mlloger.end(
417
+ key=self.mlperf_mllog.constants.EVAL_STOP, value=None, metadata={'epoch_num': epoch_num + 1})
418
+
419
+ local_hit = self.test_accuracy.total
420
+ local_count = self.test_accuracy.count
421
+
422
+ global_hit = local_hit
423
+ global_count = local_count
424
+ if hvd is not None and hvd.is_initialized() and self.dist_eval:
425
+ global_hit = hvd.allreduce(local_hit, op=hvd.Sum)
426
+ global_count = hvd.allreduce(local_count, op=hvd.Sum)
427
+ global_accuracy = float(global_hit / global_count)
428
+
429
+ # assign to self
430
+ self.test_accuracy.total.assign(global_hit)
431
+ self.test_accuracy.count.assign(global_count)
432
+
433
+ eval_accuracy = global_accuracy
434
+ self.eval_accuracy = eval_accuracy
435
+ self.mlperf_mlloger.event(
436
+ key=self.mlperf_mllog.constants.EVAL_ACCURACY, value=eval_accuracy, metadata={'epoch_num': epoch_num + 1})
437
+
438
+ first_epoch_num = max(epoch_num - self.flags_obj.epochs_between_evals + 1, 0)
439
+ epoch_count = self.flags_obj.epochs_between_evals
440
+ if first_epoch_num == 0:
441
+ epoch_count = self.flags_obj.eval_offset_epochs
442
+ if epoch_count == 0:
443
+ epoch_count = self.flags_obj.epochs_between_evals
444
+ self.mlperf_mlloger.end(
445
+ key=self.mlperf_mllog.constants.BLOCK_STOP,
446
+ value=None,
447
+ metadata={
448
+ 'first_epoch_num': first_epoch_num + 1,
449
+ 'epoch_count': epoch_count
450
+ })
451
+
452
+ past_threshold = False
453
+ if self.flags_obj.target_accuracy is not None:
454
+ past_threshold = eval_accuracy >= self.flags_obj.target_accuracy
455
+ if (hvd is not None and hvd.is_initialized() and (not self.dist_eval) ):
456
+ past_threshold = hvd.allreduce(tf.cast(past_threshold, tf.float32),
457
+ op=hvd.Sum) > 0
458
+
459
+ continue_training = True
460
+ if past_threshold:
461
+ continue_training = False
462
+ elif ( (not self.profile) and eval_accuracy <= 0.002):
463
+ continue_training = False
464
+ elif self.global_step.numpy() < self.train_steps:
465
+ self.mlperf_mlloger.start(
466
+ key=self.mlperf_mllog.constants.BLOCK_START,
467
+ value=None,
468
+ metadata={
469
+ 'first_epoch_num': epoch_num + 2,
470
+ 'epoch_count': self.flags_obj.epochs_between_evals
471
+ })
472
+
473
+ metrics = {
474
+ 'test_accuracy': eval_accuracy,
475
+ 'continue_training': continue_training,
476
+ }
477
+ if self.test_loss:
478
+ metrics['test_loss'] = self.test_loss.result()
479
+ return metrics
480
+
481
+ def warmup(self, num_steps: Optional[tf.Tensor]) -> Optional[Dict[Text, tf.Tensor]]:
482
+ """Implements device warmup with multiple steps.
483
+
484
+ This loop runs the input pipeline on synthetic data before training, thereby
485
+ allowing tf.function tracing before the dataset is accessed.
486
+
487
+ Args:
488
+ num_steps: A guideline for how many training steps to run. Note that it is
489
+ up to the model what constitutes a "step" (this may involve more than
490
+ one update to model parameters, e.g. if training a GAN).
491
+
492
+ Returns:
493
+ The function may return a dictionary of `Tensors`, which will be
494
+ written to logs and as TensorBoard summaries.
495
+ """
496
+ self.model_state = [weight.numpy() for weight in self.model.weights]
497
+
498
+ if self.warmup_train_dataset is None:
499
+ self.warmup_train_dataset = self.build_synthetic_train_dataset()
500
+ self.warmup_train_iter = tf.nest.map_structure(iter, self.warmup_train_dataset)
501
+
502
+ if self.train_loop_fn is None:
503
+ train_fn = self.train_step
504
+ if self.use_tf_while_loop:
505
+ self.train_loop_fn = utils.create_tf_while_loop_fn(train_fn)
506
+ else:
507
+ if self.use_tf_function:
508
+ train_fn = tf.function(train_fn)
509
+ self.train_loop_fn = utils.create_loop_fn(train_fn)
510
+
511
+ self.train_loop_fn(self.warmup_train_iter, num_steps)
512
+
513
+ if self.warmup_eval_dataset is None:
514
+ self.warmup_eval_dataset = self.build_synthetic_eval_dataset()
515
+ self.warmup_eval_iter = tf.nest.map_structure(iter, self.warmup_eval_dataset)
516
+
517
+ if self.eval_loop_fn is None:
518
+ eval_fn = self.eval_step
519
+ if self.eval_use_tf_function:
520
+ eval_fn = tf.function(eval_fn)
521
+ self.eval_loop_fn = utils.create_loop_fn(eval_fn)
522
+
523
+ self.eval_loop_fn(self.warmup_eval_iter, num_steps)
524
+
525
+ return self.warmup_loop_end()
526
+
527
+ def warmup_loop_end(self):
528
+ """See base class."""
529
+ # Reset the state
530
+ for weight, state in zip(self.model.weights, self.model_state):
531
+ weight.assign(state)
532
+ for weight in self.optimizer.weights:
533
+ weight.assign(tf.zeros(shape=weight.shape, dtype=weight.dtype))
534
+
535
+ def _epoch_begin(self):
536
+ if self.epoch_helper.epoch_begin():
537
+ self.time_callback.on_epoch_begin(self.epoch_helper.current_epoch)
538
+ if self.profiler_callback is not None:
539
+ self.profiler_callback.on_epoch_begin(self.epoch_helper.current_epoch)
540
+
541
+ def _epoch_end(self):
542
+ if self.epoch_helper.epoch_end():
543
+ self.time_callback.on_epoch_end(self.epoch_helper.current_epoch)
544
+ if self.profiler_callback is not None:
545
+ self.profiler_callback.on_epoch_end(self.epoch_helper.current_epoch)
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/__init__.py ADDED
File without changes
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/computer_vision/common/imagenet_preprocessing.py ADDED
@@ -0,0 +1,680 @@
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ # List of changes:
16
+ # - added support for prefetching to HPU
17
+ # - flags to control image preprocessing
18
+ # - flag to influence parallelism of dataset processing
19
+
20
+ # Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
21
+
22
+
23
+ """Provides utilities to preprocess images.
24
+
25
+ Training images are sampled using the provided bounding boxes, and subsequently
26
+ cropped to the sampled bounding box. Images are additionally flipped randomly,
27
+ then resized to the target output size (without aspect-ratio preservation).
28
+
29
+ Images used during evaluation are resized (with aspect-ratio preservation) and
30
+ centrally cropped.
31
+
32
+ All images undergo mean color subtraction.
33
+
34
+ Note that these steps are colloquially referred to as "ResNet preprocessing,"
35
+ and they differ from "VGG preprocessing," which does not use bounding boxes
36
+ and instead does an aspect-preserving resize followed by random crop during
37
+ training. (These both differ from "Inception preprocessing," which introduces
38
+ color distortion steps.)
39
+
40
+ """
41
+
42
+ from __future__ import absolute_import
43
+ from __future__ import division
44
+ from __future__ import print_function
45
+
46
+ import os
47
+ import uuid
48
+ from absl import flags
49
+ from absl import logging
50
+ import tensorflow as tf
51
+ from habana_frameworks.tensorflow.media import habana_imagenet_dataset
52
+
53
+ from habana_frameworks.tensorflow.multinode_helpers import comm_size
54
+
55
+ try:
56
+ import horovod.tensorflow as hvd
57
+ except ImportError:
58
+ hvd = None
59
+
60
+ DEFAULT_IMAGE_SIZE = 224
61
+ NUM_CHANNELS = 3
62
+ NUM_CLASSES = 1001
63
+
64
+ NUM_IMAGES = {
65
+ 'train': 1281167,
66
+ 'validation': 50000,
67
+ }
68
+
69
+ _SHUFFLE_BUFFER = 10000
70
+
71
+ _R_MEAN = 123.68
72
+ _G_MEAN = 116.78
73
+ _B_MEAN = 103.94
74
+ CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
75
+
76
+ # The lower bound for the smallest side of the image for aspect-preserving
77
+ # resizing. For example, if an image is 500 x 1000, it will be resized to
78
+ # _RESIZE_MIN x (_RESIZE_MIN * 2).
79
+ _RESIZE_MIN = 256
80
+
81
+ flags.DEFINE_integer(name='dataset_parallel_calls', default=tf.data.experimental.AUTOTUNE, help='Determines the number of parallel calls in dataset operations')
82
+
83
+
84
+ def process_record_dataset(dataset,
85
+ is_training,
86
+ batch_size,
87
+ shuffle_buffer,
88
+ parse_record_fn,
89
+ dtype=tf.float32,
90
+ datasets_num_private_threads=None,
91
+ drop_remainder=False,
92
+ tf_data_experimental_slack=False,
93
+ experimental_preloading=False):
94
+ """Given a Dataset with raw records, return an iterator over the records.
95
+
96
+ Args:
97
+ dataset: A Dataset representing raw records
98
+ is_training: A boolean denoting whether the input is for training.
99
+ batch_size: The number of samples per batch.
100
+ shuffle_buffer: The buffer size to use when shuffling records. A larger
101
+ value results in better randomness, but smaller values reduce startup
102
+ time and use less memory.
103
+ parse_record_fn: A function that takes a raw record and returns the
104
+ corresponding (image, label) pair.
105
+ dtype: Data type to use for images/features.
106
+ datasets_num_private_threads: Number of threads for a private
107
+ threadpool created for all datasets computation.
108
+ drop_remainder: A boolean indicates whether to drop the remainder of the
109
+ batches. If True, the batch dimension will be static.
110
+ tf_data_experimental_slack: Whether to enable tf.data's
111
+ `experimental_slack` option.
112
+
113
+ Returns:
114
+ Dataset of (image, label) pairs ready for iteration.
115
+ """
116
+ # Defines a specific size thread pool for tf.data operations.
117
+ if datasets_num_private_threads:
118
+ options = tf.data.Options()
119
+ options.experimental_threading.private_threadpool_size = (
120
+ datasets_num_private_threads)
121
+ dataset = dataset.with_options(options)
122
+ logging.info(
123
+ 'datasets_num_private_threads: %s', datasets_num_private_threads)
124
+
125
+ if is_training:
126
+ # Shuffles records before repeating to respect epoch boundaries.
127
+ dataset = dataset.shuffle(buffer_size=shuffle_buffer)
128
+ # Repeats the dataset for the number of epochs to train.
129
+ dataset = dataset.repeat()
130
+
131
+ num_parallel_calls = flags.FLAGS.dataset_parallel_calls
132
+ if hvd is not None and hvd.is_initialized():
133
+ num_parallel_calls = 16
134
+ # Parses the raw records into images and labels.
135
+ dataset = dataset.map(
136
+ lambda value: parse_record_fn(value, is_training, dtype),
137
+ num_parallel_calls=num_parallel_calls, deterministic=False)
138
+ dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
139
+
140
+ # Operations between the final prefetch and the get_next call to the iterator
141
+ # will happen synchronously during run time. We prefetch here again to
142
+ # background all of the above processing work and keep it out of the
143
+ # critical training path. Setting buffer_size to tf.data.experimental.AUTOTUNE
144
+ # allows DistributionStrategies to adjust how many batches to fetch based
145
+ # on how many devices are present.
146
+ if experimental_preloading:
147
+ device = "/device:HPU:0"
148
+ with tf.device(device):
149
+ dataset = dataset.apply(tf.data.experimental.prefetch_to_device(device))
150
+ else:
151
+ dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
152
+
153
+ options = tf.data.Options()
154
+ options.experimental_slack = tf_data_experimental_slack
155
+ dataset = dataset.with_options(options)
156
+
157
+ return dataset
158
+
159
+
160
+ def get_filenames(is_training, data_dir, num_train_files, num_eval_files):
161
+ """Return filenames for dataset."""
162
+ if is_training:
163
+ return [
164
+ os.path.join(data_dir, 'train/train-%05d-of-%05d' % (i, num_train_files))
165
+ for i in range(num_train_files)]
166
+ else:
167
+ return [
168
+ os.path.join(data_dir, 'validation/validation-%05d-of-%05d' % (i, num_eval_files))
169
+ for i in range(num_eval_files)]
170
+
171
+
172
+ def parse_example_proto(example_serialized):
173
+ """Parses an Example proto containing a training example of an image.
174
+
175
+ The output of the build_image_data.py image preprocessing script is a dataset
176
+ containing serialized Example protocol buffers. Each Example proto contains
177
+ the following fields (values are included as examples):
178
+
179
+ image/height: 462
180
+ image/width: 581
181
+ image/colorspace: 'RGB'
182
+ image/channels: 3
183
+ image/class/label: 615
184
+ image/class/synset: 'n03623198'
185
+ image/class/text: 'knee pad'
186
+ image/object/bbox/xmin: 0.1
187
+ image/object/bbox/xmax: 0.9
188
+ image/object/bbox/ymin: 0.2
189
+ image/object/bbox/ymax: 0.6
190
+ image/object/bbox/label: 615
191
+ image/format: 'JPEG'
192
+ image/filename: 'ILSVRC2012_val_00041207.JPEG'
193
+ image/encoded: <JPEG encoded string>
194
+
195
+ Args:
196
+ example_serialized: scalar Tensor tf.string containing a serialized
197
+ Example protocol buffer.
198
+
199
+ Returns:
200
+ image_buffer: Tensor tf.string containing the contents of a JPEG file.
201
+ label: Tensor tf.int32 containing the label.
202
+ bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
203
+ where each coordinate is [0, 1) and the coordinates are arranged as
204
+ [ymin, xmin, ymax, xmax].
205
+ """
206
+ # Dense features in Example proto.
207
+ feature_map = {
208
+ 'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,
209
+ default_value=''),
210
+ 'image/class/label': tf.io.FixedLenFeature([], dtype=tf.int64,
211
+ default_value=-1),
212
+ 'image/class/text': tf.io.FixedLenFeature([], dtype=tf.string,
213
+ default_value=''),
214
+ }
215
+ sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)
216
+ # Sparse features in Example proto.
217
+ feature_map.update(
218
+ {k: sparse_float32 for k in [
219
+ 'image/object/bbox/xmin', 'image/object/bbox/ymin',
220
+ 'image/object/bbox/xmax', 'image/object/bbox/ymax']})
221
+
222
+ features = tf.io.parse_single_example(serialized=example_serialized,
223
+ features=feature_map)
224
+ label = tf.cast(features['image/class/label'], dtype=tf.int32)
225
+
226
+ xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
227
+ ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
228
+ xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
229
+ ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
230
+
231
+ # Note that we impose an ordering of (y, x) just to make life difficult.
232
+ bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
233
+
234
+ # Force the variable number of bounding boxes into the shape
235
+ # [1, num_boxes, coords].
236
+ bbox = tf.expand_dims(bbox, 0)
237
+ bbox = tf.transpose(a=bbox, perm=[0, 2, 1])
238
+
239
+ return features['image/encoded'], label, bbox
240
+
241
+
242
+ def parse_record(raw_record, is_training, dtype):
243
+ """Parses a record containing a training example of an image.
244
+
245
+ The input record is parsed into a label and image, and the image is passed
246
+ through preprocessing steps (cropping, flipping, and so on).
247
+
248
+ Args:
249
+ raw_record: scalar Tensor tf.string containing a serialized
250
+ Example protocol buffer.
251
+ is_training: A boolean denoting whether the input is for training.
252
+ dtype: data type to use for images/features.
253
+
254
+ Returns:
255
+ Tuple with processed image tensor in a channel-last format and
256
+ one-hot-encoded label tensor.
257
+ """
258
+ image_buffer, label, bbox = parse_example_proto(raw_record)
259
+
260
+ image = preprocess_image(
261
+ image_buffer=image_buffer,
262
+ bbox=bbox,
263
+ output_height=DEFAULT_IMAGE_SIZE,
264
+ output_width=DEFAULT_IMAGE_SIZE,
265
+ num_channels=NUM_CHANNELS,
266
+ is_training=is_training)
267
+ image = tf.cast(image, dtype)
268
+
269
+ # Subtract one so that labels are in [0, 1000), and cast to float32 for
270
+ # Keras model.
271
+ label = tf.cast(tf.cast(tf.reshape(label, shape=[1]), dtype=tf.int32) - 1,
272
+ dtype=tf.float32)
273
+ return image, label
274
+
275
+
276
+ def get_parse_record_fn(use_keras_image_data_format=False):
277
+ """Get a function for parsing the records, accounting for image format.
278
+
279
+ This is useful by handling different types of Keras models. For instance,
280
+ the current resnet_model.resnet50 input format is always channel-last,
281
+ whereas the keras_applications mobilenet input format depends on
282
+ tf.keras.backend.image_data_format(). We should set
283
+ use_keras_image_data_format=False for the former and True for the latter.
284
+
285
+ Args:
286
+ use_keras_image_data_format: A boolean denoting whether data format is keras
287
+ backend image data format. If False, the image format is channel-last. If
288
+ True, the image format matches tf.keras.backend.image_data_format().
289
+
290
+ Returns:
291
+ Function to use for parsing the records.
292
+ """
293
+ def parse_record_fn(raw_record, is_training, dtype):
294
+ image, label = parse_record(raw_record, is_training, dtype)
295
+ if use_keras_image_data_format:
296
+ if tf.keras.backend.image_data_format() == 'channels_first':
297
+ image = tf.transpose(image, perm=[2, 0, 1])
298
+ return image, label
299
+ return parse_record_fn
300
+
301
+
302
+ def imagenet_dataset_fallback(is_training,
303
+ data_dir,
304
+ batch_size,
305
+ dtype=tf.float32,
306
+ datasets_num_private_threads=None,
307
+ parse_record_fn=parse_record,
308
+ input_context=None,
309
+ drop_remainder=False,
310
+ tf_data_experimental_slack=False,
311
+ dataset_cache=True,
312
+ filenames=None,
313
+ experimental_preloading=False,
314
+ num_train_files=1024,
315
+ num_eval_files=128,
316
+ use_distributed_eval=False):
317
+
318
+ if filenames is None:
319
+ filenames = get_filenames(is_training, data_dir, num_train_files, num_eval_files)
320
+ dataset = tf.data.Dataset.from_tensor_slices(filenames)
321
+
322
+ if (is_training or use_distributed_eval) and hvd is not None and hvd.is_initialized():
323
+ logging.info(
324
+ 'HVD sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d',
325
+ hvd.rank(), hvd.size())
326
+ dataset = dataset.shard(hvd.size(), hvd.rank())
327
+
328
+ if input_context:
329
+ logging.info(
330
+ 'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d',
331
+ input_context.input_pipeline_id, input_context.num_input_pipelines)
332
+ dataset = dataset.shard(input_context.num_input_pipelines,
333
+ input_context.input_pipeline_id)
334
+
335
+ if is_training:
336
+ # Shuffle the input files
337
+ dataset = dataset.shuffle(buffer_size=num_train_files)
338
+
339
+ # Convert to individual records.
340
+ # cycle_length = 10 means that up to 10 files will be read and deserialized in
341
+ # parallel. You may want to increase this number if you have a large number of
342
+ # CPU cores.
343
+ cycle_length = 10
344
+ if hvd is not None and hvd.is_initialized():
345
+ if is_training:
346
+ cycle_length = num_train_files // comm_size()
347
+ else:
348
+ cycle_length = num_eval_files // comm_size()
349
+ cycle_length = min(cycle_length, 10)
350
+ dataset = dataset.interleave(
351
+ tf.data.TFRecordDataset,
352
+ cycle_length=cycle_length,
353
+ num_parallel_calls=flags.FLAGS.dataset_parallel_calls)
354
+
355
+ if dataset_cache:
356
+ # Improve performance when training and eval data is in remote storage and
357
+ # can fit into worker memory.
358
+ dataset = dataset.cache()
359
+
360
+ return process_record_dataset(
361
+ dataset=dataset,
362
+ is_training=is_training,
363
+ batch_size=batch_size,
364
+ shuffle_buffer=_SHUFFLE_BUFFER,
365
+ parse_record_fn=parse_record_fn,
366
+ dtype=dtype,
367
+ datasets_num_private_threads=datasets_num_private_threads,
368
+ drop_remainder=drop_remainder,
369
+ tf_data_experimental_slack=tf_data_experimental_slack,
370
+ experimental_preloading=experimental_preloading
371
+ )
372
+
373
+
374
+ def fetch_synth_data(is_training: bool, batch_size: int, model_dir, dtype) -> str:
375
+ """A function that generates and stores synthetic data
376
+
377
+ Args:
378
+ is_training: A boolean denoting whether the input is for training.
379
+ batch_size: The number of samples per batch
380
+ dtype: Data type to use for images/features
381
+
382
+ Returns:
383
+ Path to synthetic data
384
+ """
385
+ root_path = f'{model_dir}/resnet_synth_data'
386
+ batch_class_name = f'{uuid.uuid4()}'
387
+ batch_path = f'{root_path}/train/{batch_class_name}/' if is_training else f'{root_path}/val/{batch_class_name}'
388
+
389
+ os.makedirs(batch_path, exist_ok=True)
390
+ for i in range(batch_size):
391
+ input = tf.random.truncated_normal([DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE, NUM_CHANNELS],
392
+ dtype=dtype,
393
+ mean=127,
394
+ stddev=60,
395
+ name='synthetic_inputs')
396
+ casted_input = tf.cast(input, tf.uint8).numpy()
397
+ encode = tf.image.encode_jpeg(casted_input, format='rgb', quality=95).numpy()
398
+ filename = f'{batch_class_name}_{i}.JPEG'
399
+ with open(f'{batch_path}/{filename}', 'w+b') as fd:
400
+ fd.write(encode)
401
+ return root_path
402
+
403
+ def input_fn(is_training,
404
+ data_dir,
405
+ jpeg_data_dir,
406
+ batch_size,
407
+ model_dir,
408
+ dtype=tf.float32,
409
+ datasets_num_private_threads=None,
410
+ parse_record_fn=parse_record,
411
+ input_context=None,
412
+ drop_remainder=False,
413
+ tf_data_experimental_slack=False,
414
+ dataset_cache=True,
415
+ filenames=None,
416
+ experimental_preloading=False,
417
+ num_train_files=1024,
418
+ num_eval_files=128,
419
+ synthetic=False,
420
+ manifest_path=None):
421
+ """Input function which provides batches for train or eval.
422
+
423
+ Args:
424
+ is_training: A boolean denoting whether the input is for training.
425
+ data_dir: The directory containing the input data.
426
+ batch_size: The number of samples per batch.
427
+ dtype: Data type to use for images/features
428
+ datasets_num_private_threads: Number of private threads for tf.data.
429
+ parse_record_fn: Function to use for parsing the records.
430
+ input_context: A `tf.distribute.InputContext` object passed in by
431
+ `tf.distribute.Strategy`.
432
+ drop_remainder: A boolean indicates whether to drop the remainder of the
433
+ batches. If True, the batch dimension will be static.
434
+ tf_data_experimental_slack: Whether to enable tf.data's
435
+ `experimental_slack` option.
436
+ dataset_cache: Whether to cache the training and eval datasets on workers.
437
+ Typically used to improve performance when training and eval data is in
438
+ remote storage and can fit into worker memory.
439
+ filenames: Optional field for providing the file names of the TFRecords.
440
+ synthetic: A boolean that determines whether synthetic data should be generated.
441
+
442
+ Returns:
443
+ A dataset that can be used for iteration.
444
+ """
445
+ jpeg_data_path = fetch_synth_data(is_training, batch_size, model_dir, dtype) if synthetic else jpeg_data_dir
446
+ return habana_imagenet_dataset(fallback=imagenet_dataset_fallback,
447
+ is_training=is_training,
448
+ tf_data_dir=data_dir,
449
+ jpeg_data_dir=jpeg_data_path,
450
+ batch_size=batch_size,
451
+ num_channels=NUM_CHANNELS,
452
+ img_size=DEFAULT_IMAGE_SIZE,
453
+ dtype=dtype,
454
+ use_distributed_eval=flags.FLAGS.dist_eval,
455
+ datasets_num_private_threads=datasets_num_private_threads,
456
+ parse_record_fn=parse_record_fn,
457
+ input_context=input_context,
458
+ drop_remainder=drop_remainder,
459
+ tf_data_experimental_slack=tf_data_experimental_slack,
460
+ dataset_cache=dataset_cache,
461
+ filenames=filenames,
462
+ experimental_preloading=experimental_preloading,
463
+ num_train_files=num_train_files,
464
+ num_eval_files=num_eval_files,
465
+ use_pytorch_style_crop=True,
466
+ manifest_path=manifest_path)
467
+
468
+
469
+ def _decode_crop_and_flip(image_buffer, bbox, num_channels):
470
+ """Crops the given image to a random part of the image, and randomly flips.
471
+
472
+ We use the fused decode_and_crop op, which performs better than the two ops
473
+ used separately in series, but note that this requires that the image be
474
+ passed in as an un-decoded string Tensor.
475
+
476
+ Args:
477
+ image_buffer: scalar string Tensor representing the raw JPEG image buffer.
478
+ bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
479
+ where each coordinate is [0, 1) and the coordinates are arranged as
480
+ [ymin, xmin, ymax, xmax].
481
+ num_channels: Integer depth of the image buffer for decoding.
482
+
483
+ Returns:
484
+ 3-D tensor with cropped image.
485
+
486
+ """
487
+ # A large fraction of image datasets contain a human-annotated bounding box
488
+ # delineating the region of the image containing the object of interest. We
489
+ # choose to create a new bounding box for the object which is a randomly
490
+ # distorted version of the human-annotated bounding box that obeys an
491
+ # allowed range of aspect ratios, sizes and overlap with the human-annotated
492
+ # bounding box. If no box is supplied, then we assume the bounding box is
493
+ # the entire image.
494
+ sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
495
+ tf.image.extract_jpeg_shape(image_buffer),
496
+ bounding_boxes=bbox,
497
+ min_object_covered=0.1,
498
+ aspect_ratio_range=[0.75, 1.33],
499
+ area_range=[0.05, 1.0],
500
+ max_attempts=100,
501
+ use_image_if_no_bounding_boxes=True)
502
+ bbox_begin, bbox_size, _ = sample_distorted_bounding_box
503
+
504
+ # Reassemble the bounding box in the format the crop op requires.
505
+ offset_y, offset_x, _ = tf.unstack(bbox_begin)
506
+ target_height, target_width, _ = tf.unstack(bbox_size)
507
+ crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
508
+
509
+ # Use the fused decode and crop op here, which is faster than each in series.
510
+ cropped = tf.image.decode_and_crop_jpeg(
511
+ image_buffer, crop_window, channels=num_channels)
512
+
513
+ # Flip to add a little more random distortion in.
514
+ cropped = tf.image.random_flip_left_right(cropped)
515
+ return cropped
516
+
517
+
518
+ def _central_crop(image, crop_height, crop_width):
519
+ """Performs central crops of the given image list.
520
+
521
+ Args:
522
+ image: a 3-D image tensor
523
+ crop_height: the height of the image following the crop.
524
+ crop_width: the width of the image following the crop.
525
+
526
+ Returns:
527
+ 3-D tensor with cropped image.
528
+ """
529
+ shape = tf.shape(input=image)
530
+ height, width = shape[0], shape[1]
531
+
532
+ amount_to_be_cropped_h = (height - crop_height)
533
+ crop_top = amount_to_be_cropped_h // 2
534
+ amount_to_be_cropped_w = (width - crop_width)
535
+ crop_left = amount_to_be_cropped_w // 2
536
+ return tf.slice(
537
+ image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])
538
+
539
+
540
+ def _mean_image_subtraction(image, means, num_channels):
541
+ """Subtracts the given means from each image channel.
542
+
543
+ For example:
544
+ means = [123.68, 116.779, 103.939]
545
+ image = _mean_image_subtraction(image, means)
546
+
547
+ Note that the rank of `image` must be known.
548
+
549
+ Args:
550
+ image: a tensor of size [height, width, C].
551
+ means: a C-vector of values to subtract from each channel.
552
+ num_channels: number of color channels in the image that will be distorted.
553
+
554
+ Returns:
555
+ the centered image.
556
+
557
+ Raises:
558
+ ValueError: If the rank of `image` is unknown, if `image` has a rank other
559
+ than three or if the number of channels in `image` doesn't match the
560
+ number of values in `means`.
561
+ """
562
+ if image.get_shape().ndims != 3:
563
+ raise ValueError('Input must be of size [height, width, C>0]')
564
+
565
+ if len(means) != num_channels:
566
+ raise ValueError('len(means) must match the number of channels')
567
+
568
+ # We have a 1-D tensor of means; convert to 3-D.
569
+ # Note(b/130245863): we explicitly call `broadcast` instead of simply
570
+ # expanding dimensions for better performance.
571
+ means = tf.broadcast_to(means, tf.shape(image))
572
+
573
+ return image - means
574
+
575
+
576
+ def _smallest_size_at_least(height, width, resize_min):
577
+ """Computes new shape with the smallest side equal to `smallest_side`.
578
+
579
+ Computes new shape with the smallest side equal to `smallest_side` while
580
+ preserving the original aspect ratio.
581
+
582
+ Args:
583
+ height: an int32 scalar tensor indicating the current height.
584
+ width: an int32 scalar tensor indicating the current width.
585
+ resize_min: A python integer or scalar `Tensor` indicating the size of
586
+ the smallest side after resize.
587
+
588
+ Returns:
589
+ new_height: an int32 scalar tensor indicating the new height.
590
+ new_width: an int32 scalar tensor indicating the new width.
591
+ """
592
+ resize_min = tf.cast(resize_min, tf.float32)
593
+
594
+ # Convert to floats to make subsequent calculations go smoothly.
595
+ height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
596
+
597
+ smaller_dim = tf.minimum(height, width)
598
+ scale_ratio = resize_min / smaller_dim
599
+
600
+ # Convert back to ints to make heights and widths that TF ops will accept.
601
+ new_height = tf.cast(height * scale_ratio, tf.int32)
602
+ new_width = tf.cast(width * scale_ratio, tf.int32)
603
+
604
+ return new_height, new_width
605
+
606
+
607
+ def _aspect_preserving_resize(image, resize_min):
608
+ """Resize images preserving the original aspect ratio.
609
+
610
+ Args:
611
+ image: A 3-D image `Tensor`.
612
+ resize_min: A python integer or scalar `Tensor` indicating the size of
613
+ the smallest side after resize.
614
+
615
+ Returns:
616
+ resized_image: A 3-D tensor containing the resized image.
617
+ """
618
+ shape = tf.shape(input=image)
619
+ height, width = shape[0], shape[1]
620
+
621
+ new_height, new_width = _smallest_size_at_least(height, width, resize_min)
622
+
623
+ return _resize_image(image, new_height, new_width)
624
+
625
+
626
+ def _resize_image(image, height, width):
627
+ """Simple wrapper around tf.resize_images.
628
+
629
+ This is primarily to make sure we use the same `ResizeMethod` and other
630
+ details each time.
631
+
632
+ Args:
633
+ image: A 3-D image `Tensor`.
634
+ height: The target height for the resized image.
635
+ width: The target width for the resized image.
636
+
637
+ Returns:
638
+ resized_image: A 3-D tensor containing the resized image. The first two
639
+ dimensions have the shape [height, width].
640
+ """
641
+ return tf.compat.v1.image.resize(
642
+ image, [height, width], method=tf.image.ResizeMethod.BILINEAR,
643
+ align_corners=False)
644
+
645
+
646
+ def preprocess_image(image_buffer, bbox, output_height, output_width,
647
+ num_channels, is_training=False):
648
+ """Preprocesses the given image.
649
+
650
+ Preprocessing includes decoding, cropping, and resizing for both training
651
+ and eval images. Training preprocessing, however, introduces some random
652
+ distortion of the image to improve accuracy.
653
+
654
+ Args:
655
+ image_buffer: scalar string Tensor representing the raw JPEG image buffer.
656
+ bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
657
+ where each coordinate is [0, 1) and the coordinates are arranged as
658
+ [ymin, xmin, ymax, xmax].
659
+ output_height: The height of the image after preprocessing.
660
+ output_width: The width of the image after preprocessing.
661
+ num_channels: Integer depth of the image buffer for decoding.
662
+ is_training: `True` if we're preprocessing the image for training and
663
+ `False` otherwise.
664
+
665
+ Returns:
666
+ A preprocessed image.
667
+ """
668
+ if is_training:
669
+ # For training, we want to randomize some of the distortions.
670
+ image = _decode_crop_and_flip(image_buffer, bbox, num_channels)
671
+ image = _resize_image(image, output_height, output_width)
672
+ else:
673
+ # For validation, we want to decode, resize, then just crop the middle.
674
+ image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
675
+ image = _aspect_preserving_resize(image, _RESIZE_MIN)
676
+ image = _central_crop(image, output_height, output_width)
677
+
678
+ image.set_shape([output_height, output_width, num_channels])
679
+
680
+ return _mean_image_subtraction(image, CHANNEL_MEANS, num_channels)
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/__init__.py ADDED
File without changes
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_base.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Flags which will be nearly universal across models."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ from absl import flags
22
+ import tensorflow as tf
23
+
24
+ from TensorFlow.utils.flags._conventions import help_wrap
25
+ from TensorFlow.utils.logs import hooks_helper
26
+
27
+
28
+ def define_base(data_dir=True, model_dir=True, clean=False, train_epochs=False,
29
+ epochs_between_evals=False, stop_threshold=False,
30
+ batch_size=True, num_gpu=False, hooks=False, export_dir=False,
31
+ distribution_strategy=False, run_eagerly=False):
32
+ """Register base flags.
33
+
34
+ Args:
35
+ data_dir: Create a flag for specifying the input data directory.
36
+ model_dir: Create a flag for specifying the model file directory.
37
+ clean: Create a flag for removing the model_dir.
38
+ train_epochs: Create a flag to specify the number of training epochs.
39
+ epochs_between_evals: Create a flag to specify the frequency of testing.
40
+ stop_threshold: Create a flag to specify a threshold accuracy or other
41
+ eval metric which should trigger the end of training.
42
+ batch_size: Create a flag to specify the batch size.
43
+ num_gpu: Create a flag to specify the number of GPUs used.
44
+ hooks: Create a flag to specify hooks for logging.
45
+ export_dir: Create a flag to specify where a SavedModel should be exported.
46
+ distribution_strategy: Create a flag to specify which Distribution Strategy
47
+ to use.
48
+ run_eagerly: Create a flag to specify to run eagerly op by op.
49
+ Returns:
50
+ A list of flags for core.py to marks as key flags.
51
+ """
52
+ key_flags = []
53
+
54
+ if data_dir:
55
+ flags.DEFINE_string(
56
+ name="data_dir", short_name="dd", default="/tmp",
57
+ help=help_wrap("The location of the input data."))
58
+ key_flags.append("data_dir")
59
+
60
+ flags.DEFINE_string(
61
+ name="jpeg_data_dir", short_name="jpdd", default=None,
62
+ help=help_wrap("The location of the JPEG version of the input data. Used for media dataloader"))
63
+ key_flags.append("jpeg_data_dir")
64
+
65
+ if model_dir:
66
+ flags.DEFINE_string(
67
+ name="model_dir", short_name="md", default="/tmp",
68
+ help=help_wrap("The location of the model checkpoint files."))
69
+ key_flags.append("model_dir")
70
+
71
+ if clean:
72
+ flags.DEFINE_boolean(
73
+ name="clean", default=False,
74
+ help=help_wrap("If set, model_dir will be removed if it exists."))
75
+ key_flags.append("clean")
76
+
77
+ if train_epochs:
78
+ flags.DEFINE_integer(
79
+ name="train_epochs", short_name="te", default=1,
80
+ help=help_wrap("The number of epochs used to train."))
81
+ key_flags.append("train_epochs")
82
+
83
+ if epochs_between_evals:
84
+ flags.DEFINE_integer(
85
+ name="epochs_between_evals", short_name="ebe", default=1,
86
+ help=help_wrap("The number of training epochs to run between "
87
+ "evaluations."))
88
+ key_flags.append("epochs_between_evals")
89
+
90
+ if stop_threshold:
91
+ flags.DEFINE_float(
92
+ name="stop_threshold", short_name="st",
93
+ default=None,
94
+ help=help_wrap("If passed, training will stop at the earlier of "
95
+ "train_epochs and when the evaluation metric is "
96
+ "greater than or equal to stop_threshold."))
97
+
98
+ if batch_size:
99
+ flags.DEFINE_integer(
100
+ name="batch_size", short_name="bs", default=32,
101
+ help=help_wrap("Batch size for training and evaluation. When using "
102
+ "multiple gpus, this is the global batch size for "
103
+ "all devices. For example, if the batch size is 32 "
104
+ "and there are 4 GPUs, each GPU will get 8 examples on "
105
+ "each step."))
106
+ key_flags.append("batch_size")
107
+
108
+ if num_gpu:
109
+ flags.DEFINE_integer(
110
+ name="num_gpus", short_name="ng",
111
+ default=1,
112
+ help=help_wrap(
113
+ "How many GPUs to use at each worker with the "
114
+ "DistributionStrategies API. The default is 1."))
115
+
116
+ if run_eagerly:
117
+ flags.DEFINE_boolean(
118
+ name="run_eagerly", default=False,
119
+ help="Run the model op by op without building a model function.")
120
+
121
+ if hooks:
122
+ # Construct a pretty summary of hooks.
123
+ hook_list_str = (
124
+ u"\ufeff Hook:\n" + u"\n".join([u"\ufeff {}".format(key) for key
125
+ in hooks_helper.HOOKS]))
126
+ flags.DEFINE_list(
127
+ name="hooks", short_name="hk", default="LoggingTensorHook",
128
+ help=help_wrap(
129
+ u"A list of (case insensitive) strings to specify the names of "
130
+ u"training hooks.\n{}\n\ufeff Example: `--hooks ProfilerHook,"
131
+ u"ExamplesPerSecondHook`\n See official.utils.logs.hooks_helper "
132
+ u"for details.".format(hook_list_str))
133
+ )
134
+ key_flags.append("hooks")
135
+
136
+ if export_dir:
137
+ flags.DEFINE_string(
138
+ name="export_dir", short_name="ed", default=None,
139
+ help=help_wrap("If set, a SavedModel serialization of the model will "
140
+ "be exported to this directory at the end of training. "
141
+ "See the README for more details and relevant links.")
142
+ )
143
+ key_flags.append("export_dir")
144
+
145
+ if distribution_strategy:
146
+ flags.DEFINE_string(
147
+ name="distribution_strategy", short_name="ds", default="mirrored",
148
+ help=help_wrap("The Distribution Strategy to use for training. "
149
+ "Accepted values are 'off', 'one_device', "
150
+ "'mirrored', 'parameter_server', 'collective', "
151
+ "case insensitive. 'off' means not to use "
152
+ "Distribution Strategy; 'default' means to choose "
153
+ "from `MirroredStrategy` or `OneDeviceStrategy` "
154
+ "according to the number of GPUs.")
155
+ )
156
+
157
+
158
+ return key_flags
159
+
160
+
161
+ def get_num_gpus(flags_obj):
162
+ """Treat num_gpus=-1 as 'use all'."""
163
+ if flags_obj.num_gpus != -1:
164
+ return flags_obj.num_gpus
165
+
166
+ from tensorflow.python.client import device_lib # pylint: disable=g-import-not-at-top
167
+ local_device_protos = device_lib.list_local_devices()
168
+ return sum([1 for d in local_device_protos if d.device_type == "GPU"])
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_device.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Flags for managing compute devices. Currently only contains TPU flags."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ from absl import flags
22
+ import tensorflow as tf
23
+
24
+ from TensorFlow.utils.flags._conventions import help_wrap
25
+
26
+
27
+ def require_cloud_storage(flag_names):
28
+ """Register a validator to check directory flags.
29
+ Args:
30
+ flag_names: An iterable of strings containing the names of flags to be
31
+ checked.
32
+ """
33
+ msg = "TPU requires GCS path for {}".format(", ".join(flag_names))
34
+ @flags.multi_flags_validator(["tpu"] + flag_names, message=msg)
35
+ def _path_check(flag_values): # pylint: disable=missing-docstring
36
+ if flag_values["tpu"] is None:
37
+ return True
38
+
39
+ valid_flags = True
40
+ for key in flag_names:
41
+ if not flag_values[key].startswith("gs://"):
42
+ tf.compat.v1.logging.error("{} must be a GCS path.".format(key))
43
+ valid_flags = False
44
+
45
+ return valid_flags
46
+
47
+
48
+ def define_device(tpu=True):
49
+ """Register device specific flags.
50
+ Args:
51
+ tpu: Create flags to specify TPU operation.
52
+ Returns:
53
+ A list of flags for core.py to marks as key flags.
54
+ """
55
+
56
+ key_flags = []
57
+
58
+ if tpu:
59
+ flags.DEFINE_string(
60
+ name="tpu", default=None,
61
+ help=help_wrap(
62
+ "The Cloud TPU to use for training. This should be either the name "
63
+ "used when creating the Cloud TPU, or a "
64
+ "grpc://ip.address.of.tpu:8470 url. Passing `local` will use the"
65
+ "CPU of the local instance instead. (Good for debugging.)"))
66
+ key_flags.append("tpu")
67
+
68
+ flags.DEFINE_string(
69
+ name="tpu_zone", default=None,
70
+ help=help_wrap(
71
+ "[Optional] GCE zone where the Cloud TPU is located in. If not "
72
+ "specified, we will attempt to automatically detect the GCE "
73
+ "project from metadata."))
74
+
75
+ flags.DEFINE_string(
76
+ name="tpu_gcp_project", default=None,
77
+ help=help_wrap(
78
+ "[Optional] Project name for the Cloud TPU-enabled project. If not "
79
+ "specified, we will attempt to automatically detect the GCE "
80
+ "project from metadata."))
81
+
82
+ flags.DEFINE_integer(name="num_tpu_shards", default=8,
83
+ help=help_wrap("Number of shards (TPU chips)."))
84
+
85
+ return key_flags
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/_performance.py ADDED
@@ -0,0 +1,289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Register flags for optimizing performance."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import multiprocessing
22
+
23
+ from absl import flags # pylint: disable=g-bad-import-order
24
+ import tensorflow as tf # pylint: disable=g-bad-import-order
25
+
26
+ from TensorFlow.utils.flags._conventions import help_wrap
27
+
28
+
29
+ # Map string to TensorFlow dtype
30
+ DTYPE_MAP = {
31
+ "fp16": tf.float16,
32
+ "bf16": tf.bfloat16,
33
+ "fp32": tf.float32,
34
+ }
35
+
36
+
37
+ def get_tf_dtype(flags_obj):
38
+ if getattr(flags_obj, "fp16_implementation", None) == "graph_rewrite":
39
+ # If the graph_rewrite is used, we build the graph with fp32, and let the
40
+ # graph rewrite change ops to fp16.
41
+ return tf.float32
42
+ return DTYPE_MAP[flags_obj.dtype]
43
+
44
+
45
+ def get_loss_scale(flags_obj, default_for_fp16):
46
+ dtype = get_tf_dtype(flags_obj)
47
+ if flags_obj.loss_scale == "dynamic":
48
+ return flags_obj.loss_scale
49
+ elif flags_obj.loss_scale is not None:
50
+ return float(flags_obj.loss_scale)
51
+ elif dtype == tf.float32 or dtype == tf.bfloat16:
52
+ return 1 # No loss scaling is needed for fp32
53
+ else:
54
+ assert dtype == tf.float16
55
+ return default_for_fp16
56
+
57
+
58
+ def define_performance(num_parallel_calls=False, inter_op=False, intra_op=False,
59
+ synthetic_data=False, max_train_steps=False, dtype=False,
60
+ all_reduce_alg=False, num_packs=False,
61
+ tf_gpu_thread_mode=False,
62
+ datasets_num_private_threads=False,
63
+ datasets_num_parallel_batches=False,
64
+ dynamic_loss_scale=False, fp16_implementation=False,
65
+ loss_scale=False,
66
+ tf_data_experimental_slack=False, enable_xla=False,
67
+ dataset_cache=True):
68
+ """Register flags for specifying performance tuning arguments.
69
+
70
+ Args:
71
+ num_parallel_calls: Create a flag to specify parallelism of data loading.
72
+ inter_op: Create a flag to allow specification of inter op threads.
73
+ intra_op: Create a flag to allow specification of intra op threads.
74
+ synthetic_data: Create a flag to allow the use of synthetic data.
75
+ max_train_steps: Create a flags to allow specification of maximum number
76
+ of training steps
77
+ dtype: Create flags for specifying dtype.
78
+ all_reduce_alg: If set forces a specific algorithm for multi-gpu.
79
+ num_packs: If set provides number of packs for MirroredStrategy's cross
80
+ device ops.
81
+ tf_gpu_thread_mode: gpu_private triggers us of private thread pool.
82
+ datasets_num_private_threads: Number of private threads for datasets.
83
+ datasets_num_parallel_batches: Determines how many batches to process in
84
+ parallel when using map and batch from tf.data.
85
+ dynamic_loss_scale: Allow the "loss_scale" flag to take on the value
86
+ "dynamic". Only valid if `dtype` is True.
87
+ fp16_implementation: Create fp16_implementation flag.
88
+ loss_scale: Controls the loss scaling, normally for mixed-precision
89
+ training. Can only be turned on if dtype is also True.
90
+ tf_data_experimental_slack: Determines whether to enable tf.data's
91
+ `experimental_slack` option.
92
+ enable_xla: Determines if XLA (auto clustering) is turned on.
93
+ dataset_cache: Whether to cache the training and eval dataset on workers.
94
+ Typically used to improve training performance when training data is in
95
+ remote storage and can fit into worker memory.
96
+
97
+ Returns:
98
+ A list of flags for core.py to marks as key flags.
99
+ """
100
+
101
+ key_flags = []
102
+ if num_parallel_calls:
103
+ flags.DEFINE_integer(
104
+ name="num_parallel_calls", short_name="npc",
105
+ default=multiprocessing.cpu_count(),
106
+ help=help_wrap("The number of records that are processed in parallel "
107
+ "during input processing. This can be optimized per "
108
+ "data set but for generally homogeneous data sets, "
109
+ "should be approximately the number of available CPU "
110
+ "cores. (default behavior)"))
111
+
112
+ if inter_op:
113
+ flags.DEFINE_integer(
114
+ name="inter_op_parallelism_threads", short_name="inter", default=0,
115
+ help=help_wrap("Number of inter_op_parallelism_threads to use for CPU. "
116
+ "See TensorFlow config.proto for details.")
117
+ )
118
+
119
+ if intra_op:
120
+ flags.DEFINE_integer(
121
+ name="intra_op_parallelism_threads", short_name="intra", default=0,
122
+ help=help_wrap("Number of intra_op_parallelism_threads to use for CPU. "
123
+ "See TensorFlow config.proto for details."))
124
+
125
+ if synthetic_data:
126
+ flags.DEFINE_bool(
127
+ name="use_synthetic_data", short_name="synth", default=False,
128
+ help=help_wrap(
129
+ "If set, use fake data (zeroes) instead of a real dataset. "
130
+ "This mode is useful for performance debugging, as it removes "
131
+ "input processing steps, but will not learn anything."))
132
+
133
+ if max_train_steps:
134
+ flags.DEFINE_integer(
135
+ name="max_train_steps", short_name="mts", default=None, help=help_wrap(
136
+ "The model will stop training if the global_step reaches this "
137
+ "value. If not set, training will run until the specified number "
138
+ "of epochs have run as usual. It is generally recommended to set "
139
+ "--train_epochs=1 when using this flag."
140
+ ))
141
+
142
+ if dtype:
143
+ flags.DEFINE_enum(
144
+ name="dtype", short_name="dt", default="fp32",
145
+ enum_values=DTYPE_MAP.keys(),
146
+ help=help_wrap("The TensorFlow datatype used for calculations. "
147
+ "Variables may be cast to a higher precision on a "
148
+ "case-by-case basis for numerical stability."))
149
+
150
+ loss_scale_help_text = (
151
+ "The amount to scale the loss by when the model is run. {}. Before "
152
+ "gradients are computed, the loss is multiplied by the loss scale, "
153
+ "making all gradients loss_scale times larger. To adjust for this, "
154
+ "gradients are divided by the loss scale before being applied to "
155
+ "variables. This is mathematically equivalent to training without "
156
+ "a loss scale, but the loss scale helps avoid some intermediate "
157
+ "gradients from underflowing to zero. If not provided the default "
158
+ "for fp16 is 128 and 1 for all other dtypes.{}"
159
+ )
160
+ if dynamic_loss_scale:
161
+ loss_scale_help_text = loss_scale_help_text.format(
162
+ "This can be an int/float or the string 'dynamic'",
163
+ " The string 'dynamic' can be used to dynamically determine the "
164
+ "optimal loss scale during training, but currently this "
165
+ "significantly slows down performance")
166
+ loss_scale_validation_msg = ("loss_scale should be a positive int/float "
167
+ "or the string 'dynamic'.")
168
+ else:
169
+ loss_scale_help_text = loss_scale_help_text.format(
170
+ "This must be an int/float", "")
171
+ loss_scale_validation_msg = "loss_scale should be a positive int/float."
172
+ if loss_scale:
173
+ flags.DEFINE_string(
174
+ name="loss_scale", short_name="ls", default=None,
175
+ help=help_wrap(loss_scale_help_text))
176
+
177
+ @flags.validator(flag_name="loss_scale",
178
+ message=loss_scale_validation_msg)
179
+ def _check_loss_scale(loss_scale): # pylint: disable=unused-variable
180
+ """Validator to check the loss scale flag is valid."""
181
+ if loss_scale is None:
182
+ return True # null case is handled in get_loss_scale()
183
+
184
+ if loss_scale == "dynamic" and dynamic_loss_scale:
185
+ return True
186
+
187
+ try:
188
+ loss_scale = float(loss_scale)
189
+ except ValueError:
190
+ return False
191
+
192
+ return loss_scale > 0
193
+
194
+ if fp16_implementation:
195
+ flags.DEFINE_enum(
196
+ name="fp16_implementation", default="keras",
197
+ enum_values=("keras', 'graph_rewrite"),
198
+ help=help_wrap(
199
+ "When --dtype=fp16, how fp16 should be implemented. This has no "
200
+ "impact on correctness. 'keras' uses the "
201
+ "tf.keras.mixed_precision API. 'graph_rewrite' uses the "
202
+ "tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite "
203
+ "API."))
204
+
205
+ @flags.multi_flags_validator(["fp16_implementation", "dtype",
206
+ "loss_scale"])
207
+ def _check_fp16_implementation(flags_dict):
208
+ """Validator to check fp16_implementation flag is valid."""
209
+ if (flags_dict["fp16_implementation"] == "graph_rewrite" and
210
+ flags_dict["dtype"] != "fp16"):
211
+ raise flags.ValidationError("--fp16_implementation should not be "
212
+ "specified unless --dtype=fp16")
213
+ return True
214
+
215
+ if all_reduce_alg:
216
+ flags.DEFINE_string(
217
+ name="all_reduce_alg", short_name="ara", default=None,
218
+ help=help_wrap("Defines the algorithm to use for performing all-reduce."
219
+ "When specified with MirroredStrategy for single "
220
+ "worker, this controls "
221
+ "tf.contrib.distribute.AllReduceCrossTowerOps. When "
222
+ "specified with MultiWorkerMirroredStrategy, this "
223
+ "controls "
224
+ "tf.distribute.experimental.CollectiveCommunication; "
225
+ "valid options are `ring` and `nccl`."))
226
+
227
+ if num_packs:
228
+ flags.DEFINE_integer(
229
+ name="num_packs", default=1,
230
+ help=help_wrap("Sets `num_packs` in the cross device ops used in "
231
+ "MirroredStrategy. For details, see "
232
+ "tf.distribute.NcclAllReduce."))
233
+
234
+ if tf_gpu_thread_mode:
235
+ flags.DEFINE_string(
236
+ name="tf_gpu_thread_mode", short_name="gt_mode", default=None,
237
+ help=help_wrap(
238
+ "Whether and how the GPU device uses its own threadpool.")
239
+ )
240
+
241
+ flags.DEFINE_integer(
242
+ name="per_gpu_thread_count", short_name="pgtc", default=0,
243
+ help=help_wrap(
244
+ "The number of threads to use for GPU. Only valid when "
245
+ "tf_gpu_thread_mode is not global.")
246
+ )
247
+
248
+ if datasets_num_private_threads:
249
+ flags.DEFINE_integer(
250
+ name="datasets_num_private_threads",
251
+ default=None,
252
+ help=help_wrap(
253
+ "Number of threads for a private threadpool created for all"
254
+ "datasets computation..")
255
+ )
256
+
257
+ if datasets_num_parallel_batches:
258
+ flags.DEFINE_integer(
259
+ name="datasets_num_parallel_batches",
260
+ default=None,
261
+ help=help_wrap(
262
+ "Determines how many batches to process in parallel when using "
263
+ "map and batch from tf.data.")
264
+ )
265
+
266
+ if dataset_cache:
267
+ flags.DEFINE_boolean(
268
+ name="dataset_cache",
269
+ default=True,
270
+ help=help_wrap(
271
+ "Determines whether to cache the training and eval dataset on workers. "
272
+ "Typically used to improve training performance when training and eval "
273
+ "data is in remote storage and can fit into worker memory.")
274
+ )
275
+
276
+ if tf_data_experimental_slack:
277
+ flags.DEFINE_boolean(
278
+ name="tf_data_experimental_slack",
279
+ default=False,
280
+ help=help_wrap(
281
+ "Whether to enable tf.data's `experimental_slack` option.")
282
+ )
283
+
284
+ if enable_xla:
285
+ flags.DEFINE_boolean(
286
+ name="enable_xla", default=False,
287
+ help="Whether to enable XLA auto jit compilation")
288
+
289
+ return key_flags
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/flags/core.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Public interface for flag definition.
16
+
17
+ See _example.py for detailed instructions on defining flags.
18
+ """
19
+
20
+ from __future__ import absolute_import
21
+ from __future__ import division
22
+ from __future__ import print_function
23
+
24
+ import sys
25
+ from six.moves import shlex_quote
26
+
27
+ from absl import app as absl_app
28
+ from absl import flags
29
+
30
+ from TensorFlow.utils.flags import _base
31
+ from TensorFlow.utils.flags import _benchmark
32
+ from TensorFlow.utils.flags import _conventions
33
+ from TensorFlow.utils.flags import _device
34
+ from TensorFlow.utils.flags import _distribution
35
+ from TensorFlow.utils.flags import _misc
36
+ from TensorFlow.utils.flags import _performance
37
+
38
+
39
+ def set_defaults(**kwargs):
40
+ for key, value in kwargs.items():
41
+ flags.FLAGS.set_default(name=key, value=value)
42
+
43
+
44
+ def parse_flags(argv=None):
45
+ """Reset flags and reparse. Currently only used in testing."""
46
+ flags.FLAGS.unparse_flags()
47
+ absl_app.parse_flags_with_usage(argv or sys.argv)
48
+
49
+
50
+ def register_key_flags_in_core(f):
51
+ """Defines a function in core.py, and registers its key flags.
52
+
53
+ absl uses the location of a flags.declare_key_flag() to determine the context
54
+ in which a flag is key. By making all declares in core, this allows model
55
+ main functions to call flags.adopt_module_key_flags() on core and correctly
56
+ chain key flags.
57
+
58
+ Args:
59
+ f: The function to be wrapped
60
+
61
+ Returns:
62
+ The "core-defined" version of the input function.
63
+ """
64
+
65
+ def core_fn(*args, **kwargs):
66
+ key_flags = f(*args, **kwargs)
67
+ [flags.declare_key_flag(fl) for fl in key_flags] # pylint: disable=expression-not-assigned
68
+ return core_fn
69
+
70
+
71
+ define_base = register_key_flags_in_core(_base.define_base)
72
+ # We have define_base_eager for compatibility, since it used to be a separate
73
+ # function from define_base.
74
+ define_base_eager = define_base
75
+ define_log_steps = register_key_flags_in_core(_benchmark.define_log_steps)
76
+ define_benchmark = register_key_flags_in_core(_benchmark.define_benchmark)
77
+ define_device = register_key_flags_in_core(_device.define_device)
78
+ define_image = register_key_flags_in_core(_misc.define_image)
79
+ define_performance = register_key_flags_in_core(_performance.define_performance)
80
+ define_distribution = register_key_flags_in_core(
81
+ _distribution.define_distribution)
82
+
83
+
84
+ help_wrap = _conventions.help_wrap
85
+
86
+
87
+ get_num_gpus = _base.get_num_gpus
88
+ get_tf_dtype = _performance.get_tf_dtype
89
+ get_loss_scale = _performance.get_loss_scale
90
+ DTYPE_MAP = _performance.DTYPE_MAP
91
+ require_cloud_storage = _device.require_cloud_storage
92
+
93
+ def _get_nondefault_flags_as_dict():
94
+ """Returns the nondefault flags as a dict from flag name to value."""
95
+ nondefault_flags = {}
96
+ for flag_name in flags.FLAGS:
97
+ flag_value = getattr(flags.FLAGS, flag_name)
98
+ if (flag_name != flags.FLAGS[flag_name].short_name and
99
+ flag_value != flags.FLAGS[flag_name].default):
100
+ nondefault_flags[flag_name] = flag_value
101
+ return nondefault_flags
102
+
103
+
104
+ def get_nondefault_flags_as_str():
105
+ """Returns flags as a string that can be passed as command line arguments.
106
+
107
+ E.g., returns: "--batch_size=256 --use_synthetic_data" for the following code
108
+ block:
109
+
110
+ ```
111
+ flags.FLAGS.batch_size = 256
112
+ flags.FLAGS.use_synthetic_data = True
113
+ print(get_nondefault_flags_as_str())
114
+ ```
115
+
116
+ Only flags with nondefault values are returned, as passing default flags as
117
+ command line arguments has no effect.
118
+
119
+ Returns:
120
+ A string with the flags, that can be passed as command line arguments to a
121
+ program to use the flags.
122
+ """
123
+ nondefault_flags = _get_nondefault_flags_as_dict()
124
+ flag_strings = []
125
+ for name, value in sorted(nondefault_flags.items()):
126
+ if isinstance(value, bool):
127
+ flag_str = '--{}'.format(name) if value else '--no{}'.format(name)
128
+ elif isinstance(value, list):
129
+ flag_str = '--{}={}'.format(name, ','.join(value))
130
+ else:
131
+ flag_str = '--{}={}'.format(name, value)
132
+ flag_strings.append(flag_str)
133
+ return ' '.join(shlex_quote(flag_str) for flag_str in flag_strings)
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/cloud_lib.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+
16
+ """Utilities that interact with cloud service.
17
+ """
18
+
19
+ import requests
20
+
21
+ GCP_METADATA_URL = "http://metadata/computeMetadata/v1/instance/hostname"
22
+ GCP_METADATA_HEADER = {"Metadata-Flavor": "Google"}
23
+
24
+
25
+ def on_gcp():
26
+ """Detect whether the current running environment is on GCP."""
27
+ try:
28
+ # Timeout in 5 seconds, in case the test environment has connectivity issue.
29
+ # There is not default timeout, which means it might block forever.
30
+ response = requests.get(
31
+ GCP_METADATA_URL, headers=GCP_METADATA_HEADER, timeout=5)
32
+ return response.status_code == 200
33
+ except requests.exceptions.RequestException:
34
+ return False
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/hooks.py ADDED
@@ -0,0 +1,130 @@
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+
16
+ """Hook that counts examples per second every N steps or seconds."""
17
+
18
+
19
+ from __future__ import absolute_import
20
+ from __future__ import division
21
+ from __future__ import print_function
22
+
23
+ import tensorflow as tf # pylint: disable=g-bad-import-order
24
+
25
+ from TensorFlow.utils.logs import logger
26
+
27
+
28
+ class ExamplesPerSecondHook(tf.estimator.SessionRunHook):
29
+ """Hook to print out examples per second.
30
+
31
+ Total time is tracked and then divided by the total number of steps to
32
+ get the average step time; batch_size is then used to determine the
33
+ running average of examples per second. The examples per second for the
34
+ most recent interval is also logged.
35
+ """
36
+
37
+ def __init__(self,
38
+ batch_size,
39
+ every_n_steps=None,
40
+ every_n_secs=None,
41
+ warm_steps=0,
42
+ metric_logger=None):
43
+ """Initializer for ExamplesPerSecondHook.
44
+
45
+ Args:
46
+ batch_size: Total batch size across all workers used to calculate
47
+ examples/second from global time.
48
+ every_n_steps: Log stats every n steps.
49
+ every_n_secs: Log stats every n seconds. Exactly one of the
50
+ `every_n_steps` or `every_n_secs` should be set.
51
+ warm_steps: The number of steps to be skipped before logging and running
52
+ average calculation. warm_steps steps refers to global steps across all
53
+ workers, not on each worker
54
+ metric_logger: instance of `BenchmarkLogger`, the benchmark logger that
55
+ hook should use to write the log. If None, BaseBenchmarkLogger will
56
+ be used.
57
+
58
+ Raises:
59
+ ValueError: if neither `every_n_steps` or `every_n_secs` is set, or
60
+ both are set.
61
+ """
62
+
63
+ if (every_n_steps is None) == (every_n_secs is None):
64
+ raise ValueError("exactly one of every_n_steps"
65
+ " and every_n_secs should be provided.")
66
+
67
+ self._logger = metric_logger or logger.BaseBenchmarkLogger()
68
+
69
+ self._timer = tf.estimator.SecondOrStepTimer(
70
+ every_steps=every_n_steps, every_secs=every_n_secs)
71
+
72
+ self._step_train_time = 0
73
+ self._total_steps = 0
74
+ self._batch_size = batch_size
75
+ self._warm_steps = warm_steps
76
+ # List of examples per second logged every_n_steps.
77
+ self.current_examples_per_sec_list = []
78
+
79
+ def begin(self):
80
+ """Called once before using the session to check global step."""
81
+ self._global_step_tensor = tf.compat.v1.train.get_global_step()
82
+ if self._global_step_tensor is None:
83
+ raise RuntimeError(
84
+ "Global step should be created to use StepCounterHook.")
85
+
86
+ def before_run(self, run_context): # pylint: disable=unused-argument
87
+ """Called before each call to run().
88
+
89
+ Args:
90
+ run_context: A SessionRunContext object.
91
+
92
+ Returns:
93
+ A SessionRunArgs object or None if never triggered.
94
+ """
95
+ return tf.estimator.SessionRunArgs(self._global_step_tensor)
96
+
97
+ def after_run(self, run_context, run_values): # pylint: disable=unused-argument
98
+ """Called after each call to run().
99
+
100
+ Args:
101
+ run_context: A SessionRunContext object.
102
+ run_values: A SessionRunValues object.
103
+ """
104
+ global_step = run_values.results
105
+
106
+ if self._timer.should_trigger_for_step(
107
+ global_step) and global_step > self._warm_steps:
108
+ elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
109
+ global_step)
110
+ if elapsed_time is not None:
111
+ self._step_train_time += elapsed_time
112
+ self._total_steps += elapsed_steps
113
+
114
+ # average examples per second is based on the total (accumulative)
115
+ # training steps and training time so far
116
+ average_examples_per_sec = self._batch_size * (
117
+ self._total_steps / self._step_train_time)
118
+ # current examples per second is based on the elapsed training steps
119
+ # and training time per batch
120
+ current_examples_per_sec = self._batch_size * (
121
+ elapsed_steps / elapsed_time)
122
+ # Logs entries to be read from hook during or after run.
123
+ self.current_examples_per_sec_list.append(current_examples_per_sec)
124
+ self._logger.log_metric(
125
+ "average_examples_per_sec", average_examples_per_sec,
126
+ global_step=global_step)
127
+
128
+ self._logger.log_metric(
129
+ "current_examples_per_sec", current_examples_per_sec,
130
+ global_step=global_step)
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/hooks_helper.py ADDED
@@ -0,0 +1,172 @@
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+
16
+ """Hooks helper to return a list of TensorFlow hooks for training by name.
17
+
18
+ More hooks can be added to this set. To add a new hook, 1) add the new hook to
19
+ the registry in HOOKS, 2) add a corresponding function that parses out necessary
20
+ parameters.
21
+ """
22
+
23
+ from __future__ import absolute_import
24
+ from __future__ import division
25
+ from __future__ import print_function
26
+
27
+ import tensorflow as tf # pylint: disable=g-bad-import-order
28
+
29
+ from TensorFlow.utils.logs import hooks
30
+ from TensorFlow.utils.logs import logger
31
+ from TensorFlow.utils.logs import metric_hook
32
+
33
+ _TENSORS_TO_LOG = dict((x, x) for x in ['learning_rate',
34
+ 'cross_entropy',
35
+ 'train_accuracy'])
36
+
37
+
38
+ def get_train_hooks(name_list, use_tpu=False, **kwargs):
39
+ """Factory for getting a list of TensorFlow hooks for training by name.
40
+
41
+ Args:
42
+ name_list: a list of strings to name desired hook classes. Allowed:
43
+ LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined
44
+ as keys in HOOKS
45
+ use_tpu: Boolean of whether computation occurs on a TPU. This will disable
46
+ hooks altogether.
47
+ **kwargs: a dictionary of arguments to the hooks.
48
+
49
+ Returns:
50
+ list of instantiated hooks, ready to be used in a classifier.train call.
51
+
52
+ Raises:
53
+ ValueError: if an unrecognized name is passed.
54
+ """
55
+
56
+ if not name_list:
57
+ return []
58
+
59
+ if use_tpu:
60
+ tf.compat.v1.logging.warning('hooks_helper received name_list `{}`, but a '
61
+ 'TPU is specified. No hooks will be used.'
62
+ .format(name_list))
63
+ return []
64
+
65
+ train_hooks = []
66
+ for name in name_list:
67
+ hook_name = HOOKS.get(name.strip().lower())
68
+ if hook_name is None:
69
+ raise ValueError('Unrecognized training hook requested: {}'.format(name))
70
+ else:
71
+ train_hooks.append(hook_name(**kwargs))
72
+
73
+ return train_hooks
74
+
75
+
76
+ def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs): # pylint: disable=unused-argument
77
+ """Function to get LoggingTensorHook.
78
+
79
+ Args:
80
+ every_n_iter: `int`, print the values of `tensors` once every N local
81
+ steps taken on the current worker.
82
+ tensors_to_log: List of tensor names or dictionary mapping labels to tensor
83
+ names. If not set, log _TENSORS_TO_LOG by default.
84
+ **kwargs: a dictionary of arguments to LoggingTensorHook.
85
+
86
+ Returns:
87
+ Returns a LoggingTensorHook with a standard set of tensors that will be
88
+ printed to stdout.
89
+ """
90
+ if tensors_to_log is None:
91
+ tensors_to_log = _TENSORS_TO_LOG
92
+
93
+ return tf.estimator.LoggingTensorHook(
94
+ tensors=tensors_to_log,
95
+ every_n_iter=every_n_iter)
96
+
97
+
98
+ def get_profiler_hook(model_dir, save_steps=1000, **kwargs): # pylint: disable=unused-argument
99
+ """Function to get ProfilerHook.
100
+
101
+ Args:
102
+ model_dir: The directory to save the profile traces to.
103
+ save_steps: `int`, print profile traces every N steps.
104
+ **kwargs: a dictionary of arguments to ProfilerHook.
105
+
106
+ Returns:
107
+ Returns a ProfilerHook that writes out timelines that can be loaded into
108
+ profiling tools like chrome://tracing.
109
+ """
110
+ return tf.estimator.ProfilerHook(save_steps=save_steps, output_dir=model_dir)
111
+
112
+
113
+ def get_examples_per_second_hook(every_n_steps=100,
114
+ batch_size=128,
115
+ warm_steps=5,
116
+ **kwargs): # pylint: disable=unused-argument
117
+ """Function to get ExamplesPerSecondHook.
118
+
119
+ Args:
120
+ every_n_steps: `int`, print current and average examples per second every
121
+ N steps.
122
+ batch_size: `int`, total batch size used to calculate examples/second from
123
+ global time.
124
+ warm_steps: skip this number of steps before logging and running average.
125
+ **kwargs: a dictionary of arguments to ExamplesPerSecondHook.
126
+
127
+ Returns:
128
+ Returns an ExamplesPerSecondHook that logs the current and average
129
+ examples per second.
130
+ """
131
+ return hooks.ExamplesPerSecondHook(
132
+ batch_size=batch_size, every_n_steps=every_n_steps,
133
+ warm_steps=warm_steps, metric_logger=logger.get_benchmark_logger())
134
+
135
+
136
+ def get_logging_metric_hook(tensors_to_log=None,
137
+ every_n_secs=600,
138
+ **kwargs): # pylint: disable=unused-argument
139
+ """Function to get LoggingMetricHook.
140
+
141
+ Args:
142
+ tensors_to_log: List of tensor names or dictionary mapping labels to tensor
143
+ names. If not set, log _TENSORS_TO_LOG by default.
144
+ every_n_secs: `int`, the frequency for logging the metric. Default to every
145
+ 10 mins.
146
+ **kwargs: a dictionary of arguments.
147
+
148
+ Returns:
149
+ Returns a LoggingMetricHook that saves tensor values in a JSON format.
150
+ """
151
+ if tensors_to_log is None:
152
+ tensors_to_log = _TENSORS_TO_LOG
153
+ return metric_hook.LoggingMetricHook(
154
+ tensors=tensors_to_log,
155
+ metric_logger=logger.get_benchmark_logger(),
156
+ every_n_secs=every_n_secs)
157
+
158
+
159
+ def get_step_counter_hook(**kwargs):
160
+ """Function to get StepCounterHook."""
161
+ del kwargs
162
+ return tf.estimator.StepCounterHook()
163
+
164
+
165
+ # A dictionary to map one hook name and its corresponding function
166
+ HOOKS = {
167
+ 'loggingtensorhook': get_logging_tensor_hook,
168
+ 'profilerhook': get_profiler_hook,
169
+ 'examplespersecondhook': get_examples_per_second_hook,
170
+ 'loggingmetrichook': get_logging_metric_hook,
171
+ 'stepcounterhook': get_step_counter_hook
172
+ }
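The registry above is usually consumed through get_train_hooks; a short sketch follows. Hook names must match keys in HOOKS after lowercasing, and the kwargs are example values that the individual factory functions pick out or ignore:

```python
# Illustrative only: build hooks by name through the HOOKS registry.
from TensorFlow.utils.logs import hooks_helper

train_hooks = hooks_helper.get_train_hooks(
    ['ExamplesPerSecondHook', 'LoggingTensorHook'],
    use_tpu=False,
    batch_size=256,       # consumed by get_examples_per_second_hook
    every_n_steps=100)    # consumed by get_examples_per_second_hook
```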
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/implementations/TensorFlow/utils/logs/logger.py ADDED
@@ -0,0 +1,423 @@
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+
16
+ """Logging utilities for benchmark.
17
+
18
+ For collecting local environment metrics like CPU and memory, certain python
19
+ packages need to be installed. See README for details.
20
+ """
21
+ from __future__ import absolute_import
22
+ from __future__ import division
23
+ from __future__ import print_function
24
+
25
+ import contextlib
26
+ import datetime
27
+ import json
28
+ import multiprocessing
29
+ import numbers
30
+ import os
31
+ import threading
32
+ import uuid
33
+
34
+ from six.moves import _thread as thread
35
+ from absl import flags
36
+ import tensorflow as tf
37
+ from tensorflow.python.client import device_lib
38
+
39
+ from TensorFlow.utils.logs import cloud_lib
40
+
41
+ METRIC_LOG_FILE_NAME = "metric.log"
42
+ BENCHMARK_RUN_LOG_FILE_NAME = "benchmark_run.log"
43
+ _DATE_TIME_FORMAT_PATTERN = "%Y-%m-%dT%H:%M:%S.%fZ"
44
+ GCP_TEST_ENV = "GCP"
45
+ RUN_STATUS_SUCCESS = "success"
46
+ RUN_STATUS_FAILURE = "failure"
47
+ RUN_STATUS_RUNNING = "running"
48
+
49
+
50
+ FLAGS = flags.FLAGS
51
+
52
+ # Don't use it directly. Use get_benchmark_logger to access a logger.
53
+ _benchmark_logger = None
54
+ _logger_lock = threading.Lock()
55
+
56
+
57
+ def config_benchmark_logger(flag_obj=None):
58
+ """Config the global benchmark logger."""
59
+ _logger_lock.acquire()
60
+ try:
61
+ global _benchmark_logger
62
+ if not flag_obj:
63
+ flag_obj = FLAGS
64
+
65
+ if (not hasattr(flag_obj, "benchmark_logger_type") or
66
+ flag_obj.benchmark_logger_type == "BaseBenchmarkLogger"):
67
+ _benchmark_logger = BaseBenchmarkLogger()
68
+ elif flag_obj.benchmark_logger_type == "BenchmarkFileLogger":
69
+ _benchmark_logger = BenchmarkFileLogger(flag_obj.benchmark_log_dir)
70
+ elif flag_obj.benchmark_logger_type == "BenchmarkBigQueryLogger":
71
+ from benchmark import benchmark_uploader as bu # pylint: disable=g-import-not-at-top
72
+ bq_uploader = bu.BigQueryUploader(gcp_project=flag_obj.gcp_project)
73
+ _benchmark_logger = BenchmarkBigQueryLogger(
74
+ bigquery_uploader=bq_uploader,
75
+ bigquery_data_set=flag_obj.bigquery_data_set,
76
+ bigquery_run_table=flag_obj.bigquery_run_table,
77
+ bigquery_run_status_table=flag_obj.bigquery_run_status_table,
78
+ bigquery_metric_table=flag_obj.bigquery_metric_table,
79
+ run_id=str(uuid.uuid4()))
80
+ else:
81
+ raise ValueError("Unrecognized benchmark_logger_type: %s"
82
+ % flag_obj.benchmark_logger_type)
83
+
84
+ finally:
85
+ _logger_lock.release()
86
+ return _benchmark_logger
87
+
88
+
89
+ def get_benchmark_logger():
90
+ if not _benchmark_logger:
91
+ config_benchmark_logger()
92
+ return _benchmark_logger
93
+
94
+
95
+ @contextlib.contextmanager
96
+ def benchmark_context(flag_obj):
97
+ """Context of benchmark, which will update status of the run accordingly."""
98
+ benchmark_logger = config_benchmark_logger(flag_obj)
99
+ try:
100
+ yield
101
+ benchmark_logger.on_finish(RUN_STATUS_SUCCESS)
102
+ except Exception: # pylint: disable=broad-except
103
+ # Catch all the exception, update the run status to be failure, and re-raise
104
+ benchmark_logger.on_finish(RUN_STATUS_FAILURE)
105
+ raise
106
+
107
+
108
+ class BaseBenchmarkLogger(object):
109
+ """Class to log the benchmark information to STDOUT."""
110
+
111
+ def log_evaluation_result(self, eval_results):
112
+ """Log the evaluation result.
113
+
114
+ The evaluate result is a dictionary that contains metrics defined in
115
+ model_fn. It also contains an entry for global_step which contains the value
116
+ of the global step when evaluation was performed.
117
+
118
+ Args:
119
+ eval_results: dict, the result of evaluate.
120
+ """
121
+ if not isinstance(eval_results, dict):
122
+ tf.compat.v1.logging.warning(
123
+ "eval_results should be dictionary for logging. Got %s",
124
+ type(eval_results))
125
+ return
126
+ global_step = eval_results[tf.compat.v1.GraphKeys.GLOBAL_STEP]
127
+ for key in sorted(eval_results):
128
+ if key != tf.compat.v1.GraphKeys.GLOBAL_STEP:
129
+ self.log_metric(key, eval_results[key], global_step=global_step)
130
+
131
+ def log_metric(self, name, value, unit=None, global_step=None, extras=None):
132
+ """Log the benchmark metric information to local file.
133
+
134
+ Currently the logging is done in a synchronized way. This should be updated
135
+ to log asynchronously.
136
+
137
+ Args:
138
+ name: string, the name of the metric to log.
139
+ value: number, the value of the metric. The value will not be logged if it
140
+ is not a number type.
141
+ unit: string, the unit of the metric, E.g "image per second".
142
+ global_step: int, the global_step when the metric is logged.
143
+ extras: map of string:string, the extra information about the metric.
144
+ """
145
+ metric = _process_metric_to_json(name, value, unit, global_step, extras)
146
+ if metric:
147
+ tf.compat.v1.logging.info("Benchmark metric: %s", metric)
148
+
149
+ def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
150
+ tf.compat.v1.logging.info(
151
+ "Benchmark run: %s", _gather_run_info(model_name, dataset_name,
152
+ run_params, test_id))
153
+
154
+ def on_finish(self, status):
155
+ pass
156
+
157
+
158
+ class BenchmarkFileLogger(BaseBenchmarkLogger):
159
+ """Class to log the benchmark information to local disk."""
160
+
161
+ def __init__(self, logging_dir):
162
+ super(BenchmarkFileLogger, self).__init__()
163
+ self._logging_dir = logging_dir
164
+ if not tf.io.gfile.isdir(self._logging_dir):
165
+ tf.io.gfile.makedirs(self._logging_dir)
166
+ self._metric_file_handler = tf.io.gfile.GFile(
167
+ os.path.join(self._logging_dir, METRIC_LOG_FILE_NAME), "a")
168
+
169
+ def log_metric(self, name, value, unit=None, global_step=None, extras=None):
170
+ """Log the benchmark metric information to local file.
171
+
172
+ Currently the logging is done in a synchronized way. This should be updated
173
+ to log asynchronously.
174
+
175
+ Args:
176
+ name: string, the name of the metric to log.
177
+ value: number, the value of the metric. The value will not be logged if it
178
+ is not a number type.
179
+ unit: string, the unit of the metric, E.g "image per second".
180
+ global_step: int, the global_step when the metric is logged.
181
+ extras: map of string:string, the extra information about the metric.
182
+ """
183
+ metric = _process_metric_to_json(name, value, unit, global_step, extras)
184
+ if metric:
185
+ try:
186
+ json.dump(metric, self._metric_file_handler)
187
+ self._metric_file_handler.write("\n")
188
+ self._metric_file_handler.flush()
189
+ except (TypeError, ValueError) as e:
190
+ tf.compat.v1.logging.warning(
191
+ "Failed to dump metric to log file: name %s, value %s, error %s",
192
+ name, value, e)
193
+
194
+ def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
195
+ """Collect most of the TF runtime information for the local env.
196
+
197
+ The schema of the run info follows official/benchmark/datastore/schema.
198
+
199
+ Args:
200
+ model_name: string, the name of the model.
201
+ dataset_name: string, the name of dataset for training and evaluation.
202
+ run_params: dict, the dictionary of parameters for the run, it could
203
+ include hyperparameters or other params that are important for the run.
204
+ test_id: string, the unique name of the test run, built from the combination
205
+ of key parameters, e.g. batch size, number of GPUs. It is hardware independent.
206
+ """
207
+ run_info = _gather_run_info(model_name, dataset_name, run_params, test_id)
208
+
209
+ with tf.io.gfile.GFile(os.path.join(
210
+ self._logging_dir, BENCHMARK_RUN_LOG_FILE_NAME), "w") as f:
211
+ try:
212
+ json.dump(run_info, f)
213
+ f.write("\n")
214
+ except (TypeError, ValueError) as e:
215
+ tf.compat.v1.logging.warning(
216
+ "Failed to dump benchmark run info to log file: %s", e)
217
+
218
+ def on_finish(self, status):
219
+ self._metric_file_handler.flush()
220
+ self._metric_file_handler.close()
221
+
222
+
223
+ class BenchmarkBigQueryLogger(BaseBenchmarkLogger):
224
+ """Class to log the benchmark information to BigQuery data store."""
225
+
226
+ def __init__(self,
227
+ bigquery_uploader,
228
+ bigquery_data_set,
229
+ bigquery_run_table,
230
+ bigquery_run_status_table,
231
+ bigquery_metric_table,
232
+ run_id):
233
+ super(BenchmarkBigQueryLogger, self).__init__()
234
+ self._bigquery_uploader = bigquery_uploader
235
+ self._bigquery_data_set = bigquery_data_set
236
+ self._bigquery_run_table = bigquery_run_table
237
+ self._bigquery_run_status_table = bigquery_run_status_table
238
+ self._bigquery_metric_table = bigquery_metric_table
239
+ self._run_id = run_id
240
+
241
+ def log_metric(self, name, value, unit=None, global_step=None, extras=None):
242
+ """Log the benchmark metric information to bigquery.
243
+
244
+ Args:
245
+ name: string, the name of the metric to log.
246
+ value: number, the value of the metric. The value will not be logged if it
247
+ is not a number type.
248
+ unit: string, the unit of the metric, E.g "image per second".
249
+ global_step: int, the global_step when the metric is logged.
250
+ extras: map of string:string, the extra information about the metric.
251
+ """
252
+ metric = _process_metric_to_json(name, value, unit, global_step, extras)
253
+ if metric:
254
+ # Starting new thread for bigquery upload in case it might take long time
255
+ # and impact the benchmark and performance measurement. Starting a new
256
+ # thread might have potential performance impact for models that run on
257
+ # CPU.
258
+ thread.start_new_thread(
259
+ self._bigquery_uploader.upload_benchmark_metric_json,
260
+ (self._bigquery_data_set,
261
+ self._bigquery_metric_table,
262
+ self._run_id,
263
+ [metric]))
264
+
265
+ def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
266
+ """Collect most of the TF runtime information for the local env.
267
+
268
+ The schema of the run info follows official/benchmark/datastore/schema.
269
+
270
+ Args:
271
+ model_name: string, the name of the model.
272
+ dataset_name: string, the name of dataset for training and evaluation.
273
+ run_params: dict, the dictionary of parameters for the run, it could
274
+ include hyperparameters or other params that are important for the run.
275
+ test_id: string, the unique name of the test run, built from the combination
276
+ of key parameters, e.g. batch size, number of GPUs. It is hardware independent.
277
+ """
278
+ run_info = _gather_run_info(model_name, dataset_name, run_params, test_id)
279
+ # Starting new thread for bigquery upload in case it might take long time
280
+ # and impact the benchmark and performance measurement. Starting a new
281
+ # thread might have potential performance impact for models that run on CPU.
282
+ thread.start_new_thread(
283
+ self._bigquery_uploader.upload_benchmark_run_json,
284
+ (self._bigquery_data_set,
285
+ self._bigquery_run_table,
286
+ self._run_id,
287
+ run_info))
288
+ thread.start_new_thread(
289
+ self._bigquery_uploader.insert_run_status,
290
+ (self._bigquery_data_set,
291
+ self._bigquery_run_status_table,
292
+ self._run_id,
293
+ RUN_STATUS_RUNNING))
294
+
295
+ def on_finish(self, status):
296
+ self._bigquery_uploader.update_run_status(
297
+ self._bigquery_data_set,
298
+ self._bigquery_run_status_table,
299
+ self._run_id,
300
+ status)
301
+
302
+
303
+ def _gather_run_info(model_name, dataset_name, run_params, test_id):
304
+ """Collect the benchmark run information for the local environment."""
305
+ run_info = {
306
+ "model_name": model_name,
307
+ "dataset": {"name": dataset_name},
308
+ "machine_config": {},
309
+ "test_id": test_id,
310
+ "run_date": datetime.datetime.utcnow().strftime(
311
+ _DATE_TIME_FORMAT_PATTERN)}
312
+ _collect_tensorflow_info(run_info)
313
+ _collect_tensorflow_environment_variables(run_info)
314
+ _collect_run_params(run_info, run_params)
315
+ _collect_cpu_info(run_info)
316
+ _collect_memory_info(run_info)
317
+ _collect_test_environment(run_info)
318
+ return run_info
319
+
320
+
321
+ def _process_metric_to_json(
322
+ name, value, unit=None, global_step=None, extras=None):
323
+ """Validate the metric data and generate JSON for insert."""
324
+ if not isinstance(value, numbers.Number):
325
+ tf.compat.v1.logging.warning(
326
+ "Metric value to log should be a number. Got %s", type(value))
327
+ return None
328
+
329
+ extras = _convert_to_json_dict(extras)
330
+ return {
331
+ "name": name,
332
+ "value": float(value),
333
+ "unit": unit,
334
+ "global_step": global_step,
335
+ "timestamp": datetime.datetime.utcnow().strftime(
336
+ _DATE_TIME_FORMAT_PATTERN),
337
+ "extras": extras}
338
+
339
+
340
+ def _collect_tensorflow_info(run_info):
341
+ run_info["tensorflow_version"] = {
342
+ "version": tf.version.VERSION, "git_hash": tf.version.GIT_VERSION}
343
+
344
+
345
+ def _collect_run_params(run_info, run_params):
346
+ """Log the parameter information for the benchmark run."""
347
+ def process_param(name, value):
348
+ type_check = {
349
+ str: {"name": name, "string_value": value},
350
+ int: {"name": name, "long_value": value},
351
+ bool: {"name": name, "bool_value": str(value)},
352
+ float: {"name": name, "float_value": value},
353
+ }
354
+ return type_check.get(type(value),
355
+ {"name": name, "string_value": str(value)})
356
+ if run_params:
357
+ run_info["run_parameters"] = [
358
+ process_param(k, v) for k, v in sorted(run_params.items())]
359
+
360
+
361
+ def _collect_tensorflow_environment_variables(run_info):
362
+ run_info["tensorflow_environment_variables"] = [
363
+ {"name": k, "value": v}
364
+ for k, v in sorted(os.environ.items()) if k.startswith("TF_")]
365
+
366
+
367
+ # The following code is mirrored from tensorflow/tools/test/system_info_lib
368
+ # which is not exposed for import.
369
+ def _collect_cpu_info(run_info):
370
+ """Collect the CPU information for the local environment."""
371
+ cpu_info = {}
372
+
373
+ cpu_info["num_cores"] = multiprocessing.cpu_count()
374
+
375
+ try:
376
+ # Note: cpuinfo is not installed in the TensorFlow OSS tree.
377
+ # It is installable via pip.
378
+ import cpuinfo # pylint: disable=g-import-not-at-top
379
+
380
+ info = cpuinfo.get_cpu_info()
381
+ cpu_info["cpu_info"] = info["brand"]
382
+ cpu_info["mhz_per_cpu"] = info["hz_advertised_raw"][0] / 1.0e6
383
+
384
+ run_info["machine_config"]["cpu_info"] = cpu_info
385
+ except ImportError:
386
+ tf.compat.v1.logging.warn(
387
+ "'cpuinfo' not imported. CPU info will not be logged.")
388
+
389
+
390
+ def _collect_memory_info(run_info):
391
+ try:
392
+ # Note: psutil is not installed in the TensorFlow OSS tree.
393
+ # It is installable via pip.
394
+ import psutil # pylint: disable=g-import-not-at-top
395
+ vmem = psutil.virtual_memory()
396
+ run_info["machine_config"]["memory_total"] = vmem.total
397
+ run_info["machine_config"]["memory_available"] = vmem.available
398
+ except ImportError:
399
+ tf.compat.v1.logging.warn(
400
+ "'psutil' not imported. Memory info will not be logged.")
401
+
402
+
403
+ def _collect_test_environment(run_info):
404
+ """Detect the local environment, eg GCE, AWS or DGX, etc."""
405
+ if cloud_lib.on_gcp():
406
+ run_info["test_environment"] = GCP_TEST_ENV
407
+ # TODO(scottzhu): Add more testing env detection for other platform
408
+
409
+
410
+ def _parse_gpu_model(physical_device_desc):
411
+ # Assume all the connected GPUs are the same model
412
+ for kv in physical_device_desc.split(","):
413
+ k, _, v = kv.partition(":")
414
+ if k.strip() == "name":
415
+ return v.strip()
416
+ return None
417
+
418
+
419
+ def _convert_to_json_dict(input_dict):
420
+ if input_dict:
421
+ return [{"name": k, "value": v} for k, v in sorted(input_dict.items())]
422
+ else:
423
+ return []
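A minimal sketch of the logging flow defined above: configure a logger, run inside benchmark_context, and emit a metric. The flag object here is a stand-in (real callers pass absl FLAGS with benchmark_logger_type defined):

```python
# Illustrative only: drive the benchmark logger with a simple flag stand-in.
from types import SimpleNamespace
from TensorFlow.utils.logs import logger

flag_obj = SimpleNamespace(benchmark_logger_type="BaseBenchmarkLogger")
with logger.benchmark_context(flag_obj):
    bench_logger = logger.get_benchmark_logger()
    bench_logger.log_metric("top_1_accuracy", 0.759, global_step=1000)
```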
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/scripts/launch_keras_resnet_hvd.sh ADDED
@@ -0,0 +1,618 @@
1
+ #!/bin/bash
2
+
3
+ DEBUG=${DEBUG:-0}
4
+ if [[ $DEBUG -eq 1 ]]; then
5
+ set -x
6
+ env
7
+ fi
8
+
9
+ # Basic paths
10
+ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
11
+ export BASE_PATH="$( cd "$(dirname "$(readlink -e ${SCRIPT_DIR}/TensorFlow)" )" && pwd)"
12
+
13
+ PYTHONPATH=${BASE_PATH}:$PYTHONPATH
14
+ IMAGENET_DIR=/root/datasets/imagenet/tf_records
15
+ exit_code=0
16
+
17
+ # Determine OpenMPI prefix from location of mpirun in the system
18
+ OMPI_PREFIX=$(which mpirun)
19
+ if [[ ! -z $OMPI_PREFIX ]]; then
20
+ OMPI_PREFIX=$(dirname $(dirname ${OMPI_PREFIX}) )
21
+ fi
22
+
23
+ # Fixed variables needed by this script
24
+ export NUM_WORKERS_PER_HLS=${NUM_WORKERS_PER_HLS:-4}
25
+ export HLS_TYPE=${HLS_TYPE:-HLS2}
26
+
27
+ function help()
28
+ {
29
+ echo "Usage:"
30
+ echo "$0 [ -key1 value1 -key2 value2 .... -keyn valuen ]"
31
+ echo "-b | --batch-size Batch size"
32
+ echo "-c | --config Configuration file path (defaults to ./defaults.cfg)"
33
+ echo "-p | --cpu-pin [ none | cpu | numa ]"
34
+ echo "-a | --data-dir Imagenet data directory"
35
+ echo "-m | --model-dir Model dir, defaults to /tmp/resnet_50"
36
+ echo "-dc | --dataset_cache Enable (true) or disable (false) dataset caching"
37
+ echo "-noEval | --disable_eval Enable (0) or disable (1) evaluation"
38
+ echo "-bf16 | --enable_bf16"
39
+ echo "-mlperf | --enable_mlperf"
40
+ echo "-e | --epochs Number of epochs"
41
+ echo "-ebe | --epochs_between_eval Number of training epochs between eval"
42
+ echo "-eof | --epoch_eval_offset"
43
+ echo "-hf | --hostfile Host file path, 'localhost' is used if no file is provided"
44
+ echo "-lbs | --label_smoothing"
45
+ echo "-l | --lars-opt Enable LARS optimizer"
46
+ echo "-lde | --lars_decay_epochs"
47
+ echo "-mm | --momentum"
48
+ echo "-md | --modeling Enable (1) or disable (0) TF dumpos for SPARTA modeling"
49
+ echo "-w | --number-worker Number of Gaudis"
50
+ echo "-rs | --resnet-size Resnet size"
51
+ echo "-slr | --start_learning_rate"
52
+ echo "-sth | --stop_thold Target accuracy"
53
+ echo "-s | --steps Display logs for evey number of steps"
54
+ echo "-sl | --steps-per-loop Number of steps per loop in Keras implementation"
55
+ echo "-sd | --synthetic_data Enable (1) or disable (0) synthetic dataset"
56
+ echo "-tev | --train_eval"
57
+ echo "-ts | --train_steps Train steps, will be overwritten if epochs > 0"
58
+ echo "-u | --use_horovod Enable (1) or disable (0) horovod use"
59
+ echo "-we | --warmup_epochs"
60
+ echo "-wd | --weight_decay"
61
+ echo "-nas | --num_accumulation_steps"
62
+ echo "-crc | --clean_recipe_cache Clean TF recipe cache Enable (1) disable (0), default: 1"
63
+ echo "-tcp | --mpi_tcp_include"
64
+ echo "-log | --log_dir"
65
+ echo "-ps | --pod_size"
66
+ echo "-ntf | --num_train_files Number of training tf records"
67
+ echo "-nef | --num_eval_files Number of evaluation tf records"
68
+ echo "-sfg | --signaling_from_graph Enable (1) or disable (0) signaling from graph, default is 1"
69
+ }
70
+
71
+ function getmulti_hls_ips()
72
+ {
73
+ if [[ $USE_HOROVOD -ne 1 ]]; then
74
+ return
75
+ fi
76
+
77
+ multi_hcl_ip="MULTI_HLS_IPS="
78
+ hostsFile=$1
79
+ firstHost=1
80
+ hostCount=0
81
+
82
+ # iterate over non-empty and non-commented lines
83
+ for h in $(cat $hostsFile | sed '/^$/d' | grep -v '^#'); do
84
+ if [[ $firstHost -eq 1 ]]; then
85
+ firstHost=0
86
+ else
87
+ multi_hcl_ip+=","
88
+ fi
89
+ multi_hcl_ip+=$h
90
+ hostCount=$((hostCount + 1))
91
+ done
92
+
93
+ echo "[getmulti_hls_ips] Host Count : $hostCount"
94
+ echo "[getmulti_hls_ips] Exporting : $multi_hcl_ip"
95
+ export $multi_hcl_ip
96
+ }
97
+
98
+ function generate_mpi_hostfile()
99
+ {
100
+ if [[ $USE_HOROVOD -ne 1 ]]; then
101
+ return
102
+ fi
103
+
104
+ echo "Generating MPI hostfile..."
105
+ local num_nodes=${2:-8}
106
+ local file_name="hostfile"
107
+ export MPI_HOSTFILE_PATH=$1/${file_name}
108
+
109
+ rm -rf ${MPI_HOSTFILE_PATH}
110
+ echo "PATH: ${MPI_HOSTFILE_PATH}"
111
+ touch ${MPI_HOSTFILE_PATH}
112
+
113
+ IFS=',' read -ra IPS <<< "$MULTI_HLS_IPS"
114
+ for i in "${IPS[@]}"; do
115
+ echo "$i slots=${num_nodes}" >> ${MPI_HOSTFILE_PATH}
116
+ done
117
+
118
+ echo "Config: "
119
+ cat ${MPI_HOSTFILE_PATH}
120
+ }
121
+
122
+ function run_per_ip()
123
+ {
124
+ if [[ $USE_HOROVOD -ne 1 ]]; then
125
+ _cmd="$@"
126
+ $_cmd
127
+ return 0
128
+ fi
129
+
130
+ if [ -n "$OMPI_COMM_WORLD_SIZE" ]; then
131
+ print_error "Function run_per_ip is not meant to be ran from within an OpenMPI context. It is intended to invoke mpirun by itelf."
132
+ exit 1
133
+ fi
134
+
135
+ _cmd="$@"
136
+
137
+ if [[ -z ${MULTI_HLS_IPS} ]]; then
138
+ echo "[launch_keras_resnet_hvd] MULTI_HLS_IPS undefined - maybe a missing /root/shared/hosts file?"
139
+ exit -1
140
+ else
141
+ if [ -n "$MPI_TCP_INCLUDE" ]; then
142
+ _option_btl_tcp_if_include="--mca btl_tcp_if_include ${MPI_TCP_INCLUDE}"
143
+ else
144
+ _option_btl_tcp_if_include=""
145
+ fi
146
+
147
+ mpirun --allow-run-as-root \
148
+ --mca plm_rsh_args -p${SSH_PORT} \
149
+ ${_option_btl_tcp_if_include} \
150
+ --tag-output \
151
+ --merge-stderr-to-stdout \
152
+ --prefix ${OMPI_PREFIX} \
153
+ -H ${MULTI_HLS_IPS} \
154
+ bash -c "`declare`; `declare -x`; ($_cmd 2>&1)" 2>/dev/null
155
+ fi
156
+ }
157
+
158
+ # Parse command line options
159
+ unset __bsize
160
+ unset __config
161
+ unset __data_dir
162
+ unset __jpeg_data_dir
163
+ unset __dataset_cache
164
+ unset __disable_eval
165
+ unset __enable_bf16_conversion
166
+ unset __enable_lars
167
+ unset __enable_mlperf
168
+ unset __epochs
169
+ unset __epochs_between_eval
170
+ unset __eval_offset_epochs
171
+ unset __hostfile
172
+ unset __label_smoothing
173
+ unset __lars_decay_epochs
174
+ unset __momentum
175
+ unset __modeling
176
+ unset __number_Worker
177
+ unset __resnetSize
178
+ unset __run_number
179
+ unset __start_learning_rate
180
+ unset __steps
181
+ unset __steps_per_loop
182
+ unset __stop_thold
183
+ unset __train_eval
184
+ unset __train_steps
185
+ unset __use_horovod
186
+ unset __synthetic_data
187
+ unset __warmup_epochs
188
+ unset __weight_decay
189
+ unset __num_accumulation_steps
190
+ unset __workload_to_cpu_pin_type
191
+ unset __dataset_cache
192
+ unset __clean_recipe_cache
193
+ unset __mpi_tcp_include
194
+ unset __log_dir
195
+ unset __model_dir
196
+ unset __pod_size
197
+ unset __num_train_files
198
+ unset __num_eval_files
199
+ unset __ssh_port
200
+ unset __signaling_from_graph
201
+
202
+ while [ -n "$1" ]; do
203
+ case $1 in
204
+ -rs | --resnet-size )
205
+ shift
206
+ __resnetSize=$1
207
+ ;;
208
+ -c | --config )
209
+ shift
210
+ __config=$1
211
+ ;;
212
+ -a | --data-dir )
213
+ shift
214
+ __data_dir=$1
215
+ ;;
216
+ -ja | --jpeg-data-dir )
217
+ shift
218
+ __jpeg_data_dir=$1
219
+ ;;
220
+ -m | --model-dir )
221
+ shift
222
+ __model_dir=$1
223
+ ;;
224
+ -b | --batch-size )
225
+ shift
226
+ __bsize=$1
227
+ ;;
228
+ -s | --steps )
229
+ shift
230
+ __steps=$1
231
+ ;;
232
+ -sl | --steps-per-loop )
233
+ shift
234
+ __steps_per_loop=$1
235
+ ;;
236
+ -e | --epochs )
237
+ shift
238
+ __epochs=$1
239
+ ;;
240
+ -w | --number-worker )
241
+ shift
242
+ __number_Worker=$1
243
+ ;;
244
+ -hf | --hostfile)
245
+ shift
246
+ __hostfile=$1
247
+ ;;
248
+ -l | --lars-opt)
249
+ shift
250
+ __enable_lars=$1
251
+ ;;
252
+ -dc | --dataset_cache)
253
+ shift
254
+ __dataset_cache=$1
255
+ ;;
256
+ -ebe | --epochs_between_eval)
257
+ shift
258
+ __epochs_between_eval=$1
259
+ ;;
260
+ -ts | --train_steps)
261
+ shift
262
+ __train_steps=$1
263
+ ;;
264
+ -we | --warmup_epochs)
265
+ shift
266
+ __warmup_epochs=$1
267
+ ;;
268
+ -wd | --weight_decay)
269
+ shift
270
+ __weight_decay=$1
271
+ ;;
272
+ -nas | --num_accumulation_steps)
273
+ shift
274
+ __num_accumulation_steps=$1
275
+ ;;
276
+ -lbs | --label_smoothing)
277
+ shift
278
+ __label_smoothing=$1
279
+ ;;
280
+ -slr | --start_learning_rate)
281
+ shift
282
+ __start_learning_rate=$1
283
+ ;;
284
+ -sd | --synthetic_data)
285
+ shift
286
+ __synthetic_data=$1
287
+ ;;
288
+ -mlperf | --enable_mlperf)
289
+ shift
290
+ __enable_mlperf=$1
291
+ ;;
292
+ -noEval | --disable_eval)
293
+ shift
294
+ __disable_eval=$1
295
+ ;;
296
+ -tev | --train_eval)
297
+ shift
298
+ __train_eval=$1
299
+ ;;
300
+ -bf16 | --enable_bf16)
301
+ shift
302
+ __enable_bf16_conversion=$1
303
+ ;;
304
+ -eof | --epoch_eval_offset)
305
+ shift
306
+ __eval_offset_epochs=$1
307
+ ;;
308
+ -sth | --stop_thold)
309
+ shift
310
+ __stop_thold=$1
311
+ ;;
312
+ -mm | --momentum)
313
+ shift
314
+ __momentum=$1
315
+ ;;
316
+ -md | --modeling)
317
+ shift
318
+ __modeling=$1
319
+ ;;
320
+ -rn | --run-number )
321
+ shift
322
+ __run_number=$1
323
+ ;;
324
+ -p | --cpu-pin)
325
+ shift
326
+ __workload_to_cpu_pin_type=$1
327
+ case ${__workload_to_cpu_pin_type} in
328
+ numa | cpu | none )
329
+ ;;
330
+ *)
331
+ echo "--cpu-pin must be one of the following numa | cpu | none "
332
+ exit 1
333
+ esac
334
+ ;;
335
+ -u | --use_horovod)
336
+ shift
337
+ __use_horovod=$1
338
+ ;;
339
+ -dc | --dataset_cache)
340
+ shift
341
+ __dataset_cache=$1
342
+ ;;
343
+ -lde | --lars_decay_epochs)
344
+ shift
345
+ __lars_decay_epochs=$1
346
+ ;;
347
+ -crc | --clean_recipe_cache)
348
+ shift
349
+ __clean_recipe_cache=$1
350
+ ;;
351
+ -tcp | --mpi_tcp_include)
352
+ shift
353
+ __mpi_tcp_include=$1
354
+ ;;
355
+ -log | --log_dir)
356
+ shift
357
+ __log_dir=$1
358
+ ;;
359
+ -ps | --pod_size)
360
+ shift
361
+ __pod_size=$1
362
+ ;;
363
+ -ntf | --num_train_files)
364
+ shift
365
+ __num_train_files=$1
366
+ ;;
367
+ -nef | --num_eval_files)
368
+ shift
369
+ __num_eval_files=$1
370
+ ;;
371
+ -port | --ssh_port)
372
+ shift
373
+ __ssh_port=$1
374
+ ;;
375
+ -sfg | --signaling_from_graph)
376
+ shift
377
+ __signaling_from_graph=$1
378
+ ;;
379
+ -h | --help)
380
+ help
381
+ exit 1
382
+ ;;
383
+ * )
384
+ echo "The parameter $1 is not allowed"
385
+ help
386
+ ;;
387
+ esac
388
+ shift
389
+ done
390
+
391
+ # Set default values for environmental variable
392
+ export CFG_FILE=${__config:-"${BASE_PATH}/defaults.cfg"}
393
+ export HOST_FILE=${__hostfile:-"${OMPI_MCA_orte_default_hostfile}"}
394
+ export SSH_PORT=${__ssh_port:-"3022"}
395
+
396
+ if [[ -f ${CFG_FILE} ]]; then
397
+ source ${CFG_FILE}
398
+ else
399
+ echo "Could not find ${CFG_FILE}"
400
+ exit 1
401
+ fi
402
+
403
+ # Set default directory name
404
+ WORK_DIR=/tmp/resnet50
405
+
406
+ # set default LOG_DIR
407
+ export testdate=`date +%Y-%m-%d`
408
+ export testtime=`date +%H%M%S`
409
+ export LOG_DIR=/root/scratch/resnet/resnet_gaudi${NUM_WORKERS}_${testdate}_${testtime}
410
+
411
+ # Override defaults with command line options if needed
412
+ export RESNET_SIZE=${__resnetSize:-"$RESNET_SIZE"}
413
+ export IMAGENET_DIR=${__data_dir:-"$IMAGENET_DIR"}
414
+ export JPEG_IMAGENET_DIR=${__jpeg_data_dir:-"$JPEG_IMAGENET_DIR"}
415
+ export BATCH_SIZE=${__bsize:-"$BATCH_SIZE"}
416
+ export TRAIN_EPOCHS=${__epochs:-"$TRAIN_EPOCHS"}
417
+ export TRAIN_STEPS=${__train_steps:-"$TRAIN_STEPS"}
418
+ export DISPLAY_STEPS=${__steps:-"$DISPLAY_STEPS"}
419
+ export STEPS_PER_LOOP=${__steps_per_loop:-"$STEPS_PER_LOOP"}
420
+ export NUM_WORKERS=${__number_Worker:-"$NUM_WORKERS"}
421
+ export USE_LARS_OPTIMIZER=${__enable_lars:-"$USE_LARS_OPTIMIZER"}
422
+ export CPU_BIND_TYPE=${__workload_to_cpu_pin_type:-"$CPU_BIND_TYPE"}
423
+ export EPOCHS_BETWEEN_EVALS=${__epochs_between_eval:-"$EPOCHS_BETWEEN_EVALS"}
424
+ export WEIGHT_DECAY=${__weight_decay:-"$WEIGHT_DECAY"}
425
+ export NUM_ACCUMULATION_STEPS=${__num_accumulation_steps:-"$NUM_ACCUMULATION_STEPS"}
426
+ export LABEL_SMOOTH=${__label_smoothing:-"$LABEL_SMOOTH"}
427
+ export BASE_LEARNING_RATE=${__start_learning_rate:-"$BASE_LEARNING_RATE"}
428
+ export WARMUP_EPOCHS=${__warmup_epochs:-"$WARMUP_EPOCHS"}
429
+ export USE_MLPERF=${__enable_mlperf:-"$USE_MLPERF"}
430
+ export NO_EVAL=${__disable_eval:-"$NO_EVAL"}
431
+ export STOP_THRESHOLD=${__stop_thold:-"$STOP_THRESHOLD"}
432
+ export LR_MOMENTUM=${__momentum:-"$LR_MOMENTUM"}
433
+ export EVAL_OFFSET_EPOCHS=${__eval_offset_epochs:-"$EVAL_OFFSET_EPOCHS"}
434
+ export TF_BF16_CONVERSION=${__enable_bf16_conversion:-"$TF_BF16_CONVERSION"}
435
+ export USE_HOROVOD=${__use_horovod:-"$USE_HOROVOD"}
436
+ export DATASET_CACHE=${__dataset_cache:-"$DATASET_CACHE"}
437
+ export LARS_DECAY_EPOCHS=${__lars_decay_epochs:-"$LARS_DECAY_EPOCHS"}
438
+ export SYNTHETIC_DATA=${__synthetic_data:-"$SYNTHETIC_DATA"}
439
+ if [ -z ${__train_eval} ]; then
440
+ export TRAIN_AND_EVAL=${__train_eval:-"$TRAIN_AND_EVAL"}
441
+ fi
442
+ export TRAIN_STEPS=${TRAIN_STEPS:--1}
443
+ export MODELING=${__modeling:-"$MODELING"}
444
+ export CLEAN_RECIPE_CACHE=${__clean_recipe_cache:-1}
445
+ export MPI_TCP_INCLUDE=${__mpi_tcp_include:-$MPI_TCP_INCLUDE}
446
+ export LOG_DIR=${__log_dir:-"$LOG_DIR"}
447
+ export WORK_DIR=${__model_dir:-"$WORK_DIR"}
448
+ export NUM_TRAIN_FILES=${__num_train_files:-"$NUM_TRAIN_FILES"}
449
+ export NUM_EVAL_FILES=${__num_eval_files:-"$NUM_EVAL_FILES"}
450
+ # Workaround for SW-75839
451
+ export TF_ENABLE_DYNAMIC_SHAPES=${TF_ENABLE_DYNAMIC_SHAPES:-false}
452
+ export SIGNALING_FROM_GRAPH=${__signaling_from_graph:-1}
453
+
454
+ echo "[launch_keras_resnet_hvd] General Settings:"
455
+ echo "[launch_keras_resnet_hvd] CFG_FILE" $CFG_FILE
456
+ echo "[launch_keras_resnet_hvd] HOST_FILE" $HOST_FILE
457
+ echo "[launch_keras_resnet_hvd] NUM_WORKERS" $NUM_WORKERS
458
+ echo "[launch_keras_resnet_hvd] RESNET_SIZE" $RESNET_SIZE
459
+ echo "[launch_keras_resnet_hvd] IMAGENET_DIR" $IMAGENET_DIR
460
+ echo "[launch_keras_resnet_hvd] JPEG_IMAGENET_DIR" $JPEG_IMAGENET_DIR
461
+ echo "[launch_keras_resnet_hvd] BATCH_SIZE" $BATCH_SIZE
462
+ echo "[launch_keras_resnet_hvd] TRAIN_EPOCHS" $TRAIN_EPOCHS
463
+ echo "[launch_keras_resnet_hvd] TRAIN_STEPS" $TRAIN_STEPS
464
+ echo "[launch_keras_resnet_hvd] DISPLAY_STEPS" $DISPLAY_STEPS
465
+ echo "[launch_keras_resnet_hvd] USE_LARS_OPTIMIZER" $USE_LARS_OPTIMIZER
466
+ echo "[launch_keras_resnet_hvd] CPU_BIND_TYPE" $CPU_BIND_TYPE
467
+ echo "[launch_keras_resnet_hvd] EPOCHS_BETWEEN_EVALS" $EPOCHS_BETWEEN_EVALS
468
+ echo "[launch_keras_resnet_hvd] TRAIN_AND_EVAL" $TRAIN_AND_EVAL
469
+ echo "[launch_keras_resnet_hvd] TF_BF16_CONVERSION" $TF_BF16_CONVERSION
470
+ echo "[launch_keras_resnet_hvd] USE_HOROVOD" $USE_HOROVOD
471
+ echo "[launch_keras_resnet_hvd] DATASET_CACHE" $DATASET_CACHE
472
+ echo "[launch_keras_resnet_hvd] MODELING" $MODELING
473
+ echo "[launch_keras_resnet_hvd] MPI_TCP_INCLUDE" $MPI_TCP_INCLUDE
474
+ echo "[launch_keras_resnet_hvd] LOG_DIR" $LOG_DIR
475
+ echo "[launch_keras_resnet_hvd] NUM_TRAIN_FILES" $NUM_TRAIN_FILES
476
+ echo "[launch_keras_resnet_hvd] NUM_EVAL_FILES" $NUM_EVAL_FILES
477
+ echo
478
+ echo "[launch_keras_resnet_hvd] Learning Setting:"
479
+ echo "[launch_keras_resnet_hvd] WEIGHT_DECAY" $WEIGHT_DECAY
480
+ echo "[launch_keras_resnet_hvd] NUM_ACCUMULATION_STEPS" $NUM_ACCUMULATION_STEPS
481
+ echo "[launch_keras_resnet_hvd] LABEL_SMOOTH" $LABEL_SMOOTH
482
+ echo "[launch_keras_resnet_hvd] BASE_LEARNING_RATE" $BASE_LEARNING_RATE
483
+ echo "[launch_keras_resnet_hvd] WARMUP_EPOCHS" $WARMUP_EPOCHS
484
+ echo "[launch_keras_resnet_hvd] USE_MLPERF" $USE_MLPERF
485
+ echo "[launch_keras_resnet_hvd] NO_EVAL" $NO_EVAL
486
+ echo "[launch_keras_resnet_hvd] STOP_THRESHOLD" $STOP_THRESHOLD
487
+ echo "[launch_keras_resnet_hvd] LR_MOMENTUM" $LR_MOMENTUM
488
+ echo "[launch_keras_resnet_hvd] EVAL_OFFSET_EPOCHS" $EVAL_OFFSET_EPOCHS
489
+ echo "[launch_keras_resnet_hvd] LARS_DECAY_EPOCHS" $LARS_DECAY_EPOCHS
490
+ echo "[launch_keras_resnet_hvd] SYNTHETIC_DATA" $SYNTHETIC_DATA
491
+ echo "[launch_keras_resnet_hvd] WORK_DIR" $WORK_DIR
492
+ echo "[launch_keras_resnet_hvd] TF_ENABLE_DYNAMIC_SHAPES" $TF_ENABLE_DYNAMIC_SHAPES
493
+ echo "[launch_keras_resnet_hvd] SIGNALING_FROM_GRAPH" $SIGNALING_FROM_GRAPH
494
+
495
+ # This check always needs to go after all environment variable processing is complete.
496
+ if [ ! -d ${IMAGENET_DIR} ] && [ ! -d ${JPEG_IMAGENET_DIR} ]; then
497
+ echo "[launch_keras_resnet_hvd] ImageNet image database not found on ${IMAGENET_DIR}"
498
+ exit -1
499
+ fi
500
+
501
+ rm -rf $LOG_DIR
502
+ mkdir -p $WORK_DIR
503
+ mkdir -p $LOG_DIR
504
+
505
+ export MULTI_HLS_IPS=localhost
506
+ if [[ -f ${HOST_FILE} ]]; then
507
+ getmulti_hls_ips ${HOST_FILE}
508
+ fi
509
+
510
+ # Set up the cache directory and create ramdisk
511
+ export TF_RECIPE_CACHE_PATH=${WORK_DIR}/graph_dump_recipes
512
+ if [[ $CLEAN_RECIPE_CACHE -eq 1 ]]; then
513
+ run_per_ip rm -rf ${TF_RECIPE_CACHE_PATH}
514
+ fi
515
+ run_per_ip rm -rf ${WORK_DIR}/resnet_synth_data
516
+ run_per_ip mkdir -p ${TF_RECIPE_CACHE_PATH}
517
+
518
+ run_per_ip 'mkdir -p ${BASE_PATH}/log'
519
+
520
+ printf "[launch_keras_resnet_hvd] Cleaning temp files...\n\n"
521
+ run_per_ip rm -rf /tmp/checkpoint /tmp/eval /tmp/events.out.tfevents.* /tmp/graph.pbtxt /tmp/model.ckpt-*
522
+ run_per_ip rm -rf /tmp/rank_*/checkpoint /tmp/rank_*/eval /tmp/rank_*/events.out.tfevents.* /tmp/rank_*/graph.pbtxt /tmp/rank_*/model.ckpt-*
523
+
524
+ if [[ $USE_HOROVOD -eq 1 ]]; then
525
+
526
+ if [[ -z ${MULTI_HLS_IPS} ]]; then
527
+ echo "[launch_keras_resnet_hvd] MULTI_HLS_IPS undefined - maybe a missing /root/shared/hosts file?"
528
+ exit -1
529
+ fi
530
+
531
+ generate_mpi_hostfile ${WORK_DIR} ${NUM_WORKERS_PER_HLS}
532
+
533
+ # Substituted this by the calculation below
534
+ #calc_optimal_cpu_resources_for_mpi veces leri
535
+ MPI_MAP_BY=socket
536
+ MPI_MAP_BY_PE=`lscpu | grep "^CPU(s):"| awk -v NUM=${NUM_WORKERS_PER_HLS} '{print int($2/NUM/2)}'`
537
+ if [[ "$CPU_BIND_TYPE" == "numa" || "$CPU_BIND_TYPE" == "none" ]]; then
538
+ MPIRUN_ARGS_MAP_BY_PE="-bind-to none"
539
+ else
540
+ MPIRUN_ARGS_MAP_BY_PE="--bind-to core --map-by $MPI_MAP_BY:PE=$MPI_MAP_BY_PE"
541
+ fi
542
+
543
+ if [ -n "$MPI_TCP_INCLUDE" ]; then
544
+ _option_btl_tcp_if_include="--mca btl_tcp_if_include ${MPI_TCP_INCLUDE}"
545
+ else
546
+ _option_btl_tcp_if_include=""
547
+ fi
548
+
549
+ TRAINING_COMMAND="mpirun --allow-run-as-root \
550
+ -np $NUM_WORKERS --hostfile ${MPI_HOSTFILE_PATH} \
551
+ --prefix ${OMPI_PREFIX} \
552
+ --mca plm_rsh_args -p${SSH_PORT} \
553
+ ${_option_btl_tcp_if_include} \
554
+ -x BASE_PATH=${BASE_PATH} \
555
+ -x PYTHONPATH=${PYTHONPATH} \
556
+ -x DATASET_CACHE=${DATASET_CACHE} \
557
+ -x DEBUG=${DEBUG} \
558
+ -x RESNET_SIZE=${RESNET_SIZE} \
559
+ -x IMAGENET_DIR=${IMAGENET_DIR} \
560
+ -x JPEG_IMAGENET_DIR=${JPEG_IMAGENET_DIR} \
561
+ -x TF_BF16_CONVERSION=${TF_BF16_CONVERSION} \
562
+ -x TF_RECIPE_CACHE_PATH=${TF_RECIPE_CACHE_PATH} \
563
+ -x LD_PRELOAD=${LD_PRELOAD} \
564
+ -x TF_MODULES_RELEASE_BUILD=${TF_MODULES_RELEASE_BUILD} \
565
+ -x HABANA_LOGS=${HABANA_LOGS} \
566
+ -x CPU_BIND_TYPE=${CPU_BIND_TYPE} \
567
+ -x WORK_DIR=${WORK_DIR} \
568
+ -x BATCH_SIZE=${BATCH_SIZE} \
569
+ -x TRAIN_EPOCHS=${TRAIN_EPOCHS} \
570
+ -x TRAIN_STEPS=${TRAIN_STEPS} \
571
+ -x DISPLAY_STEPS=${DISPLAY_STEPS} \
572
+ -x STEPS_PER_LOOP=${STEPS_PER_LOOP} \
573
+ -x NUM_WORKERS=${NUM_WORKERS} \
574
+ -x EPOCHS_BETWEEN_EVALS=${EPOCHS_BETWEEN_EVALS} \
575
+ -x EVAL_OFFSET_EPOCHS=${EVAL_OFFSET_EPOCHS} \
576
+ -x WARMUP_EPOCHS=${WARMUP_EPOCHS} \
577
+ -x LABEL_SMOOTH=${LABEL_SMOOTH} \
578
+ -x WEIGHT_DECAY=${WEIGHT_DECAY} \
579
+ -x NUM_ACCUMULATION_STEPS=${NUM_ACCUMULATION_STEPS}
580
+ -x LR_MOMENTUM=${LR_MOMENTUM} \
581
+ -x USE_LARS_OPTIMIZER=${USE_LARS_OPTIMIZER} \
582
+ -x SYNTHETIC_DATA=${SYNTHETIC_DATA} \
583
+ -x BASE_LEARNING_RATE=${BASE_LEARNING_RATE} \
584
+ -x USE_MLPERF=${USE_MLPERF} \
585
+ -x ENABLE_BARRIERS=0 \
586
+ -x SCALE_OUT_PORTS=1 \
587
+ -x STOP_THRESHOLD=${STOP_THRESHOLD} \
588
+ -x NO_EVAL=${NO_EVAL} \
589
+ -x USE_HOROVOD=${USE_HOROVOD} \
590
+ -x TRAIN_STEPS=${TRAIN_STEPS} \
591
+ -x LARS_DECAY_EPOCHS=${LARS_DECAY_EPOCHS} \
592
+ -x LOG_DIR=${LOG_DIR} \
593
+ -x NUM_TRAIN_FILES=${NUM_TRAIN_FILES} \
594
+ -x NUM_EVAL_FILES=${NUM_EVAL_FILES} \
595
+ -x TF_ENABLE_DYNAMIC_SHAPES=${TF_ENABLE_DYNAMIC_SHAPES} \
596
+ ${MPIRUN_ARGS_MAP_BY_PE} \
597
+ -x MODELING=${MODELING} \
598
+ -x HOROVOD_FUSION_THRESHOLD \
599
+ -x SIGNALING_FROM_GRAPH \
600
+ --merge-stderr-to-stdout --output-filename ${LOG_DIR} \
601
+ ${SCRIPT_DIR}/run.sh"
602
+
603
+ else
604
+ TRAINING_COMMAND="${SCRIPT_DIR}/run.sh"
605
+ fi
606
+
607
+ echo "TRAINING COMMAND = ${TRAINING_COMMAND}"
608
+ printf "[launch_keras_resnet_hvd] Starting training...\n\n"
609
+ $TRAINING_COMMAND
610
+
611
+ run_per_ip rm -rf ${WORK_DIR}/resnet_synth_data
612
+
613
+ rm -rf ${BASE_PATH}/log
614
+ cp /root/build_log.csv ${LOG_DIR}/
615
+ cp ${MPI_HOSTFILE_PATH} ${LOG_DIR}/
616
+ cp -r ${LOG_DIR} ${BASE_PATH}/log
617
+ chmod -R 777 ${LOG_DIR}
618
+ exit $exit_code
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/scripts/run.sh ADDED
@@ -0,0 +1,168 @@
1
+ #!/bin/bash
2
+
3
+ if [[ $DEBUG -eq 1 ]]; then
4
+ set -x
5
+ env
6
+ #LOG_LEVEL:0 - TRACE, 1 - DEBUG, 2 - INFO, 3 - WARNING, 4 - ERROR, 5 - CRITICAL, 6 - OFF
7
+ export LOG_LEVEL_ALL_HCL=2
8
+ else
9
+ export LOG_LEVEL_ALL_HCL=6
10
+ fi
11
+
12
+ if [ -z $BASE_PATH ]; then
13
+ BASE_PATH="$( cd "$(dirname "$(readlink -f ./defaults.cfg)" )" && pwd)"
14
+ PYTHONPATH=${BASE_PATH}:$PYTHONPATH
15
+ fi
16
+
17
+ TRAIN_SCRIPT=${BASE_PATH}/TensorFlow/computer_vision/Resnets/resnet_keras/resnet_ctl_imagenet_main.py
18
+ PT_VERSION=`python3 -c 'import sys; print(f"{sys.version_info[0]}.{sys.version_info[1]}")'`
19
+ TF_VERSION=`python3 -c "import tensorflow as tf; print(tf.__version__.replace('.', '_'))"`
20
+ PATCH_PATH=/usr/local/lib/python${PT_VERSION}/dist-packages/habana_frameworks/tensorflow/tf${TF_VERSION}/lib/habanalabs
21
+ # This is required for HW profiling but does not hurt so we add it always
22
+ export PYTHONPATH=${PATCH_PATH}:${PYTHONPATH}
23
+
24
+ # Fixed variables, not inherited from launcher
25
+ export TF_ALLOW_CONTROL_EDGES_IN_HABANA_OPS=1
26
+ export EXPERIMENTAL_PRELOADING=1
27
+ export ENABLE_TENSORBOARD=false
28
+ export REPORT_ACCURACY_METRICS=true
29
+ export DIST_EVAL=true
30
+ export ENABLE_DEVICE_WARMUP=true
31
+ export TF_DISABLE_MKL=1
32
+ export SYNTHETIC_DATA=${SYNTHETIC_DATA}
33
+
34
+ if [[ $MODELING -eq 1 ]]; then
35
+ ENABLE_CHECKPOINT=true
36
+ else
37
+ ENABLE_CHECKPOINT=false
38
+ fi
39
+ if [[ $TF_BF16_CONVERSION -eq 1 ]]; then
40
+ DATA_TYPE="bf16"
41
+ else
42
+ DATA_TYPE="fp32"
43
+ fi
44
+ if [[ ${NO_EVAL} -eq 1 ]]; then
45
+ SKIP_EVAL=true
46
+ else
47
+ SKIP_EVAL=false
48
+ fi
49
+ if [[ ${USE_LARS_OPTIMIZER} -eq 1 ]]; then
50
+ OPTIMIZER="LARS"
51
+ else
52
+ OPTIMIZER="SGD"
53
+ fi
54
+ if [[ ${USE_HOROVOD} -eq 1 ]]; then
55
+ DIST_EVAL=true
56
+ USE_HOROVOD='--use_horovod'
57
+ else
58
+ DIST_EVAL=false
59
+ USE_HOROVOD=''
60
+ fi
61
+ if [[ ${SYNTHETIC_DATA} -eq 1 ]]; then
62
+ SYNTHETIC_DATA=true
63
+ fi
64
+ if [[ -n ${NUM_ACCUMULATION_STEPS} ]]; then
65
+ NUM_ACCUMULATION_STEPS="--num_acc_steps=${NUM_ACCUMULATION_STEPS}"
66
+ else
67
+ NUM_ACCUMULATION_STEPS=""
68
+ fi
69
+
70
+ if [[ -n ${JPEG_IMAGENET_DIR} ]]; then
71
+ JPEG_IMAGENET_DIR="--jpeg_data_dir=${JPEG_IMAGENET_DIR}"
72
+ fi
73
+
74
+ if [[ $SIGNALING_FROM_GRAPH -eq 1 ]]; then
75
+ export HOROVOD_FUSION_THRESHOLD=0
76
+ export TF_USE_SIGNALING_FROM_ENCAP_OP=1
77
+ else
78
+ export TF_USE_SIGNALING_FROM_ENCAP_OP=0
79
+ fi
80
+
81
+ # clear cache
82
+ PROC_FS=${PROC_FS:-"/proc"}
83
+ sync && echo 3 > $PROC_FS/sys/vm/drop_caches
84
+
85
+ TRAIN_COMMAND="python3 ${TRAIN_SCRIPT}
86
+ --model_dir=${WORK_DIR}
87
+ --data_dir=${IMAGENET_DIR}
88
+ ${JPEG_IMAGENET_DIR}
89
+ --batch_size=${BATCH_SIZE}
90
+ --distribution_strategy=off
91
+ --num_gpus=0
92
+ --data_format=channels_last
93
+ --train_epochs=${TRAIN_EPOCHS}
94
+ --train_steps=${TRAIN_STEPS}
95
+ --experimental_preloading=${EXPERIMENTAL_PRELOADING}
96
+ --log_steps=${DISPLAY_STEPS}
97
+ --steps_per_loop=${STEPS_PER_LOOP}
98
+ --enable_checkpoint_and_export=${ENABLE_CHECKPOINT}
99
+ --enable_tensorboard=${ENABLE_TENSORBOARD}
100
+ --epochs_between_evals=${EPOCHS_BETWEEN_EVALS}
101
+ --base_learning_rate=${BASE_LEARNING_RATE}
102
+ --warmup_epochs=${WARMUP_EPOCHS}
103
+ --optimizer=${OPTIMIZER}
104
+ --lr_schedule=polynomial
105
+ --label_smoothing=${LABEL_SMOOTH}
106
+ --weight_decay=${WEIGHT_DECAY}
107
+ $NUM_ACCUMULATION_STEPS
108
+ --single_l2_loss_op
109
+ ${USE_HOROVOD}
110
+ --modeling=${MODELING}
111
+ --data_loader_image_type=${DATA_TYPE}
112
+ --dtype=${DATA_TYPE}
113
+ --eval_offset_epochs=${EVAL_OFFSET_EPOCHS}
114
+ --report_accuracy_metrics=${REPORT_ACCURACY_METRICS}
115
+ --dist_eval=${DIST_EVAL}
116
+ --target_accuracy=${STOP_THRESHOLD}
117
+ --enable_device_warmup=${ENABLE_DEVICE_WARMUP}
118
+ --lars_decay_epochs=${LARS_DECAY_EPOCHS}
119
+ --momentum=${LR_MOMENTUM}
120
+ --skip_eval=${SKIP_EVAL}
121
+ --use_synthetic_data=${SYNTHETIC_DATA}
122
+ --dataset_cache=${DATASET_CACHE}
123
+ --num_train_files=${NUM_TRAIN_FILES}
124
+ --num_eval_files=${NUM_EVAL_FILES}
125
+ "
126
+ echo ${TRAIN_COMMAND}
127
+
128
+ echo "[run] General Settings:"
129
+ echo "[run] RESNET_SIZE" $RESNET_SIZE
130
+ echo "[run] IMAGENET_DIR" $IMAGENET_DIR
131
+ echo "[run] BATCH_SIZE" $BATCH_SIZE
132
+ echo "[run] NUM_WORKERS" $NUM_WORKERS
133
+ echo "[run] TRAIN_EPOCHS" $TRAIN_EPOCHS
134
+ echo "[run] TRAIN_STEPS" $TRAIN_STEPS
135
+ echo "[run] DISPLAY_STEPS" $DISPLAY_STEPS
136
+ echo "[run] USE_LARS_OPTIMIZER" $USE_LARS_OPTIMIZER
137
+ echo "[run] CPU_BIND_TYPE" $CPU_BIND_TYPE
138
+ echo "[run] EPOCHS_BETWEEN_EVALS" $EPOCHS_BETWEEN_EVALS
139
+ echo "[run] TRAIN_AND_EVAL" $TRAIN_AND_EVAL
140
+ echo "[run] TF_BF16_CONVERSION" $TF_BF16_CONVERSION
141
+ echo "[run] DATASET_CACHE" $DATASET_CACHE
142
+ echo "[run] USE_HOROVOD" $USE_HOROVOD
143
+ echo
144
+ echo "[run] Learning Setting:"
145
+ echo "[run] WEIGHT_DECAY" $WEIGHT_DECAY
146
+ echo "[run] NUM_ACCUMULATION_STEPS" $NUM_ACCUMULATION_STEPS
147
+ echo "[run] LABEL_SMOOTH" $LABEL_SMOOTH
148
+ echo "[run] BASE_LEARNING_RATE" $BASE_LEARNING_RATE
149
+ echo "[run] WARMUP_EPOCHS" $WARMUP_EPOCHS
150
+ echo "[run] USE_MLPERF" $USE_MLPERF
151
+ echo "[run] NO_EVAL" $NO_EVAL
152
+ echo "[run] STOP_THRESHOLD" $STOP_THRESHOLD
153
+ echo "[run] LR_MOMENTUM" $LR_MOMENTUM
154
+ echo "[run] EVAL_OFFSET_EPOCHS" $EVAL_OFFSET_EPOCHS
155
+ echo "[run] LARS_DECAY_EPOCHS" $LARS_DECAY_EPOCHS
156
+ echo "[run] SYNTHETIC_DATA" $SYNTHETIC_DATA
157
+
158
+ if [[ ! -z $USE_HOROVOD ]] && [[ $CPU_BIND_TYPE == "numa" ]]; then
159
+ LOCAL_SNC_VALUE=$(( OMPI_COMM_WORLD_LOCAL_RANK ))
160
+ if [[ $HLS_TYPE == "HLS2" ]]; then
161
+ export NUMA_MAPPING_DIR=$BASE_PATH
162
+ bash list_affinity_topology_bare_metal.sh
163
+ CPU_RANGE=`cat $NUMA_MAPPING_DIR/.habana_moduleID$LOCAL_SNC_VALUE`
164
+ fi
165
+ LD_PRELOAD=${PRELOAD_PATH} numactl --physcpubind=${CPU_RANGE} ${TRAIN_COMMAND}
166
+ else
167
+ LD_PRELOAD=${PRELOAD_PATH} ${TRAIN_COMMAND}
168
+ fi
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/resnet/scripts/unpack_imagenet.sh ADDED
@@ -0,0 +1,164 @@
1
+ #!/bin/bash
2
+ ###############################################################################
3
+ # Copyright (c) 2023, Habana Labs Ltd. All rights reserved.
4
+ ###############################################################################
5
+
6
+ function print_synopsis()
7
+ {
8
+ cat << HEREDOC
9
+
10
+ Usage: $program -ta|--train-archive PATH -va|--validation-archive PATH
11
+ -o|--output-path PATH [-j|--jobs-number COUNT] [-h|--help]
12
+
13
+ Required arguments:
14
+ -ta, --train-archive PATH path to ImageNet training archive
15
+ -va, --validation-archive PATH path to ImageNet validation archive
16
+ -o, --output-path PATH path to folder where ImageNet will be unpacked
17
+
18
+ Optional arguments:
19
+ -j, --jobs-number COUNT number of jobs used when unpacking training archive
20
+ default value is 16
21
+ -h, --help print this help message
22
+
23
+ HEREDOC
24
+ }
25
+
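+ # Example invocation (illustrative paths):
+ #   ./unpack_imagenet.sh -ta /data/ILSVRC2012_img_train.tar \
+ #       -va /data/ILSVRC2012_img_val.tar -o /data/imagenet -j 16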
26
+ function parse_args()
27
+ {
28
+ export TRAIN_ARCHIVE_PATH=""
29
+ export VAL_ARCHIVE_PATH=""
30
+ export OUTPUT_PATH=""
31
+ export JOBS_NUMBER=16
32
+
33
+ while [ -n "$1" ]; do
34
+ case "$1" in
35
+ -ta | --train-archive )
36
+ export TRAIN_ARCHIVE_PATH=$2
37
+ shift 2
38
+ ;;
39
+ -va | --validation-archive )
40
+ export VAL_ARCHIVE_PATH=$2
41
+ shift 2
42
+ ;;
43
+ -o | --output-path )
44
+ export OUTPUT_PATH=$2
45
+ shift 2
46
+ ;;
47
+ -j | --jobs-number )
48
+ export JOBS_NUMBER=$2
49
+ shift 2
50
+ ;;
51
+ -h | --help )
52
+ print_synopsis
53
+ exit 0
54
+ ;;
55
+ * )
56
+ echo "error: invalid parameter: $1"
57
+ print_synopsis
58
+ exit 1
59
+ ;;
60
+ esac
61
+ done
62
+
63
+ if [[ ! -f "$TRAIN_ARCHIVE_PATH" ]]; then
64
+ echo "Please specify correct path to traing archive using -ta, --train-archive."
65
+ print_synopsis
66
+ exit 1
67
+ fi
68
+
69
+ if [[ ! -f "$VAL_ARCHIVE_PATH" ]]; then
70
+ echo "Please specify correct path to validation archive using -va, --validation-archive."
71
+ print_synopsis
72
+ exit 1
73
+ fi
74
+
75
+ if [[ -z "$OUTPUT_PATH" ]]; then
76
+ echo "Please specify output path using -o, --output-path."
77
+ print_synopsis
78
+ exit 1
79
+ fi
80
+ }
81
+
82
+ function reset_folder()
83
+ {
84
+ rm -rf $1
85
+ mkdir -p $1
86
+ }
87
+
88
+ function unpack_train_subarchive()
89
+ {
90
+ ARCHIVE_NAME=$1
91
+ ARCHIVE_INDEX=$2
92
+ NO_OF_ARCHIVES=$3
93
+ PRINT="$ARCHIVE_INDEX/$NO_OF_ARCHIVES: $ARCHIVE_NAME"
94
+ echo "Upacking $PRINT."
95
+
96
+ pushd $TRAIN_PATH > /dev/null
97
+
98
+ DIR=`basename $ARCHIVE_NAME .tar`
99
+ mkdir $DIR
100
+ tar xf $ARCHIVE_NAME -C $DIR
101
+ rm $ARCHIVE_NAME
102
+
103
+ popd > /dev/null
104
+ echo "Finished upacking $PRINT."
105
+ }
106
+
107
+ function unpack_train()
108
+ {
109
+ export TRAIN_PATH="$OUTPUT_PATH/train"
110
+ export TMP_PATH="$OUTPUT_PATH/tmp"
111
+ reset_folder $TRAIN_PATH
112
+ reset_folder $TMP_PATH
113
+
114
+ echo "Unpacking training data."
115
+ tar xf $TRAIN_ARCHIVE_PATH -C $TMP_PATH
116
+
117
+ echo "Unpacking subarchives."
118
+ pushd $TMP_PATH > /dev/null
119
+ ARCHIVES_COUNT=$(ls *.tar | wc -l)
120
+ ARCHIVE_IDX=0
121
+ for ARCHIVE in *.tar; do
122
+ ((ARCHIVE_IDX++))
123
+
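+ # Throttle: at most JOBS_NUMBER subarchives are staged in TRAIN_PATH
+ # (and being unpacked in the background) at any given time.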
124
+ while : ; do
125
+ JOBS_COUNT=$(ls $TRAIN_PATH/*.tar 2> /dev/null | wc -l)
126
+ if [ "$JOBS_COUNT" -lt "$JOBS_NUMBER" ]; then
127
+ break
128
+ fi
129
+ sleep 1s
130
+ done
131
+
132
+ mv $ARCHIVE $TRAIN_PATH/
133
+ unpack_train_subarchive $ARCHIVE $ARCHIVE_IDX $ARCHIVES_COUNT &
134
+ done
135
+ popd > /dev/null
136
+
137
+ wait
138
+ rm -rf $TMP_PATH
139
+ echo "Imagenet training data ready."
140
+ }
141
+
142
+ function unpack_val()
143
+ {
144
+ export VAL_PATH="$OUTPUT_PATH/val"
145
+ reset_folder $VAL_PATH
146
+
147
+ echo "Unpacking validation data."
148
+ tar xf $VAL_ARCHIVE_PATH -C $VAL_PATH
149
+
150
+ echo "Reorganizing validation folder."
151
+ export VALPREP_ADDR=https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh
152
+ pushd $VAL_PATH > /dev/null
153
+ wget -qO- $VALPREP_ADDR | bash
154
+ popd > /dev/null
155
+
156
+ echo "Imagenet validation data ready."
157
+ }
158
+
159
+ parse_args "$@"
160
+
161
+ unpack_train &
162
+ unpack_val &
163
+
164
+ wait
docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N16-PT.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "submitter": "Intel-HabanaLabs",
3
+ "division": "closed",
4
+ "status": "available",
5
+ "system_name": "HLS-Gaudi2-N16-PT",
6
+ "number_of_nodes": "16",
7
+ "host_processors_per_node": "2",
8
+ "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380",
9
+ "host_processor_core_count": "40",
10
+ "host_processor_vcpu_count": "80",
11
+ "host_processor_frequency": "2.3 GHz",
12
+ "host_processor_caches": "L1d cache: 3.8 MiB, L1i cache: 2.5 MiB, L2 cache: 100 MiB, L3 cache: 120 MiB",
13
+ "host_processor_interconnect": "UPI",
14
+ "host_memory_capacity": "1024 GB",
15
+ "host_memory_configuration": "DDR4-3200",
16
+ "host_storage_type": "Weka",
17
+ "host_storage_capacity": "1 PB",
18
+ "host_networking": "2x Mellanox ConnectX-5 Ex 100Gb/s Ethernet",
19
+ "host_networking_topology": "L3 Fat Tree",
20
+ "accelerators_per_node": "8",
21
+ "accelerator_model_name": "Intel® Gaudi® 2 AI Accelerator",
22
+ "accelerator_host_interconnect": "4x PCIe 4.0 x16",
23
+ "accelerator_frequency": "1800MHz",
24
+ "accelerator_on-chip_memories": "6",
25
+ "accelerator_memory_configuration": "HBM2E",
26
+ "accelerator_memory_capacity": "96 GB",
27
+ "accelerator_interconnect": "24x 100Gb/s Ethernet",
28
+ "accelerator_interconnect_topology": "10x L3 Fat Tree",
29
+ "cooling": "Air-cooled",
30
+ "hw_notes": "",
31
+ "framework": "PyTorch 2.0.1a0",
32
+ "other_software_stack": "synapseAI 1.13.99",
33
+ "operating_system": "Ubuntu 20.04",
34
+ "sw_notes": ""
35
+ }
docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N2-PT.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "submitter": "Intel-HabanaLabs",
3
+ "division": "closed",
4
+ "status": "available",
5
+ "system_name": "HLS-Gaudi2-N2-PT",
6
+ "number_of_nodes": "2",
7
+ "host_processors_per_node": "2",
8
+ "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380",
9
+ "host_processor_core_count": "40",
10
+ "host_processor_vcpu_count": "80",
11
+ "host_processor_frequency": "2.3 GHz",
12
+ "host_processor_caches": "L1d cache: 3.8 MiB, L1i cache: 2.5 MiB, L2 cache: 100 MiB, L3 cache: 120 MiB",
13
+ "host_processor_interconnect": "UPI",
14
+ "host_memory_capacity": "1024 GB",
15
+ "host_memory_configuration": "DDR4-3200",
16
+ "host_storage_type": "Weka",
17
+ "host_storage_capacity": "1 PB",
18
+ "host_networking": "2x Mellanox ConnectX-5 Ex 100Gb/s Ethernet",
19
+ "host_networking_topology": "L3 Fat Tree",
20
+ "accelerators_per_node": "8",
21
+ "accelerator_model_name": "Intel® Gaudi® 2 AI Accelerator",
22
+ "accelerator_host_interconnect": "4x PCIe 4.0 x16",
23
+ "accelerator_frequency": "1800MHz",
24
+ "accelerator_on-chip_memories": "6",
25
+ "accelerator_memory_configuration": "HBM2E",
26
+ "accelerator_memory_capacity": "96 GB",
27
+ "accelerator_interconnect": "24x 100Gb/s Ethernet",
28
+ "accelerator_interconnect_topology": "10x L3 Fat Tree",
29
+ "cooling": "Air-cooled",
30
+ "hw_notes": "",
31
+ "framework": "PyTorch 2.0.1a0",
32
+ "other_software_stack": "synapseAI 1.13.99",
33
+ "operating_system": "Ubuntu 20.04",
34
+ "sw_notes": ""
35
+ }
docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N32-PT.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "submitter": "Intel-HabanaLabs",
3
+ "division": "closed",
4
+ "status": "available",
5
+ "system_name": "HLS-Gaudi2-N32-PT",
6
+ "number_of_nodes": "32",
7
+ "host_processors_per_node": "2",
8
+ "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380",
9
+ "host_processor_core_count": "40",
10
+ "host_processor_vcpu_count": "80",
11
+ "host_processor_frequency": "2.3 GHz",
12
+ "host_processor_caches": "L1d cache: 3.8 MiB, L1i cache: 2.5 MiB, L2 cache: 100 MiB, L3 cache: 120 MiB",
13
+ "host_processor_interconnect": "UPI",
14
+ "host_memory_capacity": "1024 GB",
15
+ "host_memory_configuration": "DDR4-3200",
16
+ "host_storage_type": "Weka",
17
+ "host_storage_capacity": "1 PB",
18
+ "host_networking": "2x Mellanox ConnectX-5 Ex 100Gb/s Ethernet",
19
+ "host_networking_topology": "L3 Fat Tree",
20
+ "accelerators_per_node": "8",
21
+ "accelerator_model_name": "Intel® Gaudi® 2 AI Accelerator",
22
+ "accelerator_host_interconnect": "4x PCIe 4.0 x16",
23
+ "accelerator_frequency": "1800MHz",
24
+ "accelerator_on-chip_memories": "6",
25
+ "accelerator_memory_configuration": "HBM2E",
26
+ "accelerator_memory_capacity": "96 GB",
27
+ "accelerator_interconnect": "24x 100Gb/s Ethernet",
28
+ "accelerator_interconnect_topology": "10x L3 Fat Tree",
29
+ "cooling": "Air-cooled",
30
+ "hw_notes": "",
31
+ "framework": "PyTorch 2.0.1a0",
32
+ "other_software_stack": "synapseAI 1.13.99",
33
+ "operating_system": "Ubuntu 20.04",
34
+ "sw_notes": ""
35
+ }
docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N4-PT.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "submitter": "Intel-HabanaLabs",
3
+ "division": "closed",
4
+ "status": "available",
5
+ "system_name": "HLS-Gaudi2-N4-PT",
6
+ "number_of_nodes": "4",
7
+ "host_processors_per_node": "2",
8
+ "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380",
9
+ "host_processor_core_count": "40",
10
+ "host_processor_vcpu_count": "80",
11
+ "host_processor_frequency": "2.3 GHz",
12
+ "host_processor_caches": "L1d cache: 3.8 MiB, L1i cache: 2.5 MiB, L2 cache: 100 MiB, L3 cache: 120 MiB",
13
+ "host_processor_interconnect": "UPI",
14
+ "host_memory_capacity": "1024 GB",
15
+ "host_memory_configuration": "DDR4-3200",
16
+ "host_storage_type": "Weka",
17
+ "host_storage_capacity": "1 PB",
18
+ "host_networking": "2x Mellanox ConnectX-5 Ex 100Gb/s Ethernet",
19
+ "host_networking_topology": "L3 Fat Tree",
20
+ "accelerators_per_node": "8",
21
+ "accelerator_model_name": "Intel® Gaudi® 2 AI Accelerator",
22
+ "accelerator_host_interconnect": "4x PCIe 4.0 x16",
23
+ "accelerator_frequency": "1800MHz",
24
+ "accelerator_on-chip_memories": "6",
25
+ "accelerator_memory_configuration": "HBM2E",
26
+ "accelerator_memory_capacity": "96 GB",
27
+ "accelerator_interconnect": "24x 100Gb/s Ethernet",
28
+ "accelerator_interconnect_topology": "10x L3 Fat Tree",
29
+ "cooling": "Air-cooled",
30
+ "hw_notes": "",
31
+ "framework": "PyTorch 2.0.1a0",
32
+ "other_software_stack": "synapseAI 1.13.99",
33
+ "operating_system": "Ubuntu 20.04",
34
+ "sw_notes": ""
35
+ }
docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N48-PT.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "submitter": "Intel-HabanaLabs",
3
+ "division": "closed",
4
+ "status": "available",
5
+ "system_name": "HLS-Gaudi2-N48-PT",
6
+ "number_of_nodes": "48",
7
+ "host_processors_per_node": "2",
8
+ "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380",
9
+ "host_processor_core_count": "40",
10
+ "host_processor_vcpu_count": "80",
11
+ "host_processor_frequency": "2.3 GHz",
12
+ "host_processor_caches": "L1d cache: 3.8 MiB, L1i cache: 2.5 MiB, L2 cache: 100 MiB, L3 cache: 120 MiB",
13
+ "host_processor_interconnect": "UPI",
14
+ "host_memory_capacity": "1024 GB",
15
+ "host_memory_configuration": "DDR4-3200",
16
+ "host_storage_type": "Weka",
17
+ "host_storage_capacity": "1 PB",
18
+ "host_networking": "2x Mellanox ConnectX-5 Ex 100Gb/s Ethernet",
19
+ "host_networking_topology": "L3 Fat Tree",
20
+ "accelerators_per_node": "8",
21
+ "accelerator_model_name": "Intel® Gaudi® 2 AI Accelerator",
22
+ "accelerator_host_interconnect": "4x PCIe 4.0 x16",
23
+ "accelerator_frequency": "1800MHz",
24
+ "accelerator_on-chip_memories": "6",
25
+ "accelerator_memory_configuration": "HBM2E",
26
+ "accelerator_memory_capacity": "96 GB",
27
+ "accelerator_interconnect": "24x 100Gb/s Ethernet",
28
+ "accelerator_interconnect_topology": "10x L3 Fat Tree",
29
+ "cooling": "Air-cooled",
30
+ "hw_notes": "",
31
+ "framework": "PyTorch 2.0.1a0",
32
+ "other_software_stack": "synapseAI 1.13.99",
33
+ "operating_system": "Ubuntu 20.04",
34
+ "sw_notes": ""
35
+ }
docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-N8-PT.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "submitter": "Intel-HabanaLabs",
3
+ "division": "closed",
4
+ "status": "available",
5
+ "system_name": "HLS-Gaudi2-N8-PT",
6
+ "number_of_nodes": "8",
7
+ "host_processors_per_node": "2",
8
+ "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380",
9
+ "host_processor_core_count": "40",
10
+ "host_processor_vcpu_count": "80",
11
+ "host_processor_frequency": "2.3 GHz",
12
+ "host_processor_caches": "L1d cache: 3.8 MiB, L1i cache: 2.5 MiB, L2 cache: 100 MiB, L3 cache: 120 MiB",
13
+ "host_processor_interconnect": "UPI",
14
+ "host_memory_capacity": "1024 GB",
15
+ "host_memory_configuration": "DDR4-3200",
16
+ "host_storage_type": "Weka",
17
+ "host_storage_capacity": "1 PB",
18
+ "host_networking": "2x Mellanox ConnectX-5 Ex 100Gb/s Ethernet",
19
+ "host_networking_topology": "L3 Fat Tree",
20
+ "accelerators_per_node": "8",
21
+ "accelerator_model_name": "Intel® Gaudi® 2 AI Accelerator",
22
+ "accelerator_host_interconnect": "4x PCIe 4.0 x16",
23
+ "accelerator_frequency": "1800MHz",
24
+ "accelerator_on-chip_memories": "6",
25
+ "accelerator_memory_configuration": "HBM2E",
26
+ "accelerator_memory_capacity": "96 GB",
27
+ "accelerator_interconnect": "24x 100Gb/s Ethernet",
28
+ "accelerator_interconnect_topology": "10x L3 Fat Tree",
29
+ "cooling": "Air-cooled",
30
+ "hw_notes": "",
31
+ "framework": "PyTorch 2.0.1a0",
32
+ "other_software_stack": "synapseAI 1.12.0",
33
+ "operating_system": "Ubuntu 20.04",
34
+ "sw_notes": ""
35
+ }
docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-PT.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "submitter": "Intel-HabanaLabs",
3
+ "division": "closed",
4
+ "status": "available",
5
+ "system_name": "HLS-Gaudi2-PT",
6
+ "number_of_nodes": "1",
7
+ "host_processors_per_node": "2",
8
+ "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380",
9
+ "host_processor_core_count": "40",
10
+ "host_processor_vcpu_count": "80",
11
+ "host_processor_frequency": "2.3 GHz",
12
+ "host_processor_caches": "L1d cache: 3.8 MiB, L1i cache: 2.5 MiB, L2 cache: 100 MiB, L3 cache: 120 MiB",
13
+ "host_processor_interconnect": "UPI",
14
+ "host_memory_capacity": "1024 GB",
15
+ "host_memory_configuration": "DDR4-3200",
16
+ "host_storage_type": "Weka",
17
+ "host_storage_capacity": "1 PB",
18
+ "host_networking": "2x Mellanox ConnectX-5 Ex 100Gb/s Ethernet",
19
+ "host_networking_topology": "L3 Fat Tree",
20
+ "accelerators_per_node": "8",
21
+ "accelerator_model_name": "Intel® Gaudi® 2 AI Accelerator",
22
+ "accelerator_host_interconnect": "4x PCIe 4.0 x16",
23
+ "accelerator_frequency": "1800MHz",
24
+ "accelerator_on-chip_memories": "6",
25
+ "accelerator_memory_configuration": "HBM2E",
26
+ "accelerator_memory_capacity": "96 GB",
27
+ "accelerator_interconnect": "24x 100Gb/s Ethernet",
28
+ "accelerator_interconnect_topology": "10x L3 Fat Tree",
29
+ "cooling": "Air-cooled",
30
+ "hw_notes": "",
31
+ "framework": "PyTorch 2.0.1a0",
32
+ "other_software_stack": "synapseAI 1.13.99",
33
+ "operating_system": "Ubuntu 20.04",
34
+ "sw_notes": ""
35
+ }
docker/bloom13b/Model-References/MLPERF3.1/Training/systems/HLS-Gaudi2-TF.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "submitter": "Intel-HabanaLabs",
3
+ "division": "closed",
4
+ "status": "available",
5
+ "system_name": "HLS-Gaudi2-TF",
6
+ "number_of_nodes": "1",
7
+ "host_processors_per_node": "2",
8
+ "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380",
9
+ "host_processor_core_count": "40",
10
+ "host_processor_vcpu_count": "80",
11
+ "host_processor_frequency": "2.3 GHz",
12
+ "host_processor_caches": "L1d cache: 3.8 MiB, L1i cache: 2.5 MiB, L2 cache: 100 MiB, L3 cache: 120 MiB",
13
+ "host_processor_interconnect": "UPI",
14
+ "host_memory_capacity": "1024 GB",
15
+ "host_memory_configuration": "DDR4-3200",
16
+ "host_storage_type": "Weka",
17
+ "host_storage_capacity": "1 PB",
18
+ "host_networking": "2x Mellanox ConnectX-5 Ex 100Gb/s Ethernet",
19
+ "host_networking_topology": "L3 Fat Tree",
20
+ "accelerators_per_node": "8",
21
+ "accelerator_model_name": "Intel® Gaudi® 2 AI Accelerator",
22
+ "accelerator_host_interconnect": "4x PCIe 4.0 x16",
23
+ "accelerator_frequency": "1800MHz",
24
+ "accelerator_on-chip_memories": "6",
25
+ "accelerator_memory_configuration": "HBM2E",
26
+ "accelerator_memory_capacity": "96 GB",
27
+ "accelerator_interconnect": "24x 100Gb/s Ethernet",
28
+ "accelerator_interconnect_topology": "10x L3 Fat Tree",
29
+ "cooling": "Air-cooled",
30
+ "hw_notes": "",
31
+ "framework": "TensorFlow 2.13.0",
32
+ "other_software_stack": "synapseAI 1.13.99",
33
+ "operating_system": "Ubuntu 20.04",
34
+ "sw_notes": ""
35
+ }
docker/bloom13b/Model-References/TensorFlow/common/debug.py ADDED
@@ -0,0 +1,134 @@
1
+ # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ ###############################################################################
16
+ # Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
17
+ ###############################################################################
18
+
19
+ from absl import flags
20
+ from absl import logging
21
+ from tensorflow.core.protobuf import debug_event_pb2
22
+ from tensorflow.python.debug.lib import debug_events_writer
23
+ from tensorflow.python.framework import op_callbacks
24
+ from tensorflow.python.ops import gen_debug_ops
25
+ import tensorflow as tf
26
+ import re
27
+ import os
28
+ import json
29
+ try:
30
+ import horovod.tensorflow as hvd
31
+ except ImportError:
32
+ hvd = None
33
+
34
+ def horovod_enabled():
35
+ return hvd is not None and hvd.is_initialized()
36
+
37
+
38
+ flags.DEFINE_string(name='dump_config', default=None,
39
+ help='Defines config for tensor dumping')
40
+
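+ # Illustrative dump_config JSON (keys map to the _DumpCallback.__init__ arguments
+ # below; the values here are only an example):
+ # {"dump_root": "/tmp/tf_dump", "tensor_debug_mode": "FULL_TENSOR",
+ #  "circular_buffer_size": 1000, "op_regex": ".*"}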
41
+
42
+ class _DumpCallback(object):
43
+ def __init__(self, dump_root, tensor_debug_mode, circular_buffer_size, op_regex, output_regex=None):
44
+ self._dump_root = dump_root
45
+ if horovod_enabled():
46
+ self._dump_root = os.path.join(
47
+ self._dump_root, f"rank_{hvd.rank()}")
48
+ self._tensor_debug_mode = debug_event_pb2.TensorDebugMode.Value(
49
+ tensor_debug_mode)
50
+ self._circular_buffer_size = circular_buffer_size
51
+ self._op_regex = re.compile(op_regex) if isinstance(
52
+ op_regex, str) else op_regex
53
+ self._output_regex = re.compile(output_regex) if isinstance(
54
+ output_regex, str) else output_regex
55
+ self._tfdbg_run_id = ''
56
+ self._dump_op_counter = 0
57
+
58
+ debug_writer_args = {
59
+ "dump_root": self._dump_root,
60
+ "circular_buffer_size": self._circular_buffer_size
61
+ }
62
+
63
+ if not tf.__version__.startswith("2.2"):
64
+ debug_writer_args["tfdbg_run_id"] = self._tfdbg_run_id
65
+
66
+ self._writer = debug_events_writer.DebugEventsWriter(
67
+ **debug_writer_args)
68
+
69
+ def callback(self, op_type, inputs, attrs, outputs, op_name=None, graph=None):
70
+ if op_name is not None and self._op_regex.match(op_name):
71
+ graph_name = "missing-graph-name"
72
+ if graph is not None and hasattr(graph, "name"):
73
+ graph_name = graph.name
74
+
75
+ logging.info("Adding dump op for '%s' of type '%s' from graph '%s'" % (
76
+ op_name, op_type, graph_name))
77
+
78
+ new_outputs = []
79
+
80
+ for output_slot, output in enumerate(outputs):
81
+ if self._output_regex is not None and not self._output_regex.match(output.name):
82
+ logging.info("Skipped output: " + output.name)
83
+ new_outputs.append(output)
84
+ continue
85
+ debug_identity_op_kwargs = {
86
+ "tfdbg_context_id": graph_name,
87
+ "op_name": op_name,
88
+ "output_slot": output_slot,
89
+ "tensor_debug_mode": self._tensor_debug_mode,
90
+ "debug_urls": ["file://%s" % self._dump_root],
91
+ "name": "dump_%d" % self._dump_op_counter
92
+ }
93
+
94
+ if not tf.__version__.startswith("2.2"):
95
+ debug_identity_op_kwargs["circular_buffer_size"] = self._circular_buffer_size
96
+ debug_identity_op_kwargs["tfdbg_run_id"] = self._tfdbg_run_id
97
+
98
+ self._dump_op_counter = self._dump_op_counter + 1
99
+ new_outputs.append(gen_debug_ops.debug_identity_v2(
100
+ output, **debug_identity_op_kwargs))
101
+
102
+ return new_outputs
103
+ else:
104
+ return None
105
+
106
+ def __enter__(self, *args, **kwargs):
107
+ op_callbacks.add_op_callback(self.callback)
108
+ logging.info("Enabled tensor dumping")
109
+
110
+ def __exit__(self, *args, **kwargs):
111
+ op_callbacks.remove_op_callback(self.callback)
112
+ logging.info("Disabled tensor dumping")
113
+
114
+ def __del__(self):
115
+ self._writer.Close()
116
+
117
+
118
+ class _Dummy(object):
119
+ def __enter__(self, *args, **kwargs):
120
+ pass
121
+
122
+ def __exit__(self, *args, **kwargs):
123
+ pass
124
+
125
+
126
+ def dump_callback(config_file=None):
127
+ if config_file is not None:
128
+ kwargs = json.load(open(config_file, 'r'))
129
+ return _DumpCallback(**kwargs)
130
+ try:
131
+ kwargs = json.load(open(flags.FLAGS.dump_config, 'r'))
132
+ return _DumpCallback(**kwargs)
133
+ except:
134
+ return _Dummy()
docker/bloom13b/Model-References/TensorFlow/common/tb_utils.py ADDED
@@ -0,0 +1,474 @@
1
+ import os
2
+ import time
3
+ import tensorflow as tf
4
+ from copy import deepcopy
5
+ from collections import defaultdict
6
+ from tensorboard.plugins.hparams import api as hp
7
+ from tensorflow.python.eager import context
8
+ from tensorflow.keras import backend as K
9
+ from tensorflow.python.ops import summary_ops_v2
10
+ from tensorflow.python.summary import summary as tf_summary
11
+ from tensorflow.python.training.summary_io import SummaryWriterCache
12
+ from tensorflow.compat.v1.keras.callbacks import TensorBoard, Callback
13
+ from tensorflow.python.training.session_run_hook import SessionRunHook, SessionRunArgs
14
+
15
+
16
+ def _remove_prefix(s, prefix):
17
+ if s.startswith(prefix):
18
+ s = s[len(prefix):]
19
+ return s
20
+
21
+
22
+ def _parse_precision(hparams: dict):
23
+ # Check if 'hparams' contain data type.
24
+ if 'dtype' in hparams or 'data_type' in hparams:
25
+ param_name = 'dtype' if 'dtype' in hparams else 'data_type'
26
+ return hparams[param_name]
27
+
28
+ # Check if bf16 conversion flags are set.
29
+ flag = os.environ.get('TF_BF16_CONVERSION', '0')
30
+ flag = flag.lower()
31
+ try:
32
+ value = int(flag)
33
+ except:
34
+ value = -1
35
+
36
+ if flag == 'false' or value == 0:
37
+ return 'fp32'
38
+ elif flag == 'true' or value == 1:
39
+ return 'bf16'
40
+ return flag
41
+
42
+
43
+ def _set_precision_if_missing(hparams: dict):
44
+ if 'precision' not in hparams:
45
+ hparams['precision'] = _parse_precision(hparams)
46
+ return hparams
47
+
48
+
49
+ def _copy_and_clean_hparams(hparams: dict):
50
+ hparams_ = dict()
51
+ for name, value in hparams.items():
52
+ if isinstance(value, (str, bool, int, float)):
53
+ hparams_[name] = value
54
+ continue
55
+
56
+ try:
57
+ hparams_[name] = str(value)
58
+ except:
59
+ tf.compat.v1.logging.info(
60
+ f'Conversion of parameter "{name}" to string failed. '
61
+ 'Parameter will not be saved.')
62
+
63
+ return hparams_
64
+
65
+
66
+ def write_hparams_v1(writer, hparams: dict):
67
+ hparams = _copy_and_clean_hparams(hparams)
68
+ hparams = _set_precision_if_missing(hparams)
69
+
70
+ with tf.compat.v1.Graph().as_default():
71
+ if isinstance(writer, str):
72
+ writer = SummaryWriterCache.get(writer)
73
+ summary = hp.hparams_pb(hparams).SerializeToString()
74
+ writer.add_summary(summary)
75
+
76
+
77
+ def write_hparams_v2(writer, hparams: dict):
78
+ hparams = _copy_and_clean_hparams(hparams)
79
+ hparams = _set_precision_if_missing(hparams)
80
+
81
+ with writer.as_default():
82
+ hp.hparams(hparams)
83
+
84
+
85
+ class ExamplesPerSecondEstimatorHook(tf.compat.v1.train.StepCounterHook):
86
+ """Calculate and report global_step/sec and examples/sec during runtime."""
87
+ # Copy-pasted from tensorflow_estimator/python/estimator/tpu/tpu_estimator.py
88
+
89
+ def __init__(self,
90
+ batch_size=None,
91
+ every_n_steps=1,
92
+ every_n_secs=None,
93
+ output_dir=None,
94
+ summary_writer=None,
95
+ extra_metrics=None,
96
+ log_global_step=False,
97
+ verbose=False,
98
+ tags_to_print=None):
99
+ super().__init__(
100
+ every_n_steps=every_n_steps,
101
+ every_n_secs=every_n_secs,
102
+ output_dir=output_dir,
103
+ summary_writer=summary_writer)
104
+ self._metrics = extra_metrics or {}
105
+ self._verbose = verbose
106
+ self._tags_to_print = tags_to_print
107
+ if log_global_step:
108
+ # The Estimator already logs global_step/sec by default when
109
+ # log_step_count_steps is not None; saving it here as well
110
+ # would duplicate events in TensorBoard.
111
+ # Use log_global_step=True only when RunConfig.log_step_count_steps=None.
112
+ self._metrics['global_step/sec'] = 1
113
+ if batch_size is not None:
114
+ self._metrics['examples/sec'] = batch_size
115
+
116
+ def _add_summary(self, tag, value, step):
117
+ Summary = tf.compat.v1.Summary
118
+ global_step_summary = Summary(value=[
119
+ Summary.Value(tag=tag, simple_value=value)
120
+ ])
121
+ self._summary_writer.add_summary(global_step_summary, step)
122
+ if (self._verbose or
123
+ (self._tags_to_print is not None and tag in self._tags_to_print)):
124
+ tf.compat.v1.logging.info(f'{tag}: {value}')
125
+
126
+ def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
127
+ global_step_per_sec = elapsed_steps / elapsed_time
128
+ if self._summary_writer is not None:
129
+ for name, factor in self._metrics.items():
130
+ value = factor * global_step_per_sec
131
+ self._add_summary(name, value, global_step)
132
+
133
+ def after_create_session(self, session, coord):
134
+ self._timer.reset()
135
+
136
+
137
+ class ExamplesPerSecondKerasHookV1(Callback):
138
+ def __init__(self,
139
+ every_n_steps=1,
140
+ every_n_secs=None,
141
+ output_dir=None,
142
+ summary_writer=None,
143
+ batch_size=None):
144
+ self.writer = summary_writer or SummaryWriterCache.get(output_dir)
145
+ self._timer = tf.compat.v1.train.SecondOrStepTimer(
146
+ every_n_secs, every_n_steps)
147
+ self._global_step = 0
148
+ self._total_examples = 0
149
+ self._should_trigger = True
150
+ self._batch_size = batch_size
151
+
152
+ def on_train_begin(self, logs=None):
153
+ self._timer.reset()
154
+
155
+ def on_train_batch_begin(self, batch, logs=None):
156
+ # `batch` is the index within the current epoch; to dump data across all epochs we use the global step instead.
157
+ self._should_trigger = self._timer.should_trigger_for_step(self._global_step)
158
+
159
+ def on_predict_batch_end(self, batch, logs=None):
160
+ self._global_step += 1
161
+
162
+ def on_train_batch_end(self, batch, logs=None):
163
+ step = self._global_step
164
+ self._total_examples += logs.get('size', 0)
165
+ if self._should_trigger:
166
+ elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
167
+ step)
168
+ if elapsed_time is not None:
169
+ total_examples = self._total_examples
170
+ if self._batch_size is not None:
171
+ total_examples = self._batch_size * elapsed_steps
172
+ self._log_and_record(
173
+ elapsed_steps, elapsed_time, step, total_examples)
174
+ self._total_examples = 0
175
+ self._global_step += 1
176
+
177
+ def _log_and_record(self, elapsed_steps, elapsed_time,
178
+ global_step, total_examples=None):
179
+ Summary = tf.compat.v1.Summary
180
+ global_step_per_sec = elapsed_steps / elapsed_time
181
+ if self.writer is not None:
182
+ global_step_summary = Summary(value=[
183
+ Summary.Value(
184
+ tag='global_step/sec', simple_value=global_step_per_sec)
185
+ ])
186
+ self.writer.add_summary(global_step_summary, global_step)
187
+ if total_examples is not None:
188
+ examples_per_sec = total_examples / elapsed_time
189
+ example_summary = Summary(value=[
190
+ Summary.Value(tag='examples/sec',
191
+ simple_value=examples_per_sec)
192
+ ])
193
+ self.writer.add_summary(example_summary, global_step)
194
+
195
+
196
+ class ExamplesPerSecondKerasHookV2(ExamplesPerSecondKerasHookV1):
197
+ def __init__(self,
198
+ every_n_steps=1,
199
+ every_n_secs=None,
200
+ output_dir=None,
201
+ summary_writer=None,
202
+ batch_size=None):
203
+ writer = summary_writer or summary_ops_v2.create_file_writer_v2(output_dir)
204
+ super().__init__(every_n_steps, every_n_secs, output_dir, writer, batch_size)
205
+
206
+ def _log_and_record(self, elapsed_steps, elapsed_time,
207
+ global_step, total_examples=None):
208
+ global_step_per_sec = elapsed_steps / elapsed_time
209
+ if self.writer is not None:
210
+ with self.writer.as_default(), summary_ops_v2.always_record_summaries():
211
+ summary_ops_v2.scalar('global_step/sec', global_step_per_sec,
212
+ step=global_step)
213
+ if total_examples is not None:
214
+ examples_per_sec = total_examples / elapsed_time
215
+ summary_ops_v2.scalar('examples/sec', examples_per_sec,
216
+ step=global_step)
217
+
218
+
219
+ ExamplesPerSecondKerasHook = ExamplesPerSecondKerasHookV1
220
+
221
+
222
+ class TBSummary(object):
223
+ """
224
+ Creates a proxy for FileWriter for TensorBoard.
225
+
226
+ :param log_dir: path where the experiment is running (usually the same as
227
+ model_dir in Estimator)
228
+ """
229
+
230
+ def __init__(self, log_dir: str):
231
+ super().__init__()
232
+ self._log_dir = log_dir
233
+
234
+ def __enter__(self):
235
+ return self
236
+
237
+ def __exit__(self, exc_type, exc_val, exc_tb):
238
+ pass
239
+
240
+ def add_scalar(self, tag, value, global_step=None):
241
+ with tf.compat.v1.Graph().as_default():
242
+ writer = SummaryWriterCache.get(self._log_dir)
243
+ summary = tf.compat.v1.Summary(
244
+ value=[tf.compat.v1.Summary.Value(tag=tag, simple_value=value)])
245
+ event = tf.compat.v1.Event(summary=summary)
246
+ event.wall_time = time.time()
247
+ event.step = global_step
248
+ writer.add_event(event)
249
+
250
+
251
+ class TensorBoardWithHParamsV1(TensorBoard):
252
+ """
253
+ Adds TensorBoard visualization to training process.
254
+
255
+ Writes training tfevent file into default log directory, but
256
+ stores evaluation in log_dir/eval subdirectory.
257
+ """
258
+
259
+ def __init__(self, hparams, *args, **kwargs):
260
+ super().__init__(*args, **kwargs)
261
+ self.hparams = hparams
262
+ self._train_summary = None
263
+ self._eval_summary = None
264
+
265
+ def _switch_writer(self, mode):
266
+ self.writer = self._train_summary if mode == 'train' else self._eval_summary
267
+
268
+ def _init_writer(self, model):
269
+ """Sets file writer."""
270
+ if context.executing_eagerly():
271
+ raise NotImplementedError('hook does not support eager execution')
272
+
273
+ self._train_summary = SummaryWriterCache.get(self.log_dir)
274
+ self._eval_summary = SummaryWriterCache.get(
275
+ os.path.join(self.log_dir, 'eval'))
276
+ self._switch_writer('train')
277
+
278
+ write_hparams_v1(self.writer, self.hparams)
279
+
280
+ def _write_custom_summaries(self, step, logs=None):
281
+ """
282
+ This methods works on the assumption that metrics containing `val`
283
+ in name are related to validation (that's the default in Keras).
284
+ """
285
+
286
+ logs = logs or {}
287
+ train_logs = {}
288
+ eval_logs = {}
289
+
290
+ for name, value in logs.items():
291
+ if 'val' in name:
292
+ if name.startswith('batch_val_'):
293
+ name = 'batch_' + _remove_prefix(name, 'batch_val_')
294
+ elif name.startswith('epoch_val_'):
295
+ name = _remove_prefix(name, 'epoch_val_')
296
+ eval_logs[name] = value
297
+ else:
298
+ if name.startswith('batch_'):
299
+ name = _remove_prefix(name, 'batch_')
300
+ train_logs[name] = value
301
+
302
+ self._switch_writer('eval')
303
+ super()._write_custom_summaries(step, eval_logs)
304
+ self._switch_writer('train')
305
+ super()._write_custom_summaries(step, train_logs)
306
+
307
+
308
+ class TensorBoardWithHParamsV2(TensorBoard):
309
+ """
310
+ Adds TensorBoard visualization to training process.
311
+
312
+ Writes training tfevent file into default log directory, but
313
+ stores evaluation in log_dir/eval subdirectory.
314
+ """
315
+
316
+ def __init__(self, hparams, *args, **kwargs):
317
+ super().__init__(*args, **kwargs)
318
+ self.hparams = hparams
319
+
320
+ def set_model(self, model):
321
+ """Sets Keras model and writes graph if specified."""
322
+ self.model = model
323
+ self._log_write_dir = self._get_log_write_dir()
324
+
325
+ self._train_dir = self._log_write_dir
326
+ self._train_step = self.model._train_counter # pylint: disable=protected-access
327
+
328
+ self._val_dir = os.path.join(self._log_write_dir, 'eval')
329
+ self._val_step = self.model._test_counter # pylint: disable=protected-access
330
+
331
+ self._writers = {} # Resets writers.
332
+
333
+ self._should_write_train_graph = False
334
+ if self.write_graph:
335
+ self._write_keras_model_summary()
336
+ self._should_write_train_graph = True
337
+ if self.embeddings_freq:
338
+ self._configure_embeddings()
339
+
340
+ write_hparams_v2(self._train_writer, self.hparams)
341
+
342
+ def _log_epoch_metrics(self, epoch, logs):
343
+ """Writes epoch metrics out as scalar summaries.
344
+
345
+ Arguments:
346
+ epoch: Int. The global step to use for TensorBoard.
347
+ logs: Dict. Keys are scalar summary names, values are scalars.
348
+ """
349
+ if not logs:
350
+ return
351
+
352
+ train_logs = {k: v for k,
353
+ v in logs.items() if not k.startswith('val_')}
354
+ val_logs = {k: v for k, v in logs.items() if k.startswith('val_')}
355
+ train_logs = self._collect_learning_rate(train_logs)
356
+
357
+ with summary_ops_v2.always_record_summaries():
358
+ if train_logs:
359
+ with self._train_writer.as_default():
360
+ for name, value in train_logs.items():
361
+ summary_ops_v2.scalar(name, value, step=epoch)
362
+ if val_logs:
363
+ with self._val_writer.as_default():
364
+ for name, value in val_logs.items():
365
+ name = name[4:] # Remove 'val_' prefix.
366
+ summary_ops_v2.scalar(name, value, step=epoch)
367
+
368
+ class TensorBoardHook(SessionRunHook):
369
+ def __init__(self,
370
+ output_dir="",
371
+ profile_steps=""
372
+ ):
373
+ self.output_dir = output_dir
374
+ profile_steps_error_message = (
375
+ 'profile_steps must be a comma separated pair of positive integers, '
376
+ 'specifying the first and last steps to be profiled.'
377
+ )
378
+ try:
379
+ profile_steps = [int(i) for i in profile_steps.split(',')]
380
+ except ValueError:
381
+ raise ValueError(profile_steps_error_message)
382
+ if len(profile_steps) != 2:
383
+ raise ValueError(profile_steps_error_message)
384
+ self.start_step, self.stop_step = profile_steps
385
+ if self.start_step < 0 or self.start_step > self.stop_step:
386
+ raise ValueError(profile_steps_error_message)
387
+ self._step = 0
388
+
389
+ def before_run(self, run_context):
390
+ self._step += 1
391
+ if self._step == self.start_step:
392
+ tf.profiler.experimental.start(self.output_dir)
393
+ elif self._step == self.stop_step + 1:
394
+ tf.profiler.experimental.stop()
395
+
396
+ return SessionRunArgs({})
397
+
398
+
399
+ class TimeToTrainKerasHook(Callback):
400
+ def __init__(self, output_dir=None, summary_writer=None):
401
+ self.writer = summary_writer or summary_ops_v2.create_file_writer_v2(output_dir)
402
+ self.counters = defaultdict(int)
403
+
404
+ def _add_event(self, tag, step):
405
+ if self.writer is not None:
406
+ with self.writer.as_default(), summary_ops_v2.always_record_summaries():
407
+ summary_ops_v2.scalar(tag, 0, step=step)
408
+
409
+ def on_epoch_begin(self, epoch, logs=None):
410
+ self._add_event("ttt/train/epoch/begin", epoch)
411
+
412
+ def on_epoch_end(self, epoch, logs=None):
413
+ self._add_event("ttt/train/epoch/end", epoch)
414
+
415
+ def on_train_begin(self, logs=None):
416
+ self._add_event("ttt/train/begin", self.counters["train"])
417
+
418
+ def on_train_end(self, logs=None):
419
+ self._add_event("ttt/train/end", self.counters["train"])
420
+ self.counters["train"] += 1
421
+
422
+ def on_test_begin(self, logs=None):
423
+ self._add_event("ttt/eval/begin", self.counters["eval"])
424
+
425
+ def on_test_end(self, logs=None):
426
+ self._add_event("ttt/eval/end", self.counters["eval"])
427
+ self.counters["eval"] += 1
428
+
429
+ def on_predict_begin(self, logs=None):
430
+ self._add_event("ttt/predict/begin", self.counters["predict"])
431
+
432
+ def on_predict_end(self, logs=None):
433
+ self._add_event("ttt/predict/end", self.counters["predict"])
434
+ self.counters["predict"] += 1
435
+
436
+
437
+ class TimeToTrainEstimatorHook(tf.estimator.SessionRunHook):
438
+ def __init__(self, train_or_eval, output_dir):
439
+ assert train_or_eval in ("eval", "train")
440
+ self._summary_writer = None
441
+ self._output_dir = output_dir
442
+ self._tag = train_or_eval
443
+ self._counter = 0
444
+
445
+ def _add_event(self, tag, value):
446
+ summary = tf.compat.v1.Summary(
447
+ value=[
448
+ tf.compat.v1.Summary.Value(
449
+ tag=tag,
450
+ simple_value=0)
451
+ ]
452
+ )
453
+ event = tf.compat.v1.Event(summary=summary)
454
+ event.wall_time = time.time()
455
+ event.step = self._counter
456
+ self._summary_writer.add_event(event)
457
+
458
+ def begin(self):
459
+ if self._summary_writer is None and self._output_dir:
460
+ self._summary_writer = SummaryWriterCache.get(self._output_dir)
461
+ self._add_event(f"ttt/{self._tag}/begin", self._counter)
462
+
463
+ def after_create_session(self, session, coord):
464
+ pass
465
+
466
+ def before_run(self, run_context):
467
+ pass
468
+
469
+ def after_run(self, run_context, run_values):
470
+ pass
471
+
472
+ def end(self, session):
473
+ self._add_event(f"ttt/{self._tag}/end", self._counter)
474
+ self._counter += 1
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/pack_sequences_ops.cc ADDED
@@ -0,0 +1,168 @@
1
+ #include "third_party/tensorflow/core/framework/op_kernel.h"
2
+ #include "third_party/tensorflow/core/framework/shape_inference.h"
3
+ #include "third_party/tensorflow/core/framework/tensor.h"
4
+ #include "third_party/tensorflow/core/framework/types.h"
5
+
6
+ namespace tensor2tensor {
7
+ namespace {
8
+
9
+ using ::tensorflow::DEVICE_CPU;
10
+ using ::tensorflow::OpKernel;
11
+ using ::tensorflow::OpKernelConstruction;
12
+ using ::tensorflow::OpKernelContext;
13
+ using ::tensorflow::Status;
14
+ using ::tensorflow::Tensor;
15
+ using ::tensorflow::TensorShape;
16
+ using ::tensorflow::shape_inference::InferenceContext;
17
+
18
+ // This op packs a dataset of pairs of sequences (inputs, targets).
19
+ // TODO(noam): generalize later to an arbitrary number of sequences.
20
+ REGISTER_OP("PackSequences2")
21
+ .Input("inputs: int64")
22
+ .Input("targets: int64")
23
+ .Input("inputs_max_length: int32")
24
+ .Input("targets_max_length: int32")
25
+ .Output("inputs_packed: int64")
26
+ .Output("inputs_segmentation: int32")
27
+ .Output("inputs_position: int32")
28
+ .Output("targets_packed: int64")
29
+ .Output("targets_segmentation: int32")
30
+ .Output("targets_position: int32")
31
+ .SetShapeFn([](InferenceContext* ctx) {
32
+ for (int i=0; i < ctx->num_outputs(); i++) {
33
+ ctx->set_output(i, ctx->Matrix(ctx->UnknownDim(),
34
+ ctx->UnknownDim()));
35
+ }
36
+ return Status::OK();
37
+ });
38
+
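+ // Illustrative packing (mirrors pack_sequences_ops_test.py): with max length 5,
+ // inputs [[1,2,3],[4,5,0],[6,0,0]] are packed into [[1,2,3,4,5],[6,0,0,0,0]],
+ // with segmentation [[1,1,1,2,2],[1,0,0,0,0]] and position [[0,1,2,0,1],[0,0,0,0,0]].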
39
+ class PackSequences2Op : public OpKernel {
40
+ public:
41
+ explicit PackSequences2Op(
42
+ OpKernelConstruction* ctx) : OpKernel(ctx) {
43
+ }
44
+
45
+ void Compute(OpKernelContext* ctx) override {
46
+ auto inputs = ctx->input(0).matrix<int64>();
47
+ auto targets = ctx->input(1).matrix<int64>();
48
+ int inputs_max_length = ctx->input(2).scalar<int32>()();
49
+ int targets_max_length = ctx->input(3).scalar<int32>()();
50
+ int n = inputs.dimension(0);
51
+ std::vector<int> inputs_lengths(n);
52
+ std::vector<int> targets_lengths(n);
53
+ int padded_inputs_length =
54
+ std::min(static_cast<int>(inputs.dimension(1)), inputs_max_length);
55
+ for (int i = 0; i < n; i++) {
56
+ for (int j = 0; j < padded_inputs_length; j++) {
57
+ if (inputs(i, j) != 0)
58
+ inputs_lengths[i]++;
59
+ }
60
+ }
61
+ int padded_targets_length =
62
+ std::min(static_cast<int>(targets.dimension(1)), targets_max_length);
63
+ for (int i = 0; i < n; i++) {
64
+ for (int j = 0; j < padded_targets_length; j++) {
65
+ if (targets(i, j) != 0)
66
+ targets_lengths[i]++;
67
+ }
68
+ }
69
+ int num_combined = 0;
70
+ std::vector<int> combined_inputs_length;
71
+ std::vector<int> combined_targets_length;
72
+ std::vector<std::vector<int> > combined_sequence_ids;
73
+ for (int seq_id = 0; seq_id < n; seq_id++) {
74
+ int inputs_length = inputs_lengths[seq_id];
75
+ int targets_length = targets_lengths[seq_id];
76
+ for (int combined_id = std::max(0, num_combined - 1000); true;
77
+ combined_id++) {
78
+ if (combined_id == num_combined) {
79
+ combined_inputs_length.push_back(inputs_length);
80
+ combined_targets_length.push_back(targets_length);
81
+ combined_sequence_ids.push_back(std::vector<int>(1, seq_id));
82
+ num_combined++;
83
+ break;
84
+ } else if (
85
+ (combined_inputs_length[combined_id] + inputs_length
86
+ <= inputs_max_length) &&
87
+ (combined_targets_length[combined_id] + targets_length
88
+ <= targets_max_length)) {
89
+ combined_inputs_length[combined_id] += inputs_length;
90
+ combined_targets_length[combined_id] += targets_length;
91
+ combined_sequence_ids[combined_id].push_back(seq_id);
92
+ break;
93
+ }
94
+ }
95
+ }
96
+
97
+ auto output_shape_inputs = TensorShape(
98
+ {static_cast<int64>(num_combined),
99
+ static_cast<int64>(inputs_max_length)});
100
+ auto output_shape_targets = TensorShape(
101
+ {static_cast<int64>(num_combined),
102
+ static_cast<int64>(targets_max_length)});
103
+
104
+ Tensor* inputs_packed;
105
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(
106
+ 0, output_shape_inputs, &inputs_packed));
107
+ auto inputs_packed_m = inputs_packed->matrix<int64>();
108
+ inputs_packed_m.setZero();
109
+
110
+ Tensor* inputs_segmentation;
111
+ OP_REQUIRES_OK(
112
+ ctx, ctx->allocate_output(
113
+ 1, output_shape_inputs, &inputs_segmentation));
114
+ auto inputs_segmentation_m = inputs_segmentation->matrix<int32>();
115
+ inputs_segmentation_m.setZero();
116
+
117
+ Tensor* inputs_position;
118
+ OP_REQUIRES_OK(
119
+ ctx, ctx->allocate_output(2, output_shape_inputs, &inputs_position));
120
+ auto inputs_position_m = inputs_position->matrix<int32>();
121
+ inputs_position_m.setZero();
122
+
123
+ Tensor* targets_packed;
124
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(
125
+ 3, output_shape_targets, &targets_packed));
126
+ auto targets_packed_m = targets_packed->matrix<int64>();
127
+ targets_packed_m.setZero();
128
+
129
+ Tensor* targets_segmentation;
130
+ OP_REQUIRES_OK(
131
+ ctx, ctx->allocate_output(
132
+ 4, output_shape_targets, &targets_segmentation));
133
+ auto targets_segmentation_m = targets_segmentation->matrix<int32>();
134
+ targets_segmentation_m.setZero();
135
+
136
+ Tensor* targets_position;
137
+ OP_REQUIRES_OK(
138
+ ctx, ctx->allocate_output(5, output_shape_targets, &targets_position));
139
+ auto targets_position_m = targets_position->matrix<int32>();
140
+ targets_position_m.setZero();
141
+
142
+ for (int combined_id = 0; combined_id < num_combined; combined_id++) {
143
+ int inputs_pos = 0;
144
+ int targets_pos = 0;
145
+ for (int i=0; i < combined_sequence_ids[combined_id].size(); i++) {
146
+ int seq_id = combined_sequence_ids[combined_id][i];
147
+ for (int j=0; j < inputs_lengths[seq_id]; j++) {
148
+ inputs_packed_m(combined_id, inputs_pos) = inputs(seq_id, j);
149
+ inputs_segmentation_m(combined_id, inputs_pos) = i + 1;
150
+ inputs_position_m(combined_id, inputs_pos) = j;
151
+ inputs_pos++;
152
+ }
153
+ for (int j=0; j < targets_lengths[seq_id]; j++) {
154
+ targets_packed_m(combined_id, targets_pos) = targets(seq_id, j);
155
+ targets_segmentation_m(combined_id, targets_pos) = i + 1;
156
+ targets_position_m(combined_id, targets_pos) = j;
157
+ targets_pos++;
158
+ }
159
+ }
160
+ }
161
+ }
162
+ };
163
+
164
+ REGISTER_KERNEL_BUILDER(Name("PackSequences2").Device(DEVICE_CPU),
165
+ PackSequences2Op);
166
+
167
+ } // namespace
168
+ } // namespace tensor2tensor
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/pack_sequences_ops_test.py ADDED
@@ -0,0 +1,78 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Tests for pack_sequences_ops."""
17
+
18
+ from __future__ import absolute_import
19
+ from __future__ import division
20
+ from __future__ import print_function
21
+
22
+ from TensorFlow.nlp.transformer.data_generators.ops import pack_sequences_ops
23
+ import tensorflow.compat.v1 as tf
24
+
25
+
26
+ class PackSequencesOpsTest(tf.test.TestCase):
27
+
28
+ def test_pack_sequences(self):
29
+ inputs = [
30
+ [1, 2, 3],
31
+ [4, 5, 0],
32
+ [6, 0, 0],
33
+ ]
34
+ targets = [
35
+ [10, 0, 0],
36
+ [20, 30, 40],
37
+ [50, 60, 0],
38
+ ]
39
+ max_length = 5
40
+ (inputs_packed, inputs_segmentation, inputs_position,
41
+ targets_packed, targets_segmentation, targets_position) = (
42
+ pack_sequences_ops.pack_sequences2(
43
+ inputs, targets, max_length, max_length))
44
+ self.assertAllEqual(
45
+ inputs_packed, [
46
+ [1, 2, 3, 4, 5],
47
+ [6, 0, 0, 0, 0],
48
+ ])
49
+ self.assertAllEqual(
50
+ inputs_segmentation, [
51
+ [1, 1, 1, 2, 2],
52
+ [1, 0, 0, 0, 0],
53
+ ])
54
+ self.assertAllEqual(
55
+ inputs_position, [
56
+ [0, 1, 2, 0, 1],
57
+ [0, 0, 0, 0, 0],
58
+ ])
59
+ self.assertAllEqual(
60
+ targets_packed, [
61
+ [10, 20, 30, 40, 0],
62
+ [50, 60, 0, 0, 0],
63
+ ])
64
+ self.assertAllEqual(
65
+ targets_segmentation, [
66
+ [1, 2, 2, 2, 0],
67
+ [1, 1, 0, 0, 0],
68
+ ])
69
+ self.assertAllEqual(
70
+ targets_position, [
71
+ [0, 0, 1, 2, 0],
72
+ [0, 1, 0, 0, 0],
73
+ ])
74
+
75
+
76
+ if __name__ == "__main__":
77
+ tf.enable_eager_execution()
78
+ tf.test.main()
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/subword_text_encoder.cc ADDED
@@ -0,0 +1,127 @@
1
+ #include "third_party/py/tensor2tensor/data_generators/ops/subword_text_encoder.h"
2
+
3
+ #include "third_party/absl/strings/str_cat.h"
4
+ #include "third_party/absl/strings/str_split.h"
5
+ #include "third_party/absl/strings/string_view.h"
6
+ #include "third_party/icu/include/unicode/uchar.h"
7
+ #include "third_party/icu/include/unicode/utf8.h"
8
+ #include "third_party/tensorflow/core/framework/tensor.h"
9
+ #include "third_party/tensorflow/core/platform/env.h"
10
+
11
+ namespace tensor2tensor {
12
+ namespace {
13
+
14
+ using ::tensorflow::Env;
15
+
16
+ // End of Sequence token ID to insert at end of encoded text.
17
+ constexpr int64 kEosTokenId = 1;
18
+
19
+ } // namespace
20
+
21
+ SubwordTextEncoder::SubwordTextEncoder(const std::string& vocab_filename) {
22
+ // TODO(ormandi): Add a unified vocabulary reader function.
23
+ std::string vocab_contents;
24
+ TF_CHECK_OK(
25
+ ReadFileToString(Env::Default(), vocab_filename, &vocab_contents));
26
+ std::vector<absl::string_view> vocab_list =
27
+ absl::StrSplit(vocab_contents, '\n');
28
+ // Strip trailing newline by skipping last element, then strip the first and
29
+ // last chars to remove enclosing quotes.
30
+ auto vocab_size = vocab_list.size() - vocab_list.back().empty();
31
+ for (auto i = 0; i < vocab_size; ++i) {
32
+ absl::string_view token =
33
+ vocab_list[i].substr(1, vocab_list[i].length() - 2);
34
+ int char_index = 0;
35
+ do {
36
+ // Note throughout that these strings are unicode so we iterate over utf-8
37
+ // code points, which may be between 8-32 bits long, using U8_NEXT. It is
38
+ // important never to iterate directly over ascii characters or models
39
+ // will fail to handle non-ascii alphabets properly.
40
+ UChar32 c;
41
+ U8_NEXT(token, char_index, token.length(), c);
42
+ CHECK_GE(c, 0);
43
+ alphabet_.insert(c);
44
+ } while (char_index < token.length());
45
+ vocab_.insert({std::string(token), i});
46
+ }
47
+ }
48
+
49
+ void SubwordTextEncoder::Encode(absl::string_view text, std::vector<int>* ids) {
50
+ ids->clear();
51
+ int token_start = 0;
52
+ int token_end = 0;
53
+ UChar32 c;
54
+ UChar32 next_c;
55
+ U8_NEXT(text, token_end, text.length(), c);
56
+ CHECK_GE(c, 0);
57
+ while (token_end <= text.length()) {
58
+ int next_end = token_end;
59
+ U8_NEXT(text, next_end, text.length(), next_c);
60
+ CHECK_GE(next_c, 0);
61
+ // Subtoken break when switching from non-alphanum to alphanum, or when
62
+ // reaching the end of the original token.
63
+ if (u_isalnum(next_c) != u_isalnum(c) || token_end >= text.length()) {
64
+ absl::string_view next_token =
65
+ text.substr(token_start, token_end - token_start);
66
+ if (next_token != " ") {
67
+ EncodeSubtokens(next_token, ids);
68
+ }
69
+ token_start = token_end;
70
+ }
71
+ token_end = next_end;
72
+ c = next_c;
73
+ }
74
+ ids->push_back(kEosTokenId);
75
+ }
76
+
77
+ void SubwordTextEncoder::EncodeSubtokens(
78
+ absl::string_view token, std::vector<int> *ids) {
79
+ std::string token_s = EscapeToken(token);
80
+ token = token_s;
81
+ int subtoken_start = 0;
82
+ // TODO(noam): this algorithm is quadratic in the length of the token.
83
+ // We should instead start with a length equal to the maximum subtoken
84
+ // length in the vocabulary.
85
+ int subtoken_end = token.length();
86
+ while (subtoken_start < token.length()) {
87
+ absl::string_view subtoken =
88
+ token.substr(subtoken_start, subtoken_end - subtoken_start);
89
+ auto iter = vocab_.find(subtoken);
90
+ if (iter != vocab_.end()) {
91
+ ids->push_back(iter->second);
92
+ subtoken_start = subtoken_end;
93
+ // TODO(noam): again, set subtoken_end forward only enough to catch
94
+ // the longest subtoken in the vocabulary.
95
+ subtoken_end = token.length();
96
+ } else {
97
+ U8_BACK_1((const uint8_t*)token_s.data(), 0, subtoken_end);
98
+ if (subtoken_end <= subtoken_start) {
99
+ LOG(FATAL) << "Unencodable tokens found.";
100
+ }
101
+ }
102
+ }
103
+ }
104
+
105
+ std::string SubwordTextEncoder::EscapeToken(absl::string_view token) {
106
+ std::string token_s;
107
+ int i = 0;
108
+ do {
109
+ int prev = i;
110
+ UChar32 c;
111
+ U8_NEXT(token, i, token.length(), c);
112
+ CHECK_GE(c, 0);
113
+ if (c == '_') {
114
+ absl::StrAppend(&token_s, "\\u");
115
+ } else if (c == '\\') {
116
+ absl::StrAppend(&token_s, "\\\\");
117
+ } else if (c == '\n' || alphabet_.find(c) == alphabet_.end()) {
118
+ absl::StrAppend(&token_s, "\\", c, ";");
119
+ } else {
120
+ absl::StrAppend(&token_s, token.substr(prev, i - prev));
121
+ }
122
+ } while (i < token.length());
123
+ absl::StrAppend(&token_s, "_");
124
+ return token_s;
125
+ }
126
+
127
+ } // namespace tensor2tensor
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/subword_text_encoder.h ADDED
@@ -0,0 +1,44 @@
1
+ #ifndef TENSOR2TENSOR_DATA_GENERATORS_OPS_SUBWORD_TEXT_ENCODER_H_
2
+ #define TENSOR2TENSOR_DATA_GENERATORS_OPS_SUBWORD_TEXT_ENCODER_H_
3
+
4
+ #include "third_party/absl/container/flat_hash_map.h"
5
+ #include "third_party/absl/container/flat_hash_set.h"
6
+ #include "third_party/absl/strings/string_view.h"
7
+ #include "third_party/icu/include/unicode/uchar.h"
8
+ #include "third_party/tensorflow/core/framework/tensor.h"
9
+
10
+ namespace tensor2tensor {
11
+
12
+ // A subword text encoder with built in tokenizer.
13
+ //
14
+ // Equivalent to tensor2tensor's subword text encoder:
15
+ // https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/text_encoder.py.
16
+ // This code (or a suitable replacement) should eventually move into tfds
17
+ // and should be deleted from TensorFlow.nlp.transformer.
18
+
19
+ class SubwordTextEncoder {
20
+ public:
21
+ explicit SubwordTextEncoder(const std::string& vocab_filename);
22
+ virtual ~SubwordTextEncoder() {}
23
+
24
+ // Breaks up input text into subtokens.
25
+ void Encode(absl::string_view text, std::vector<int>* ids);
26
+
27
+ private:
28
+ // Given a full token as input, breaks the token up into subtokens and appends
29
+ // corresponding IDs to the ids vector.
30
+ void EncodeSubtokens(absl::string_view token, std::vector<int>* ids);
31
+
32
+ // Escapes a token so unencodable characters are replaced by escape sequences.
33
+ std::string EscapeToken(absl::string_view token);
34
+
35
+ // Maps subword tokens to IDs.
36
+ absl::flat_hash_map<std::string, int64> vocab_;
37
+ // A set containing all valid unicode code points that can be encoded without
38
+ // being escaped.
39
+ absl::flat_hash_set<UChar32> alphabet_;
40
+ };
41
+
42
+ } // namespace tensor2tensor
43
+
44
+ #endif // TENSOR2TESNOR_DATA_GENERATORS_OPS_SUBWORD_TEXT_ENCODER_H_
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/subword_text_encoder_ops.cc ADDED
@@ -0,0 +1,66 @@
+ #include "third_party/py/tensor2tensor/data_generators/ops/subword_text_encoder.h"
+ #include "third_party/tensorflow/core/framework/op_kernel.h"
+ #include "third_party/tensorflow/core/framework/shape_inference.h"
+ #include "third_party/tensorflow/core/framework/tensor.h"
+ #include "third_party/tensorflow/core/framework/types.h"
+
+ namespace tensor2tensor {
+ namespace {
+
+ using ::tensorflow::DEVICE_CPU;
+ using ::tensorflow::OpKernel;
+ using ::tensorflow::OpKernelConstruction;
+ using ::tensorflow::OpKernelContext;
+ using ::tensorflow::Status;
+ using ::tensorflow::Tensor;
+ using ::tensorflow::TensorShape;
+ using ::tensorflow::tstring;
+ using ::tensorflow::shape_inference::InferenceContext;
+
+ REGISTER_OP("SubwordTextEncoderEncode")
+ .Input("s: string")
+ .Output("encoded: int64")
+ .Attr("vocab_filename: string")
+ .SetShapeFn([](InferenceContext* ctx) {
+ ctx->set_output(0, ctx->Vector(ctx->UnknownDim()));
+ return Status::OK();
+ });
+
+ class SubwordTextEncoderEncodeOp : public OpKernel {
+ public:
+ explicit SubwordTextEncoderEncodeOp(
+ OpKernelConstruction* ctx) : OpKernel(ctx) {
+ std::string vocab_filename;
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("vocab_filename", &vocab_filename));
+ encoder_ = absl::make_unique<SubwordTextEncoder>(vocab_filename);
+ }
+
+ void Compute(OpKernelContext* ctx) override {
+ // Get the input string to encode.
+ absl::string_view s = ctx->input(0).scalar<tstring>()();
+
+ // Construct encoded output tensors.
+ std::vector<int> encoded_ids;
+ encoder_->Encode(s, &encoded_ids);
+ Tensor* encoded;
+ OP_REQUIRES_OK(
+ ctx,
+ ctx->allocate_output(0, TensorShape(
+ {static_cast<int64>(encoded_ids.size())}), &encoded));
+ auto encoded_vec = encoded->vec<int64>();
+ // TODO(noam): find someone who remembers c++ eigen and ask the proper way
+ // to copy a std::vector to an Eigen tensor map.
+ for (int i = 0; i < encoded_ids.size(); i++) {
+ encoded_vec(i) = encoded_ids[i];
+ }
+ }
+
+ private:
+ std::unique_ptr<SubwordTextEncoder> encoder_;
+ };
+
+ REGISTER_KERNEL_BUILDER(Name("SubwordTextEncoderEncode").Device(DEVICE_CPU),
+ SubwordTextEncoderEncodeOp);
+
+ } // namespace
+ } // namespace tensor2tensor
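
Note: the Python wrapper module imported by the test below (subword_text_encoder_ops) is not part of this diff. A kernel registered this way is typically exposed by loading the compiled shared object; the sketch below shows that pattern, with the .so path and wrapper name being illustrative assumptions rather than this repository's actual build output.

    # Sketch of a typical wrapper module (assumed, not the repo's actual code).
    import tensorflow.compat.v1 as tf

    _ops = tf.load_op_library("subword_text_encoder_ops.so")  # hypothetical path

    def subword_text_encoder_encode(s, vocab_filename):
        # Forwards to the C++ kernel registered as "SubwordTextEncoderEncode";
        # "s" is the op input and "vocab_filename" the op attr defined above.
        return _ops.subword_text_encoder_encode(s=s, vocab_filename=vocab_filename)
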
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/subword_text_encoder_ops_test.py ADDED
@@ -0,0 +1,40 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Tensor2Tensor Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Tests for subword_text_encoder_ops."""
17
+
18
+ from __future__ import absolute_import
19
+ from __future__ import division
20
+ from __future__ import print_function
21
+
22
+ from TensorFlow.nlp.transformer.data_generators.ops import subword_text_encoder_ops
23
+ import tensorflow.compat.v1 as tf
24
+
25
+ vocab_file = (
26
+ "third_party/py/tensor2tensor/data_generators/ops/testdata/subwords")
27
+
28
+
29
+ class SubwordTextEncoderOpsTest(tf.test.TestCase):
30
+
31
+ def test_subword_text_encoder_encode(self):
32
+ s = "the quick brown fox jumps over the lazy dog"
33
+ encoded = subword_text_encoder_ops.subword_text_encoder_encode(
34
+ s, vocab_file)
35
+ self.assertAllEqual(encoded, [2, 3, 4, 5, 6, 7, 8, 9, 2, 11, 12, 1])
36
+
37
+
38
+ if __name__ == "__main__":
39
+ tf.enable_eager_execution()
40
+ tf.test.main()
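
Note: the expected ids in this test are simply 0-based line indices into the testdata/subwords vocabulary added later in this diff. A quick sanity check, assuming that file's format of one quoted subword per line, is:

    # Sketch: map the expected ids back to subword strings from the vocab file.
    with open(vocab_file) as f:
        vocab = [line.strip().strip("'") for line in f]   # id == 0-based line index
    print([vocab[i] for i in [2, 3, 4, 5, 6, 7, 8, 9, 2, 11, 12, 1]])
    # -> ['the_', 'quick_', 'brow', 'n_', 'fox_', 'jump', 's_', 'over_',
    #     'the_', 'lazy_', 'dog_', '<eos>']
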
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/subword_text_encoder_test.cc ADDED
@@ -0,0 +1,44 @@
+ #include "third_party/py/tensor2tensor/data_generators/ops/subword_text_encoder.h"
+
+ #include "testing/base/public/gunit.h"
+ #include "third_party/tensorflow/core/framework/tensor.h"
+ #include "third_party/tensorflow/core/framework/tensor_testutil.h"
+
+ namespace tensor2tensor {
+ namespace {
+
+ TEST(SubwordTextEncoderTest, EncodesSubTokens) {
+ SubwordTextEncoder encoder("third_party/py/tensor2tensor/"
+ "data_generators/ops/testdata/subwords");
+ std::vector<int> t;
+ encoder.Encode("the quick brown fox jumps over the lazy dog", &t);
+ EXPECT_EQ(t, std::vector<int>({2, 3, 4, 5, 6, 7, 8, 9, 2, 11, 12, 1}));
+ }
+
+ TEST(SubwordTextEncoderTest, EncodesUnicodeSubTokens) {
+ SubwordTextEncoder encoder("third_party/py/tensor2tensor/"
+ "data_generators/ops/testdata/subwords");
+ std::vector<int> t;
+ encoder.Encode("ɧęĻĽÒ", &t);
+ EXPECT_EQ(t, std::vector<int>({13, 14, 1}));
+ }
+
+ TEST(SubwordTextEncoderTest, EncodesUnicodeCodePoints) {
+ SubwordTextEncoder encoder("third_party/py/tensor2tensor/"
+ "data_generators/ops/testdata/subwords");
+ std::vector<int> t;
+ encoder.Encode("⻦ ⻭", &t);
+ EXPECT_EQ(t, std::vector<int>({15, 18, 16, 17, 1}));
+ }
+
+ TEST(SubwordTextEncoderTest, EncodesCharactersNotInAlphabet) {
+ SubwordTextEncoder encoder("third_party/py/tensor2tensor/"
+ "data_generators/ops/testdata/subwords");
+ std::vector<int> t;
+ encoder.Encode("!", &t);
+ // Subtokens: '\', '3', '3', ';', '_', followed by '<eos>'.
+ EXPECT_EQ(t, std::vector<int>({19, 23, 23, 30, 17, 1}));
+ }
+
+ } // namespace
+ } // namespace tensor2tensor
docker/bloom13b/Model-References/TensorFlow/nlp/transformer/data_generators/ops/testdata/subwords ADDED
@@ -0,0 +1,31 @@
+ '<pad>'
+ '<eos>'
+ 'the_'
+ 'quick_'
+ 'brow'
+ 'n_'
+ 'fox_'
+ 'jump'
+ 's_'
+ 'over_'
+ 'the_'
+ 'lazy_'
+ 'dog_'
+ 'ɧę'
+ 'ĻĽÒ_'
+ '⻦'
+ '⻭'
+ '_'
+ ' '
+ '\'
+ '0'
+ '1'
+ '2'
+ '3'
+ '4'
+ '5'
+ '6'
+ '7'
+ '8'
+ '9'
+ ';'
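
Note: the EncodesCharactersNotInAlphabet test above can be verified against this vocabulary by hand. '!' (code point 33) is not in the alphabet, so it is escaped to "\33;" plus the trailing '_', and each resulting single-character subtoken maps to its 0-based line index above. A small sketch of that arithmetic, under the same index assumption:

    # Sketch: derive the expected ids for "!" from the vocab above.
    vocab = {"\\": 19, "3": 23, ";": 30, "_": 17, "<eos>": 1}  # 0-based line indices
    escaped = "\\%d;" % ord("!") + "_"          # '!' is outside the alphabet -> "\33;_"
    ids = [vocab[ch] for ch in escaped] + [vocab["<eos>"]]
    assert ids == [19, 23, 23, 30, 17, 1]
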