applied-ai-018 committed
Commit feb6f45 · verified · 1 Parent(s): fcc8d8e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/README.md +164 -0
  2. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/accuracy_from_perf.config +8 -0
  3. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/functions.sh +93 -0
  4. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/.gitignore +5 -0
  5. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/DATASETS_MODELS.md +8 -0
  6. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/LICENSE +201 -0
  7. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/backend.py +314 -0
  8. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/backlog.py +56 -0
  9. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/calibration-list.txt +1000 -0
  10. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/configs/bf16.conf +9 -0
  11. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/configs/fp8-99.9.conf +9 -0
  12. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/configs/fp8-99.conf +9 -0
  13. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/dataset.py +90 -0
  14. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/habana_generation_utils.py +543 -0
  15. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/hgu_options.py +31 -0
  16. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/main.py +117 -0
  17. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/mlperf.conf +64 -0
  18. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/modeling_gptj.py +782 -0
  19. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/prepare-calibration.py +59 -0
  20. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/quantization/configuration/config.py +69 -0
  21. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/socket_worker.py +268 -0
  22. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/user.conf +11 -0
  23. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/prepare_and_check_submission.py +189 -0
  24. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/run_mlperf_scenarios.py +241 -0
  25. docker/bloom13b/Model-References/MLPERF3.1/Inference/code/scenarios.yaml +38 -0
  26. docker/bloom13b/Model-References/MLPERF3.1/Inference/measurements/gptj-99.9/Offline/HLS-Gaudi2-PT_PyTorch_Offline.json +7 -0
  27. docker/bloom13b/Model-References/MLPERF3.1/Inference/measurements/gptj-99.9/Server/HLS-Gaudi2-PT_PyTorch_Server.json +7 -0
  28. docker/bloom13b/Model-References/MLPERF3.1/Inference/measurements/gptj-99.9/Server/README.md +25 -0
  29. docker/bloom13b/Model-References/MLPERF3.1/Inference/systems/HLS-Gaudi2-PT.json +38 -0
  30. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/HLS-Gaudi2-TF/defaults.cfg +40 -0
  31. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/HLS-Gaudi2-TF/launch_bert_hvd.sh +611 -0
  32. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/HLS-Gaudi2-TF/run.sh +164 -0
  33. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/chop_hdf5_files.py +150 -0
  34. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/create_pretraining_data.py +455 -0
  35. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/create_pretraining_data_wrapper.sh +30 -0
  36. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/pick_eval_samples.py +83 -0
  37. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/pick_eval_samples_varlength.py +76 -0
  38. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/prepare_data.sh +154 -0
  39. docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/tokenization.py +413 -0
  40. docker/intel_code/.gitignore +3 -0
  41. docker/intel_code/README.md +2 -0
  42. docker/intel_code/llama13b/Megatron-DeepSpeed/CODEOWNERS +1 -0
  43. docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_gpt.py +385 -0
  44. docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_gpt_core.py +127 -0
  45. docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_retro.py +123 -0
  46. docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_t5.py +163 -0
  47. docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_vision_classify.py +105 -0
  48. docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_vision_dino.py +110 -0
  49. docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_vision_inpaint.py +140 -0
  50. docker/intel_code/llama13b/Megatron-DeepSpeed/setup.py +111 -0
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/README.md ADDED
@@ -0,0 +1,164 @@
+ # Habana MLPerf™ inference submission
+ This directory provides instructions to reproduce Habana's results for the MLPerf™ inference submission.\
+ MLPerf™ is a trademark and service mark of MLCommons Association in the United States and other countries.\
+ All rights reserved. Unauthorized use is strictly prohibited.
+
+ - [Habana MLPerf™ inference submission](#habana-mlperf-inference-submission)
+ - [Setup](#setup)
+ - [Prepare MLPerf Directory](#prepare-mlperf-directory)
+ - [Build and Deploy HabanaLabs Container](#build-and-deploy-habanalabs-container)
+ - [Download Checkpoint](#download-checkpoint)
+ - [Download Dataset](#download-dataset)
+ - [Reproduce Results](#reproduce-results)
+ - [99 and 99.9 Accuracy](#99-and-999-accuracy)
+ - [Get Started](#get-started)
+ - [Generate Results](#generate-results)
+ - [Performance Optimization with FP8 Flow](#performance-optimization-with-fp8-flow)
+ - [Environment Variables](#environment-variables)
+ - [Supported Configurations](#supported-configurations)
+ - [Changelog](#changelog)
+
+ ## Setup
+
+ Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html) to set up the environment.
+
+ ### Prepare MLPerf Directory
+
+ Perform the following:
+
+ 1. Follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html) to set up the environment, including the `$PYTHON` environment variable.
+ The guide will walk you through the process of setting up your system to run the benchmarks on Gaudi.
+
+ 2. Clone the Model-References repository and switch to the branch that matches your SynapseAI version. You can run the
+ [`hl-smi`](https://docs.habana.ai/en/latest/Management_and_Monitoring/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options)
+ utility to determine the SynapseAI version.
+
+ ```bash
+ export MLPERF_ROOT=/path/to/mlperf/root
+ cd $MLPERF_ROOT
+ git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References
+ export MLPERF_DIR=$MLPERF_ROOT/Model-References/MLPERF3.1/Inference
+ ```
+
+ ### Build and Deploy HabanaLabs Container
+
+ To build the MLPerf inference 3.1 container, perform the following:
+
+ 1. Set the environment variables for the docker command.
+ * To find a docker image, go to [gaudi-docker](https://vault.habana.ai/ui/repos/tree/General/gaudi-docker).
+ * Open the gaudi-docker directory and select the folder that matches the SynapseAI version (determined by running [`hl-smi`](https://docs.habana.ai/en/latest/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options)).
+ * Navigate to the subdirectories and choose the system and framework version.
+ * Choose the docker build version. Most often 'latest' will be used.
+ * Navigate to the "Docker Info" tab and note the "Title" string.
+ * Set `DOCKER_IMAGE` to the "Title" string with the `vault.habana.ai/gaudi-docker/` prefix. See the example below.
+ * Example for a PyTorch container:
+ ```bash
+ # NOTE: The below is only an example value. Replace [SynapseAI Version] and [PT Version] to match your setup and Supported Configuration.
+ export DOCKER_IMAGE=vault.habana.ai/gaudi-docker/[SynapseAI Version]/ubuntu20.04/habanalabs/pytorch-installer-[PT Version]:latest
+ ```
+
+ 2. Create the `mlperf-habana` container by running the following command.
+
+ ```bash
+ docker run --privileged --security-opt seccomp=unconfined \
+ --name mlperf-habana -td \
+ -v /dev:/dev \
+ --device=/dev:/dev \
+ -v /sys/kernel/debug:/sys/kernel/debug \
+ -v /tmp:/tmp \
+ -v $MLPERF_DIR:/root/Habana/ \
+ --cap-add=sys_nice --cap-add=SYS_PTRACE \
+ --user root --workdir=/root --net=host \
+ --ulimit memlock=-1:-1 ${DOCKER_IMAGE}
+ ```
+
+ 3. Start the docker container.
+ ```bash
+ docker exec -it mlperf-habana bash
+ ```
+
+ ### Download Checkpoint
+ ```bash
+ mkdir -p /mnt/weka/data/pytorch/
+ pushd /mnt/weka/data/pytorch/
+ wget https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download --output-document checkpoint.zip
+ unzip -q checkpoint.zip && rm checkpoint.zip
+ popd
+ ```
+
+ ### Download Dataset
+ ```bash
+ pushd /root/Habana/code/gptj-99.9/gpt-j
+ python download_cnndm.py
+ cp data/cnn_eval.json /mnt/weka/data/pytorch/gpt-j/cnn_eval.json
+ popd
+ ```
+
+ ## Reproduce Results
+ ### 99 and 99.9 Accuracy
+ The same script was submitted for both the 99 and 99.9 benchmarks: no additional optimizations were made for the lower-accuracy (99) target, and the 99.9 results were used for 99 as well.
+
+ ### Get Started
+ Install the requirements and build the latest loadgen.
+
+ ```bash
+ cd /root/Habana/code
+ source functions.sh
+ build_mlperf_inference
+ ```
+ ### Generate Results
+ **To generate full submission results, run the following command:**
+ ```bash
+ build_mlperf_inference --output-dir <path_to_output_dir> --submission gptj-99.9-fp8
+ ```
+ The command produces results from accuracy and performance runs for both the Offline and Server scenarios.
+ Logs can be found under `<output_dir>/logs/<model>/`, e.g. `/results/logs/gptj-99.9-fp8/`.
+
+ **To generate results for the Offline and Server scenarios separately, run the following commands:**
+ ```bash
+ build_mlperf_inference --output-dir <path_to_output_dir> --submission gptj-99.9-fp8_Offline
+ ```
+
+ ```bash
+ build_mlperf_inference --output-dir <path_to_output_dir> --submission gptj-99.9-fp8_Server
+ ```
+ Logs can be found under `<output_dir>/logs/<model>/<scenario>/`, e.g. `/results/logs/gptj-99.9-fp8/Offline/`.
+
+ **To generate results for accuracy and performance separately, add the `--mode` flag as in one of the following commands:**
+ ```bash
+ build_mlperf_inference --output-dir <path_to_output_dir> --submission gptj-99.9-fp8_Server --mode acc
+ ```
+ ```bash
+ build_mlperf_inference --output-dir <path_to_output_dir> --submission gptj-99.9-fp8_Offline --mode perf
+ ```
+
+ Logs can be found under `<output_dir>/logs/<model>/<scenario>/<mode>/`, e.g. `/results/logs/gptj-99.9-fp8/Offline/accuracy/`.
+
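+ As a minimal illustration of this layout, the snippet below (not part of the submission; it assumes loadgen's standard `mlperf_log_summary.txt` file name) lists which model/scenario/mode directories contain finished runs:
+
+ ```python
+ # Illustrative only: enumerate <model>/<scenario>/<mode> log directories under
+ # <output_dir>/logs that contain a loadgen summary file.
+ from pathlib import Path
+
+ def completed_runs(output_dir):
+     logs = Path(output_dir) / "logs"
+     return sorted(str(p.parent.relative_to(logs))
+                   for p in logs.rglob("mlperf_log_summary.txt"))
+
+ for run in completed_runs("results"):
+     print(run)  # e.g. gptj-99.9-fp8/Offline/accuracy
+ ```
+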
+ ## Performance Optimization with FP8 Flow
+ To optimize performance, we set the most performance-critical ops to operate in fp8-143.
+
+ All fp8 ops work with a fixed fp8 exponent bias of 7, so no scaling is required.
+
+ ### Environment Variables
+ The following table outlines the custom environment variables used in the GPT-J submission script:
+
+ | Environment Variable | Effect |
+ |----------------------|--------|
+ | PT_USE_FP8_143=1 | Sets the PT backend fp8 flavor to fp8_143 |
+ | UPDATE_MME_OUTPUT_PRECISION_FILTER="v_proj,matmul_av" | Allows the specified MME layers to output fp8 for performance optimization |
+ | SCALES_FILE_PATH=quantization/measurements/per_tensor_scales_gpt_j.json | Loads the per-tensor scales required for fp8 quantization. If not provided, no scaling is applied |
+ | ENABLE_EXPERIMENTAL_FLAGS=true | Enables the above flags |
+
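+ For illustration, these variables can be exported before launching the benchmark. The `main.py` arguments below are placeholders only; the submission's own scripts (`functions.sh`, `run_mlperf_scenarios.py`) wire the actual invocation:
+
+ ```python
+ # Hedged sketch: apply the fp8-related variables described above and launch
+ # the benchmark as a subprocess. The command line is an assumption made for
+ # this example, not the exact command used by the submission.
+ import os
+ import subprocess
+
+ fp8_env = {
+     "PT_USE_FP8_143": "1",
+     "UPDATE_MME_OUTPUT_PRECISION_FILTER": "v_proj,matmul_av",
+     "SCALES_FILE_PATH": "quantization/measurements/per_tensor_scales_gpt_j.json",
+     "ENABLE_EXPERIMENTAL_FLAGS": "true",
+ }
+
+ subprocess.run(
+     ["python", "main.py", "--scenario", "Offline", "--dtype", "float8"],
+     env={**os.environ, **fp8_env},
+     check=True,
+ )
+ ```
+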
+ ## Supported Configurations
+
+ | Validated on | SynapseAI Version | Framework Version(s) | Mode |
+ | :----------: | :---------------: | :------------------: | :-------: |
+ | Gaudi2 | 1.14.0 | PyTorch 2.1.1 | Inference |
+
+ ## Changelog
+ ### 1.13.0
+ - Published MLPerf™ inference 3.1 GPT-J script
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/accuracy_from_perf.config ADDED
@@ -0,0 +1,8 @@
+ # The format of this config file is 'key = value'.
+ # The key has the format 'model.scenario.key'. Value is mostly int64_t.
+ # Model may be '*' as a wildcard. In that case the value applies to all models.
+ # All times are in milliseconds.
+
+ # mode dictionary (0 = submission, 1 = accuracy, 2 = performance, 3 = find peak perf)
+ *.*.mode = 2
+ *.*.accuracy_log_sampling_target = 24576
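A minimal sketch (the helper below is hypothetical and not part of this submission) of how a `model.scenario.key = value` file in this format can be read, treating `*` as the wildcard described in the comments above:

```python
# Hypothetical helper: parse a loadgen-style 'model.scenario.key = value'
# config such as accuracy_from_perf.config, where '*' acts as a wildcard.
def load_loadgen_overrides(path):
    overrides = {}
    with open(path) as f:
        for raw in f:
            line = raw.split("#", 1)[0].strip()  # drop comments and blank lines
            if not line:
                continue
            key, value = (part.strip() for part in line.split("=", 1))
            model, scenario, setting = key.split(".", 2)
            overrides[(model, scenario, setting)] = int(value)  # values are mostly int64
    return overrides

# Example: load_loadgen_overrides("accuracy_from_perf.config")[("*", "*", "mode")] == 2
```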
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/functions.sh ADDED
@@ -0,0 +1,93 @@
+ #!/bin/bash
+
+ ###############################################################################
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
+ ###############################################################################
+
+ [[ $0 != $BASH_SOURCE ]] || echo "This script must be sourced!"
+
+ export MLPERF_INFERENCE_CODE_DIR=$(realpath $(dirname $BASH_SOURCE))
+
+ function mlperf_inference_usage()
+ {
+     echo -e "\n usage: build_mlperf_inference [options]\n"
+     echo -e "options:\n"
+     echo -e "  --output-dir    Path to save logs, results and summary; optional"
+     echo -e "  --skip-reqs     Skip installing requirements, downloading MLCommons Inference and building loadgen; optional"
+     echo -e "  --compliance    Create a submission package compliant with MLCommons submission checker; optional"
+     echo -e "  --submission    List of scenarios to run; optional"
+     echo -e "  -h, --help      Prints this help"
+ }
+
+ build_mlperf_inference()
+ {
+     output_dir=$(pwd)/results
+     submission_args=""
+     compliance=false
+     skip_reqs=false
+
+     while [ -n "$1" ];
+     do
+         case $1 in
+             -h | --help )
+                 mlperf_inference_usage
+                 return 0
+                 ;;
+             --output-dir )
+                 output_dir=$2
+                 shift 2
+                 ;;
+             --compliance )
+                 compliance=true
+                 shift 1
+                 ;;
+             --skip-reqs )
+                 shift
+                 skip_reqs=true
+                 ;;
+             --submission )
+                 shift
+                 submission_args=$@
+                 break
+                 ;;
+             --precommit )
+                 shift
+                 submission_args="gptj-99-quick"
+                 break
+                 ;;
+             --promotion )
+                 shift
+                 submission_args="gptj-99-quick"
+                 break
+                 ;;
+             * )
+                 # Unknown option: print usage and bail out instead of looping forever.
+                 echo "Unknown option: $1"
+                 mlperf_inference_usage
+                 return 1
+                 ;;
+         esac
+     done
+
+     if [ "$skip_reqs" == "false" ]; then
+         pip install -r $MLPERF_INFERENCE_CODE_DIR/gpt-j/requirements.txt
+
+         BUILD_DIR=$(mktemp -d -t mlperf.XXXX)
+         pushd $BUILD_DIR
+         git clone --depth 1 --recurse-submodules https://github.com/mlcommons/inference.git mlcommons_inference
+         cd mlcommons_inference/loadgen
+         CFLAGS="-std=c++14 -O3" python setup.py bdist_wheel
+         cd ..; pip install --force-reinstall loadgen/dist/`ls -r loadgen/dist/ | head -n1` ; cd -
+         popd
+     fi
+
+     if [ ! -z "$submission_args" ]; then
+         pushd $MLPERF_INFERENCE_CODE_DIR
+         if [ "$compliance" == "true" ]; then
+             python run_mlperf_scenarios.py $submission_args --output-dir $output_dir --mlperf-path $BUILD_DIR/mlcommons_inference
+             python prepare_and_check_submission.py $submission_args --output-dir $output_dir --mlperf-path $BUILD_DIR/mlcommons_inference --systems-dir-path $MLPERF_INFERENCE_CODE_DIR/../systems --measurements-dir-path $MLPERF_INFERENCE_CODE_DIR/../measurements
+         else
+             python run_mlperf_scenarios.py $submission_args --output-dir $output_dir
+         fi
+         popd
+     fi
+
+     # BUILD_DIR only exists when the requirements/loadgen step ran.
+     [ -n "$BUILD_DIR" ] && rm -rf $BUILD_DIR
+ }
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/.gitignore ADDED
@@ -0,0 +1,5 @@
+ prof_*
+ .graph_dumps/*
+ __pycache__/*
+ build/*
+ data/*
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/DATASETS_MODELS.md ADDED
@@ -0,0 +1,8 @@
+ # Datasets and Models
+
+ This is a comprehensive list of the public datasets and models used by this repository.
+
+ | Name (Link/Source) | Framework | Use Case |
+ |--------------------|-----------|----------|
+ | [cnn_dailymail (Hugging Face)](https://huggingface.co/datasets/cnn_dailymail) | PyTorch | Text Summarization |
+ | [gpt-j-6b (Hugging Face)](https://huggingface.co/EleutherAI/gpt-j-6b) | PyTorch | Text Summarization |
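For reference, both assets listed above can be pulled directly from Hugging Face. The `"3.0.0"` dataset config name and the bfloat16 dtype below are assumptions for this sketch; the MLPerf scripts use their own download and preprocessing flow (`download_cnndm.py`, `prepare-calibration.py`):

```python
# Minimal sketch: fetch the public dataset and model listed above.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

cnn_dm = load_dataset("cnn_dailymail", "3.0.0", split="validation")
print(cnn_dm[0]["article"][:200])  # raw article text used for summarization

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6b")
model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-j-6b", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True
)
```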
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/backend.py ADDED
@@ -0,0 +1,314 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+
5
+ import time
6
+ import math
7
+ import array
8
+ import statistics
9
+ import torch
10
+ from contextlib import contextmanager
11
+ from transformers import AutoModelForCausalLM, AutoTokenizer
12
+ import mlperf_loadgen as lg
13
+
14
+ from dataset import Dataset
15
+ import habana_generation_utils as hgu
16
+ import modeling_gptj as hpu_modeling_gptj
17
+ import quantization.quantize as quantize
18
+ from torch.utils.tensorboard import SummaryWriter
19
+
20
+
21
+ gen_kwargs = {
22
+ "max_new_tokens": 128,
23
+ "min_new_tokens": 30,
24
+ }
25
+
26
+
27
+ def setup_pt_profiler(schedule):
28
+ activities = [torch.profiler.ProfilerActivity.CPU]
29
+ activities.extend([torch.profiler.ProfilerActivity.HPU])
30
+
31
+ profiler = torch.profiler.profile(
32
+ schedule=schedule,
33
+ activities=activities,
34
+ on_trace_ready=torch.profiler.tensorboard_trace_handler('.', use_gzip=True),
35
+ record_shapes=True,
36
+ with_stack=True)
37
+ return profiler
38
+
39
+
40
+ def setup_hltv_profiler(schedule):
41
+ import sys
42
+ import os
43
+ sys.path.append(os.environ['PYTORCH_MODULES_ROOT_PATH'])
44
+ from topologies.tools import SynapseProfilerApi, TraceType
45
+ api = SynapseProfilerApi()
46
+
47
+ class SynapseProfiler:
48
+ def check(self):
49
+ if schedule(self.cur_step) == torch.profiler.ProfilerAction.RECORD_AND_SAVE:
50
+ api.profiler_start(TraceType.TraceAll, 0)
51
+
52
+ def start(self):
53
+ self.cur_step = 0
54
+ self.check()
55
+
56
+ def step(self):
57
+ self.cur_step = self.cur_step + 1
58
+ self.check()
59
+
60
+ def stop(self):
61
+ api.profiler_stop(TraceType.TraceAll, 0)
62
+ api.profiler_get_trace_json(TraceType.TraceAll, 0)
63
+
64
+ return SynapseProfiler()
65
+
66
+
67
+ def setup_profiler(step, profile_type):
68
+ active = 1
69
+ warmup = 1 if step > 0 else 0
70
+ wait = max(step - warmup, 0)
71
+
72
+ schedule = torch.profiler.schedule(wait=wait, warmup=warmup, active=active, repeat=1)
73
+
74
+ if profile_type == 'tb':
75
+ return setup_pt_profiler(schedule)
76
+ else:
77
+ return setup_hltv_profiler(schedule)
78
+
79
+
80
+ class SUT_base():
81
+ def __init__(self, args, options):
82
+ print("Loading PyTorch model...")
83
+ self.dataset_path = args.dataset_path
84
+ self.model_path = args.model_path
85
+ self.batch_size = args.batch_size
86
+ self.input_length = 1919
87
+ self.max_length = self.input_length + gen_kwargs['max_new_tokens'] + 1
88
+ self.profile = args.profile
89
+ self.profile_type = args.profile_type
90
+ self.inference_times = []
91
+ self.tb_writer = SummaryWriter() if args.enable_tensorboard_logging else None
92
+ self.is_eager = args.eager
93
+
94
+ gen_kwargs["num_beams"] = options["num_beams"]
95
+ gen_kwargs["early_stopping"] = options["early_stopping"]
96
+
97
+ if args.device == "cuda":
98
+ assert torch.cuda.is_available(), "CUDA device is not available!"
99
+ elif args.device == "hpu":
100
+ import habana_frameworks.torch.core
101
+ assert torch.hpu.is_available(), "HPU device is not available!"
102
+ self.device = torch.device(args.device)
103
+
104
+ self.model = self.setup_model(args)
105
+
106
+ self.hgu_opts = hgu.GenerationOptions(
107
+ max_length=self.max_length,
108
+ min_length=self.input_length+gen_kwargs['min_new_tokens'],
109
+ max_input_length=self.max_length,
110
+ **options,
111
+ )
112
+ if self.profile:
113
+ self.hgu_opts.max_iterations = args.profile_tokens
114
+ if args.dtype == "float8":
115
+ self.hgu_opts.kv_cache_fp8 = True
116
+
117
+ self.tokenizer = AutoTokenizer.from_pretrained(
118
+ self.model_path,
119
+ model_max_length=self.max_length,
120
+ padding_side="left",
121
+ use_fast=True,)
122
+ self.tokenizer.pad_token = self.tokenizer.eos_token
123
+
124
+ self.data_object = Dataset(
125
+ self.model_path, self.dataset_path, total_count_override=args.max_examples)
126
+ self.qsl = lg.ConstructQSL(self.data_object.count, self.data_object.perf_count,
127
+ self.data_object.LoadSamplesToRam, self.data_object.UnloadSamplesFromRam)
128
+
129
+ def setup_model(self, args):
130
+ if self.device.type == "hpu":
131
+ model = hpu_modeling_gptj.GPTJForCausalLM.from_pretrained(
132
+ self.model_path,
133
+ low_cpu_mem_usage=True,
134
+ torch_dtype=torch.bfloat16
135
+ )
136
+ else:
137
+ is_gpu = self.device.type == "cuda"
138
+ model = AutoModelForCausalLM.from_pretrained(
139
+ self.model_path,
140
+ device_map="auto" if not is_gpu else None,
141
+ low_cpu_mem_usage=True if not is_gpu else False,
142
+ torch_dtype=torch.bfloat16
143
+ )
144
+
145
+ if model.config.pad_token_id is None:
146
+ model.config.pad_token_id = model.config.eos_token_id
147
+ model.to(torch.bfloat16)
148
+ model.to(self.device)
149
+
150
+ if self.device.type == "hpu":
151
+ if not self.is_eager:
152
+ import habana_frameworks.torch.hpu.graphs as htgraphs
153
+ model = htgraphs.wrap_in_hpu_graph(model)
154
+ if args.quantization_file:
155
+ model = quantize.setup_quantization(model, args.quantization_file)
156
+ return model
157
+
158
+ def warmup(self):
159
+ print("Warming up...")
160
+ dummy_tensor = torch.ones([self.batch_size, self.input_length], dtype=torch.int64)
161
+ input_batch = {
162
+ "input_ids": dummy_tensor, "attention_mask": dummy_tensor.detach().clone()
163
+ }
164
+ input_batch, _, _ = hgu.prepare_decoder_only_input_without_moving(
165
+ self.tokenizer.pad_token_id, self.hgu_opts, input_batch)
166
+
167
+ t_start = time.time()
168
+ _ = self.inference_call(input_batch).cpu().numpy()
169
+ t_end = time.time()
170
+ print("Warmup took {:.2f} ms".format((t_end-t_start)*1000))
171
+
172
+ def issue_queries(self, query_samples):
173
+ num_samples = len(query_samples)
174
+ batches = math.ceil(num_samples / self.batch_size)
175
+ print("Number of Samples in query_samples : ", num_samples)
176
+
177
+ profiler = None
178
+ if self.profile:
179
+ profiler = setup_profiler(batches - 1, self.profile_type)
180
+ profiler.start()
181
+ for batch_id in range(batches):
182
+ start_index = batch_id * self.batch_size
183
+ batch_size = min(num_samples - start_index, self.batch_size)
184
+
185
+ input_batch = self.prepare_input_batch(query_samples, start_index, batch_size)
186
+ input_batch, _, _ = hgu.prepare_decoder_only_input_without_moving(
187
+ self.tokenizer.pad_token_id, self.hgu_opts, input_batch)
188
+
189
+ with self.measure_and_save_time(batch_id):
190
+ output_batch = self.inference_call(input_batch).cpu().numpy()
191
+ if profiler:
192
+ profiler.step()
193
+
194
+ self.send_responses(query_samples, start_index, batch_size, output_batch)
195
+ if profiler:
196
+ profiler.stop()
197
+
198
+ def prepare_input_batch(self, query_samples, start_index, batch_size):
199
+ indices = [
200
+ query_samples[start_index + j].index for j in range(batch_size)
201
+ ]
202
+ while len(indices) < self.batch_size:
203
+ indices.append(indices[0])
204
+
205
+ input_ids = [
206
+ self.data_object.source_encoded_input_ids[index] for index in indices
207
+ ]
208
+ attention_masks = [
209
+ self.data_object.source_encoded_attn_masks[index] for index in indices
210
+ ]
211
+ return {
212
+ "input_ids": torch.cat(input_ids), "attention_mask": torch.cat(attention_masks)
213
+ }
214
+
215
+ @contextmanager
216
+ def measure_and_save_time(self, batch_id):
217
+ t_start = time.time()
218
+ yield
219
+ t_end = time.time()
220
+ time_taken = t_end - t_start
221
+ if self.tb_writer:
222
+ self.tb_writer.add_scalar('batch_time [seconds]', time_taken, batch_id)
223
+ print("Batch {} : {:.2f} ms".format(batch_id, (time_taken)*1000))
224
+ self.inference_times.append(time_taken)
225
+
226
+ def inference_call(self, input_batch):
227
+ with torch.inference_mode():
228
+ input_batch_lengths = [x.shape[0] for x in input_batch["input_ids"]]
229
+
230
+ if self.device.type == "hpu":
231
+ initial_ids, beam_trace = hgu.generate_on_prepared_input(
232
+ self.model, self.hgu_opts, input_batch, self.max_length, self.input_length)
233
+ output_batch = hgu.finalize_beams(
234
+ initial_ids, beam_trace, self.model.config, self.hgu_opts.length_penalty)
235
+ else:
236
+ output_batch = self.model.generate(
237
+ **input_batch, **gen_kwargs, pad_token_id=self.tokenizer.eos_token_id)
238
+
239
+ output_batch_truncated = []
240
+ for data, source_len in zip(output_batch, input_batch_lengths):
241
+ output_batch_truncated.append(data[source_len:])
242
+ output_batch_truncated = torch.stack(output_batch_truncated)
243
+ return output_batch_truncated
244
+
245
+ def send_responses(self, query_samples, start_index, batch_size, output_batch):
246
+ responses_array = [
247
+ array.array("B", output_batch[i].tobytes()) for i in range(batch_size)
248
+ ]
249
+ bi = [
250
+ response_array.buffer_info() for response_array in responses_array
251
+ ]
252
+ lg.QuerySamplesComplete([
253
+ lg.QuerySampleResponse(
254
+ query_samples[start_index + j].id, bi[j][0], bi[j][1]
255
+ ) for j in range(batch_size)
256
+ ])
257
+
258
+ def flush_queries(self):
259
+ pass
260
+
261
+ def close_log_file(self):
262
+ pass
263
+
264
+ def __del__(self):
265
+ if self.inference_times:
266
+ mean = statistics.fmean(self.inference_times)
267
+ print(f"Average performance: {self.batch_size / mean:.3f} samples/s")
268
+
269
+ if self.device.type == "hpu":
270
+ from habana_frameworks.torch.hpu.memory import memory_stats
271
+ GB = 1024**3
272
+ memory_stats_dict = memory_stats(self.device)
273
+ max_in_use = memory_stats_dict['MaxInUse'] / GB
274
+ limit = memory_stats_dict['Limit'] / GB
275
+ print(
276
+ "HPU memory usage: {:.1f} GB / {:.1f} GB ({:.0f}%)".format(
277
+ max_in_use, limit, max_in_use / limit * 100.0
278
+ )
279
+ )
280
+ print("Finished destroying SUT.")
281
+
282
+
283
+ class SUT_Offline(SUT_base):
284
+ def __init__(self, args, options):
285
+ SUT_base.__init__(self, args, options)
286
+ self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries)
287
+ self.warmup()
288
+ '''IssueQuery and inference methods implemented in Base class'''
289
+
290
+
291
+ class SUT_Server(SUT_base):
292
+ def __init__(self, args, options):
293
+ SUT_base.__init__(self, args, options)
294
+ self.batch_size = 1 # batching is not supported currently in Server mode
295
+ self.total_samples_done = 0
296
+ self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries)
297
+ self.warmup()
298
+
299
+ def issue_queries(self, query_samples):
300
+ input_batch = self.prepare_input_batch(query_samples, start_index=0, batch_size=1)
301
+ input_batch, _, _ = hgu.prepare_decoder_only_input_without_moving(
302
+ self.tokenizer.pad_token_id, self.hgu_opts, input_batch)
303
+
304
+ t_start = time.time()
305
+ output_batch = self.inference_call(input_batch).cpu().numpy()
306
+ t_end = time.time()
307
+ print("Sample time : {:.2f} ms".format((t_end-t_start)*1000))
308
+
309
+ self.send_responses(
310
+ query_samples, start_index=0, batch_size=1, output_batch=output_batch)
311
+
312
+ self.total_samples_done += 1
313
+ if self.total_samples_done % 5 == 0:
314
+ print("Completed : ", self.total_samples_done)
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/backlog.py ADDED
@@ -0,0 +1,56 @@
+ ###############################################################################
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
+ ###############################################################################
+
+ import bisect
+ import itertools
+ import time
+
+
+ class Backlog:
+     def __init__(self, buckets, key_fn):
+         self.buckets = buckets
+         self.todo = [[] for b in buckets]
+         self.key_fn = key_fn
+
+     def find_bucket(self, key):
+         key_tuple = (key, 0)
+         return bisect.bisect_left(self.buckets, key_tuple)
+
+     def add(self, queries):
+         for q in sorted(queries, key=self.key_fn, reverse=True):
+             self.todo[self.find_bucket(self.key_fn(q))].append((q, time.time()))
+
+     def next(self, max_size):
+         starting_bucket = self.find_bucket(max_size)
+         for bidx in range(starting_bucket, -1, -1):
+             while len(self.todo[bidx]) > 0:
+                 yield self.todo[bidx].pop(0)
+
+     def next_n(self, max_size, n):
+         return list(itertools.islice(self.next(max_size), n))
+
+     def __len__(self):
+         return sum(len(b) for b in self.todo)
+
+     def get_load(self):
+         return [(b[0], len(t)) for b, t in zip(self.buckets, self.todo)]
+
+     def get_max_wait_time_from_bucket(self, bucket_size):
+         bucket_idx = self.find_bucket(bucket_size)
+         if len(self.todo[bucket_idx]) == 0:
+             return 0.0
+         return time.time() - self.todo[bucket_idx][0][-1]
+
+
+ if __name__ == '__main__':
+     import random
+     # Buckets are (size, 0) tuples so they compare cleanly against the
+     # (key, 0) probe built in find_bucket(); plain ints would raise a TypeError.
+     buckets = [(256, 0), (512, 0), (768, 0)]
+     queries = [(random.choice(['A', 'B', 'C']), random.randrange(buckets[-1][0])) for _ in range(16)]
+
+     backlog = Backlog(buckets, lambda q: q[1])
+
+     backlog.add(queries)
+     print(backlog.todo)
+     print(768, backlog.next_n(768, 3))
+     print(256, backlog.next_n(256, 16))
+     print(backlog.todo)
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/calibration-list.txt ADDED
@@ -0,0 +1,1000 @@
1
+ eceaa658027ad9625f832368198e11bd2fa38977
2
+ 70052e55c12c97a9bf6796a25b6ced8f3ec4be06
3
+ 9767fdf0a53da6ee9e2f75582cac5964d80e1b5d
4
+ 1f8c736647d06c42beb553b25a02e44ca15ca0fb
5
+ d3ce7d615ecc15f094d8130654812ad77cd604a3
6
+ 55086c3f69cb41b991d3db0c6b10b0aa374788b4
7
+ 2745f93afca3edf25dd9ccfd094eef06298f62cb
8
+ 343644770a597a2dfa7548ba165fa9c6bdc88245
9
+ e2cecb8734918ac6a2d9cc8afcfafb16b1781ae2
10
+ feba32aa9aa3b51fb451bc48a54e78d02efe977f
11
+ 9c2e4d2f6085ef9f237e6fe1baf83000a264cf93
12
+ d85158494b7041560466f153c4d050362f90a7e6
13
+ 1e14852c49e84434ca249951e0fe603610eb68f6
14
+ 369d721d1102f0cad726ad3426d79c965a224b28
15
+ b9898d6014353a7411c0cec222996431c832c35f
16
+ 7cbe104b3203061bb544267879fa316436a1ab5f
17
+ f48a6b4fa0827b4c6324bd47dc2e8954141b1a6a
18
+ acb5ce76c230bc66633414678bf254387c3d6c85
19
+ d70d5115ec3adc5fb3aee8b1e29c7f0f2db083be
20
+ ffbe89f592457d39ab9c28de4fd89fbac2150f81
21
+ d841808ba87a4aabbfe4427c53eb0e2e8a74995c
22
+ 2d4125c6162f9b4924a262a55bd8fe3faad0b3c7
23
+ 95fbe3b3a7e5fb6fa48289885df25d9a6e911d2d
24
+ f6ffa98e7d46000bee325e5284a2ac897ba4149d
25
+ 31e424f7a6fe1f5ec61486eec17a85c78ca2ce8c
26
+ 2165fd490b9d14ce0cd3784beb2f4a1d10028a1d
27
+ 4d6b1a85d264397e956c0a03f70de123ed4fff5f
28
+ 4d20111e71a06547613e161292a51cc44eb74da0
29
+ b90b35dfde9fc08fe1651e36bc02a3f1503e3b6e
30
+ 2d3b2eb21a6691c764aaa1f27030453fc44331ab
31
+ dbf02196bae40e0adbcd1790294748a560b1e45c
32
+ 0ef5c715acd7a70f51a9d800c9e01bfe69657bed
33
+ f0f65f40fc56b69bbfab5d88a157dc52ad967484
34
+ db3575fd124f65a7aeee7b4512b5e0fbebf2c8ea
35
+ 1234fafb7b6ecc9224d6d979536115771467f4ae
36
+ c31f79babaf4d93609ef3ee2966e25428a4fc130
37
+ 600b619001a9d840f5bb5ee3362787ee38df26fd
38
+ 5e68842017bc6f5806d962ae5ddea8490d763b82
39
+ fa87334256af163f8f56d8035d1be4c7909147e9
40
+ 826f2414623f8e444725f1c9af593f27b440ebdc
41
+ 3603896c0fbb49d6231ced092ff7d057db2c43f1
42
+ 4b8505e0219b78f9645fb12177d97b8e29821ee5
43
+ 3332226f8b4f6c46ed3c66ad0765c3720475254f
44
+ 97223b7119e264a598afe67e0ba82fbb97fedd2b
45
+ 87fd2fd13729ba13920591bcc96a83ddf61625e0
46
+ 2160c5d812611becf442b5079a7908c2f48f6de7
47
+ 559d3b10273acbd4b98ff5557aee92f33e50129d
48
+ 273c1d2936592874fb599f39dce8d6a0813a49b3
49
+ 3e6affd8cc6ead338996574fe1d0cb03ca983ea2
50
+ 3b733db6e80a89bb4062c1e661b9f9d4690ea0c8
51
+ 4a0f219f4b67d9cda499b71e3ee54bff5736f8c1
52
+ 064396600b73dc4418ef783fc82f4fe1ff038f6d
53
+ eee1cbcef13cd16096734414da992aa1a7048bee
54
+ e190b373844d5d3a59b9abb867de8f0fdffddeda
55
+ 8700aeab383d481d613759f19717ea4381df1433
56
+ 087b05a98112135d9fb1c7628fffb72ae2456e9e
57
+ f69c5a3c9ef4bfb195ad7ce2590021121a7afced
58
+ 82958d258a7fe963f8c9a001010365437bf15fc2
59
+ b6b37b9bc60519fd214d6e980fcbb16da066eb68
60
+ a49ac163d47c1e690f5d3237433194a9d0ab558a
61
+ aa35fa6f613b29bf80806552b1bf260f04bbedc2
62
+ c248fc3e54b451a4117f23f5adc88cb8067be3aa
63
+ f21eae7e796721088234b885bc6eae0daef05738
64
+ b5c4d6f671adfb997abb53a5f2f54519180df7b5
65
+ 457b2ab2b4edb94c4b67c1219451be80dc369e8b
66
+ e80b0028e44685e39581ced42c1e1ed9cf44f74e
67
+ c2d90734f9228cf3163187ad72405c90bb09d13b
68
+ a999f5732f9bbe0991e6e895f9bfd42bdda75bf1
69
+ cea6ac133923e62b186aeb17db37be6640a81200
70
+ 7facc85e37ababb8c029257f246fe0934f84a808
71
+ 21dcf444d4bec9d4b2a6ffb54112c4cbb797025f
72
+ f880779ad1b262aac438a8cf3a6df9c0ecebdada
73
+ 7313410020b93dea1f223d2ffc0b997385d7886c
74
+ b0b2948eac6b4e082bbd420da8dff3de6a187377
75
+ 360b51c738cde3fa09cef18c3d7672a1d20d3379
76
+ cdaa77a96e1d96a672548b6dc0bd83bffe6f1619
77
+ cd2cb113b1cd90e2ad235466df3a64dfc956877a
78
+ 7140dd21ed480a3f47a59c647c1f4e690939caf0
79
+ f2c9d3d8f0622e67574f386197b92570730fa61c
80
+ 010cd75a5b587285f7697cdb6db6526bcc0320b2
81
+ 86983b2cddcc91369ad7d4ff61c9e6d258c78b71
82
+ bad5a939cc0d695a97e7f3fedac53c93f04c3253
83
+ 0b8cb37a8d54e1761b3d99b8a6e6f921f07e00ae
84
+ 613111ac2a3831a7291656bee2def306453552d4
85
+ 0e29e9cf2a08c4b35ba840bde03b0537e3821a74
86
+ 2f425306562bf94bf5f4567b8c63c5b204a2c414
87
+ 8bdc6411954d7163b137c4970f6d6431aeeb9ee6
88
+ be9ef18a4a0a08f94a0340b2df0d0f83144299b0
89
+ cafdb2911d9af6038659354058586d6bd5174338
90
+ 0c14f1a16ad395dd9aff4437452cf555ae8858d2
91
+ c3db5ce828128fc91c5bbf59d144e825f49ad271
92
+ 273221df42c8dd9e0296cf3ec203c59fa205ecd2
93
+ 5abe61623d56bbb0d7bbc79f5ffa96732b3a1d97
94
+ 98d6f13e4f3ed36591298430dcb49bddf89003b5
95
+ 18686c211cc5f466c48fb8dfaf5eaa00cf3ef0ee
96
+ d85b6f625e9e57efaa1e3c21e626dd25e3414758
97
+ 69e2ec5703156e86990ba690b2a9d4dd0f5733b0
98
+ cc665a9316a9b22400183776d396940ffeea2fde
99
+ 319a27f92609641ee0f0fe1f6a88a9aab739b98a
100
+ 94817c53b53b3979c3d32d197ec26dc489921e6b
101
+ c803d0b957abefb26e1fe0ad87aea5fc80180a20
102
+ 6c5b9c68e7890f4b683e148ac62dace194b45b59
103
+ b377a0f9dac858e2bf7271bac531591140a56e33
104
+ 6604ac30dd3d9a29faf40def3e4549feab4d9d02
105
+ b7fc4ea36690353003cd172e597c456534bb2811
106
+ b42286dd9b577bcc261e838045e51133266f7fce
107
+ 73f9e1772ab372f36898dc95ef18711434827717
108
+ 838a481841bf69bbe3ac8a3b53da54d692ffd084
109
+ 0a1c17d84f846eecce39356447e4d556ffdd07fa
110
+ 98f8a09b98daa05cd412e83d724e4b914e8d921f
111
+ f859c4b5c39653753b21c575a98d525fc47c16bf
112
+ 2abf786570b9f9de945c70d9678cb67dd2a2e57e
113
+ f4a4d960ecfb87fe06e38f56310a14d23caddb42
114
+ d96bcf6baf6cf8c61d54ad9c9abd997bab870077
115
+ c6eaf9d97b059f3e824a1ab4ffdfe45494e5f8a1
116
+ 8bbbd3d05e22fc3372abdbb471799bd5f5380a75
117
+ 09dba9542ee64697d789d47ecd3bd53bc8b0d953
118
+ 7d42400c49f9cb313345f7d9ea74c51d0147a4f8
119
+ 1f58239bc2fa91c3787bda0c2b9ca5cffaa7510d
120
+ a0e187806f8ec8f02e0d329ac114ac44fe69a4b4
121
+ ad2e90052e15364c93011386322f6a5007314348
122
+ adba3dadc7c6a3177346ba9119a8df8f5b81ae0c
123
+ af2d712599be471d1ba0b91fa18c347220ca595d
124
+ 77c610007679f13e1f5d314ddcf4b14c7e57876b
125
+ 0b1d14b9f6619ced37003ff77f22dbd122fca645
126
+ 310fe57a8497cb5215c1d70d9f9a22ab91d5c054
127
+ 7056abbadea0a2eec779e157890219171bc98938
128
+ 3863aca32c99daf6ef8a0de6f60471bd9c54b885
129
+ b32151c4d36a4b42e9b832e14f539396627f8eae
130
+ f72c81d6c240a6dc8395c6ef33b64edc423b1fae
131
+ 4409a8f066166ba9aad02611f6979b44fc91afae
132
+ 75474ebca7f3413b4e814794d6ffc13663120bae
133
+ 55e430f0c68f6f0c4cc996fcb87c2e53233e2738
134
+ 5dc954a346ff3dba1629e03f3e6485235d6d4742
135
+ 95a92435cdb8f2b51ede4ce6220c5613c8dbfc2c
136
+ a39a529543dcbd6c0088cc301bd82173feb5f18d
137
+ 2b8179710437421d5a1f0d281515725ccebff3f3
138
+ 087f4bd441c0032caec0a1f65a139d336a09d133
139
+ 3636a522fd19a2ae4ff514319d5c1fc012c4bcb6
140
+ c5f38edba57d815658097ba5ebe37532ca160d7f
141
+ b0f17178ff8d37e5343119bd8917e262f386203d
142
+ dedac076c1649b0edf55353e9bd374c0cd4ad956
143
+ 9a3e0978753d5354eaebdcec8550641314c71b83
144
+ 0c9881539c0c5249e911dd70b37cf7f74327b97d
145
+ 80e02450839ab9c1a08082e404c6f0398ae2e92f
146
+ 6e85c4357ae3dccaae1b354641d22a359a100d47
147
+ b05f9fa99ca30d7ce2611a6deb139f2274d1ad3b
148
+ 306a3efeb8ab8919079525b9aa747093bdcab563
149
+ 98c31464a9052c1ccddc9cbb71c2529f3fba6f4d
150
+ 35ffb93ea7e6cb006d5019185815f03c67b94d77
151
+ 2f3f088adba0256b27e6da9efce4293106e34291
152
+ 02d287e484a9da84424d10eeb0c8f3ae52cbc70c
153
+ 33dedcbe9423f6031122f8be1f7c2c69ea4ad4ad
154
+ 03765604d9073697904c2dc4cf29e90b924f36f0
155
+ d2845ecaded68fdc5b372d10c3663441ec8b358d
156
+ 4cd67a6ead5211ad92e1faaaf71ef28f7da2f593
157
+ 806333db217efd7e2a4562bb73e695bad88e712c
158
+ 1307f117d423d143e6083faa99255c2bf2a2f3fd
159
+ c92b96b8114ec521af30fc090cef40c07f9544d6
160
+ 04b3d6adf722b3b33524c8d288c74e7db2632a2a
161
+ ee5db06cf8ca3010774965c3674c273c680c1611
162
+ 709e0af32c2463474cb8ffb85d2dbc07960037c5
163
+ d503ee11d8f7d43c67841ba1b6bd863a6180a223
164
+ 709555f0f163e09098b58d03898a9e0d6e7ca0b2
165
+ 010f73477cd20c14cab78ad9cef350ac8c0f55b3
166
+ 9e95f4744d105fbcbc32a12db7287cb64254325c
167
+ 6e47b7f6a76cc2728e61f4bdd30bef697d6490d2
168
+ 5620139a78269335505edf23a902bb0c9c264e3e
169
+ 70908608fd62696f99cb3f7a185b226fe32e475d
170
+ 0e907ac0dd02b47f4a0726790d01f0c57037ec2f
171
+ bab2d77edbb5ffb3a7938f16fabd7ad3cc83fae9
172
+ f5d5c855f2e708067e3532980aef101d20c40cab
173
+ c50895475b8a401824cc9f1bfaaa8fd7797e172c
174
+ 98690365a6ab1e82c25ca08c26db63a834c21fb9
175
+ 3368d60307efa2661820d3240854967fbbf6fbc9
176
+ 1f7b9e38af5bfddef1649c83b119e45063bbee34
177
+ 3fc0feedd683b49702d0da9d7d3c36b7be02ca09
178
+ 2014aa562c1d05dbbba727db120d9e163fb8f43a
179
+ e22249a8da0886b4c1338dbf2e54b766b13f4db6
180
+ 8344960457778ea0a4fdbba33e7eebb69aa979bf
181
+ 2c7fb7b897db7f304961e919cd5ef1a5a93877a4
182
+ e2b409c9d97825acad579abe22e0a37b685d6ca9
183
+ b2fadf8584ecefe2a32cc2eba6590d10dc8a9d26
184
+ 5f201c66bf26298986c3dd2aa84818a312a596e3
185
+ 8dffb657ffd1e331b99cc00cebf18645e219da12
186
+ 3236d66fb8a63916b6fd00c2f2ec417b5cde01b6
187
+ 95cf024d3f1e40344f16cf4faab052d6fb1e60cb
188
+ 9faac843955464da41331af273942e38561c9a8f
189
+ d383fbcb1a69ef97a660318a2b36486e5fdd6a44
190
+ 9939959cf9cb1a14497e63aec0b88a08ad3e451c
191
+ 3c0c3ce2681718b816289eeeb3ac343ddc037fc4
192
+ 259d8cccfcb9b9edc00d757ec6efecde6fc06110
193
+ 9057ca8e09723c9959f923a412e409ee793d0062
194
+ 79f5ac0831ef03c2ebb40d325758350937a55313
195
+ 3f36f6f4d3317275130051db2405459021f56b8b
196
+ e35c3d07dd54243acf4298ee0ce6ea7e4621e90f
197
+ 115a59d5c4916cb14b4c408bec36bbc6116043cc
198
+ 5d563efeae0cab135ec70ae4456a4e55bf598aff
199
+ 2d8d73eade954a63f892414accaf2db229ff3312
200
+ d42bd8a35e147633d3d750266939c6539aecece9
201
+ 27155662fda1f5febdbb42e6572dda8d9e31588a
202
+ a210a653a08fd0460b52c7eb68bdbde0c40ea63b
203
+ fb4be2d8538e5e4418042eb7d81491dc7e94dcc5
204
+ 53940e5d960d1b63e5ec84fae802fcd599b20f01
205
+ 213dc667b6c665a4257c4afef5e5fd39d42eb01a
206
+ 08a16f7ffb9968774fe4562acfb79aa6a1a59a2b
207
+ d1caa4726d8ac1d9ad611708038db896828f06f7
208
+ 67e3d20bbadc184c57efe184ce8ccc402de23bed
209
+ ae05bddb7e816fd0e14e95cc525e06caf9392918
210
+ b2bec4804d38db4d01520c4b65f410acb20e4d2a
211
+ 78a8c13605a8eda09a0ac0f04910b414eed6b765
212
+ ebdd1d2b3891d6f0de29ffa1eeed3f03bbef7912
213
+ d54da603155d9e507b81d7188e1baee2f984a99a
214
+ 18823ffe4e7d30056229c6b0c3b71f9c72c1d2e1
215
+ 86b8d10094b19ab1059b5dd7983f26fc2bb133ca
216
+ a16c62ef8dfc132a0a5c406e429a08e1d40b8756
217
+ 64e19e8802e2f598c5a84858a4b2c0c43b99877b
218
+ 3fe31bfa86777b3f4a1bcbb46650f683fa477935
219
+ d6e9929980eb730124e8cf6561991d43f19241e8
220
+ 20ffd27dc3be9eb895fe8a5ae3cffcd795ad100f
221
+ 3960b049e19e3217968723430f3595fb1d4e1dab
222
+ 846e738db5d5df03f621e5cca067016e84327f16
223
+ 2c20a17cc4846b8dc437fd00f84d08cd15d0c8d4
224
+ 16cbd9a93ee9067271748479378a31d24390e048
225
+ 77677862965b241d7b9c4ea61836ccf09b3e37a4
226
+ 3ec8db9d06345bf26aad0ccfd05408880946f4a7
227
+ 3747faa432e732538f1636c9aca56f068ec44a4d
228
+ f1959058e2074a54c0bddf7afc60131df132415a
229
+ 216558f2fb3e918840acc2fca7c81f27c7a80e3f
230
+ 1c7e55eaf41d1e43121755c1cd667d210e45a000
231
+ ce5c91d45d83f9f114814c8db9a1230b2d79eb02
232
+ e735d473af54e1ff29a66b379fff9e88ccd8a164
233
+ 7809602d8d9398f05b032bc399a922af1567c56a
234
+ e85a828dc7853ddcce5d7d919b07370236fd089d
235
+ 0f81b75410062d52138ab8a67ae49d03321e991f
236
+ c9bfa4787bab601fe2e0749b4fb1e44d3f168373
237
+ c03e79ef13869270df1be0c63ae86dddb7c21bf9
238
+ 91e1856c8de122ef09c10589afb9b3728bba9296
239
+ 0a661af9686af6c8f298c8309e8e1a96ef0cc08f
240
+ 08102a4509565732289f843007d08cfa72ea5456
241
+ 9b9b7248f513f621089a6cdb956828a3fa6da09b
242
+ 7e4ec1b8fa3a477f43f00075da2ed26a31db45f2
243
+ 4c5589e14718f8d6ef4027baa22b680f556d9ce9
244
+ f63c67c039b3bfb83b3d46f4250e3509c2e9394a
245
+ 8500584842b1a7abfce6a2453fb9f76c5b39d26d
246
+ 5f43c3db85393d73b57174a6e3c72884cf1402e6
247
+ 1401f556e033d9f10dbdf83e9b5bfcf6a84823d1
248
+ ff7e6d2a2c5fca5f33db717bd68228538fa09f37
249
+ 9d6addd57bfee73721c64830eb2d0fd27e8fb9bb
250
+ a55a50b6cd898fc79bf4657fef0f0ad44de6a5fc
251
+ 6af08a408468481f5847013cd8b7f9c0ec7296ec
252
+ 7d54beb04ea368c6386dc8174ffa1915b3414bb9
253
+ 93456d2e7f067d518838df8cd7f32ee85289f4bd
254
+ d603f66ebc365627756eab740140ed43f0e5f40d
255
+ e9217085cfc52f0fc47d91f2feb681a33f88fb59
256
+ d0912f63112be8069398b3f6c926c727469f1191
257
+ 2d1edae390d9f079095606c8bed0a83f5bd5d767
258
+ b6136dd5f245f26dece12bf294d524bf584bed69
259
+ c4a4d6e24e7753e098e09324e903c3fc2cb45f74
260
+ 8f49dee3dcf6b505e43475e3b7c15a5e25f0d85c
261
+ edc1c91f5eb0547c18877e123cf3ec248ac734d7
262
+ f9f269f3df343d14b11c40286b22f2c54d74d8af
263
+ 99c98449dd5a99222dae7cfb14bc060852f220e4
264
+ 017d27d00eb43678c15cb4a8dd4723a035323219
265
+ 61e137b37db0b3157c04fab0a5f4314fe4b03931
266
+ 43f54e39221310d45fed028b202e0e26490846be
267
+ ff5b1552320e183941d8d58f726f589324035284
268
+ f140814244c9e54cf4ff2085d7d52b2dd87d2737
269
+ 0264d85da73237f1967bcab20b2f99313a00250e
270
+ 7d2215881b5399038a625726794c523be20e567a
271
+ 77810c807b3c7452a00968927dc8b3b76c2aaf63
272
+ 361864a24f139d975cb02736e81d106b6b50de37
273
+ 5b08a44dcfaa7da30b066b62e688177ae4c27bc6
274
+ f0bb651f7498ac35c750d4216b3fbdc1c6e83508
275
+ 0865448cb045a8b9568e679dbdb5b752ba0e38fc
276
+ afbe85965b4aae74bc86d5c56c75fe55e782c7a0
277
+ c4e68babe61c2389be350f11dfc8e2c5ddc9f032
278
+ 7de82ac3cca30893284f93cc133d87276f39f8df
279
+ 0e983ee75b47509844fddf43d06a989b3448376f
280
+ 645ec5713498f91b494d39bbe8ac6619a20d45e8
281
+ 2a853caa0177515501abb206103e15fed7bf2315
282
+ 6b675d840afe29591d304e7b52a1edb442decf2e
283
+ aa4246332705bc11ed706555620cf99aecace692
284
+ 461aa6c463d8ed8a3485519f8347d3e8fd30d5f7
285
+ 0963c147bc9d5370ae2062863e776853744c64a7
286
+ b6bc7591f950b6647f2d5cbf11bcfaccd8da0ec8
287
+ aa3cbdb196eb266ebeb48c1be941df20ecc1bb90
288
+ 945cbe99df1af1b5db99d8dfcec142e5d0452065
289
+ 9c2b9de4b8928f63bfbaecc97bddee210e2cd38a
290
+ 6ae4c366fec9f8ffb28f74e03fee29f300e4b0c8
291
+ e9797953e895ec7596bb0c80d6c3e13a6170ba32
292
+ 4d63952d88ef8b61c631d92744b8b88d5900ba82
293
+ 6c668fb743f9af4bb080654040e6416f7e9b5605
294
+ 31d2a88aa62215e0046d4db0c0cfcb7390e16762
295
+ 941f9ba5091a41a41338a0b5c06ef998ab76bf92
296
+ 4f31114f7ead2ec76449bdfba502b576c8cbdc51
297
+ 636ed1de3d915dd13e94ea6f83ed418139898672
298
+ e1b8a490189840089a0e42f357d7e18aa04d695f
299
+ e92c4914629728b8c18cf61320cf4a34baa77300
300
+ 9afcefd2944149fff4d5b74f5b26a39288b7cd59
301
+ f802525632b1c8fa85b43911f07d8129694621c4
302
+ c7b0320fd85f3ef25cee88621de6eb541b399c36
303
+ ee5ee7b755e26ac0eabf5191e7747f6d72ddc84b
304
+ 65a03a7e863b3a5b97576bb3fdda2d8c4380c706
305
+ cefa54e79f57eec0b1273f69ff7149dcd90c7ee0
306
+ c2add6ebc7d17385f7e0d0d9fca5fc98115c68fc
307
+ 169f5f6ab3818fc14b9f2471ee0d8dbd61d5e566
308
+ 9862b8aab2db9c82fd1012792783a90ec79f7269
309
+ 0e1e33051f7d782d2643d645eff67157c37370a7
310
+ 8ab2735a3a614a5e95b2f53fabcc04cc482a0abd
311
+ 1d400e7242d8570c79f9f34c392ce02e217e01b8
312
+ c56d3923764328f6767dec2e5617f562cc88e791
313
+ f9689fb9656132e1c6d186851563f2b968643791
314
+ 08d845c78055627eb898cb74bc38274794351b17
315
+ ed3449c7f2b4a2f4f1548af509dc9ab1960e9fa9
316
+ 0892fc2908f83d76b147c3ba1847af0056a47e9f
317
+ eeb9ec2b66bfce439d6ad3f25e364d3b1d826bc4
318
+ 55dc9832dc56cae9f0bf180d2103a1d20c1b1ee8
319
+ b05f9fa99ca30d7ce2611a6deb139f2274d1ad3b
320
+ 3152602658285f9edadaa1d9cb7cc4948ab8fa54
321
+ ba620c801834cdcd41547b08712734e30e84ae52
322
+ c02f067640c67b1aa5013207c2c7782ac6b97399
323
+ 3349e092bb3bf21585d52e72e2c782692932b139
324
+ 26e816229351dfe7578c758ba07c4d2d2a891b2b
325
+ 064f086f49fa410b664d59a0494367c421ed2f8a
326
+ 1241b04b4380b1a796390d32183e3e738d7b82ff
327
+ 79879bbee2c8f0b46fe44c80949e24b3c11ff7fb
328
+ 2f4e7d5a0130b48ba687536a3bd5623fa906f9a8
329
+ a72c848bc3bffda7aed21ece2b07327153fc11f4
330
+ b10cfb970a746327ce47764050473ea27b15f649
331
+ b30975204e2d948c1ca8d33a9f6e755f86d8e200
332
+ e54abc8237ffb5e2172f192200fbde85a100cdcf
333
+ 0eb8e5740eecc2098cd862cb5d1ff41f9aa97eb5
334
+ 0807b672dd1a7ee6f8038649f70a66cfa3ba4fed
335
+ e22b4d2a35411b0b2270871f83c19e9f6efbfa67
336
+ 6bb9d73ac47b68b90872d97b9ac1e1aa34ae72fa
337
+ c83faf99c08fd4d44d9ee38d1c3ef84c273909f2
338
+ cf91d2b46870970ec013ea2ef0567f695ca80261
339
+ 151ea6f1dc4a40cf854a8b2d9fed22ea457d2afa
340
+ cd29f730499023601901dc9ca801c279637c5a81
341
+ 4fbaf01100e4d6ee1823f1b25ba309fe73ffb6d9
342
+ 49654512a36b27837b069fef447ebcc460b0c911
343
+ 09df70a379653872798f1284efe95240944f6af6
344
+ 43e9d988417d90e85868aa09b5c53e2ddf0364bf
345
+ 5730ccc0f1a125be76253006f14a6d3a39fec5ae
346
+ 65012f2f3ec9d16629eb8577d149de30257127dd
347
+ ae05bddb7e816fd0e14e95cc525e06caf9392918
348
+ a65f3d75c5cbf99deccb00c9b94f91b5ad52a050
349
+ bf6d04b98e0af89f073f4b71c5125017c9aa079b
350
+ 6f1f25365bd131c0caf19acf0f4fd02a3535f538
351
+ 23a87dcd1007f73c4a6278d230aacb6411c71266
352
+ a6d88d33454805c4c3b9f3c50b1b2482048c32a2
353
+ d6392fc14b8c5e61bb6342dfada3b5085dfa691a
354
+ ad61eb84269497ea2e8d9e6f3b1a504d9bf82d7f
355
+ b90e7d7139f69d50d53d5bab66a560785596bcb4
356
+ 237f4b3fafb5bca89627701cabdb01a61ddef306
357
+ 8df29408cf5fbf40bcdb5a73d9eff3e30b928638
358
+ 02f0cb84d4e8f2c78189f3008c327db6a7dde4b6
359
+ d7449a49a1e808c3d2f2c87f6b6b26dc8cbfe638
360
+ 096fb4a6df33a35a8a4c28cf6707d6093b8fb483
361
+ 54ce333c923bc3d8107ed1b803575c249d92a7ca
362
+ 390a82f6f49cad470b3278465d07a9320c163fd3
363
+ b44d920cfa42cb0ea8e279c4401f565577217323
364
+ 3c2a82b4460be3eb08988c038156f24e690ce149
365
+ fbfdc61792dcee3d0102859ed2681489b037339b
366
+ 5d788ec362a874cc113c2204b06fae82d1d70ea7
367
+ e4a186c4590ba156eb3e45862c2a5b4181e2fab3
368
+ 5f7c94ff1e4c755c47343046fa0ed6823124b85f
369
+ aae35772a13f84876be5fadd919f1265159acae6
370
+ 22680dc843e4692474815b0c9ca78b9f4f1a116a
371
+ 1c23f12e590b2cb4a89314c0b933f12b7193a37b
372
+ 674cc0ecfb854619f3e50df0e4baecc67c73724a
373
+ fe41f09089b134bad7f40be0ea4a6fa7a691655b
374
+ 12327d1afb02007b3b736570856176234edfa8bf
375
+ 26fe548ac5f3ff1e700b2cc6890c2d5b152234b4
376
+ 861bbb99177d314267023bf3699ec069f3bda6d6
377
+ cdaeefc0fc597b0b591b76d20f979c1207e98880
378
+ 2a370c32d2c1464da03bc2440bc96ad23059e428
379
+ 6bba3c06659ebeaeca823bb7517baac4425faae5
380
+ e7ddfb8e15144c1a1e48d8b98ce1a44b666c18ae
381
+ 62dd1f31cd4e2c8250c587b557f4c2be67d5e495
382
+ 3ab9f2c8f9512bf98dcd467971b3ffc6d612d308
383
+ 00120f91cfcab17bac165f7a4719019a628a9db3
384
+ 3ee94d218979b459196743ab0a3d2957f72422c3
385
+ 6970a0ff24ea28a7500763ec1b72a671bae412aa
386
+ ab356240d60a6e7d6efce1a9638415f13bcf6591
387
+ 39d0bcbfe75fb7bbbee7d4bb72a77cebf03e39cf
388
+ ae202f1474cd1ea41a5172230fb083e1bf932d17
389
+ 7dc9c7399c2e313bbc6264072f6a592b6915b082
390
+ fa79ac193bddc262fb35a468c8bdfaae536bbd7d
391
+ 412630c97420afb50e5278d3406e0cf4b08d0b77
392
+ eb1cbbbc594b324145c3155bae5614a2553a17c9
393
+ c948920cd3dde6dda8767bafc8173c0c37127430
394
+ 43ecf30e43686b2a11f6b329f5046a68082b7272
395
+ 49b2330923275c10d5faf66681fea724f9938893
396
+ ea6c2b9b8479325e3c081252b59c61047988736c
397
+ d28da9e2bb92814351486125aa35b16d112f3a76
398
+ 393a89e4d5b67ce3e29678656d73a0dbc2ee930f
399
+ daed716140202e583ad4cdc98fcd8b4b3aa5ae35
400
+ 51ce09bfb256ad9ac38a8e071f36b0097f6fa68c
401
+ 36beb23a74208850b1bec50c966e985aef7e4075
402
+ 52894d78609a1022f6dbc4ec4fe32fcc31fa9366
403
+ 3988b5a02009b7589973eeb2cb929f2d37c4f409
404
+ cbf788987b75c11aa5c49518c5b3cb45e2c177df
405
+ ba1d997f0df5b17a0899bb643a467e95332cf0d8
406
+ 4a0453760d334ccbc84d00ff67a87865fabae97b
407
+ d39cf4acbfe4fbc26e001266243644ae35beb712
408
+ d05b5e0e3619b87fc46e731ced5111f47ccda50d
409
+ bb98ef0258299046aecb9fe0070ec309d1cac401
410
+ 080b5e433c62201fde1221066f4d723e3ef427dd
411
+ 92a6b2e9d9e7da09ab27eee906a8c38c0219f390
412
+ 04d7b42001e7249bef064a175aa1236be8211e4c
413
+ 75855c9acc21043af85c9f161fdb0f68af165771
414
+ 2fc69a299b1a7ce0a9dd2552a910f319773f3eb5
415
+ 98ebcb90dfca1bfcc7bd2cecf53cc12f7dde6970
416
+ db8025d7b55de50ac56606023c838bb9975dceee
417
+ f1530622d659a31a36b4cc5b79d3ad9302ecb384
418
+ a9e946a8ee4153ae7e45143941da7f61fd04321a
419
+ c6f17e5ec10ffaed02b111f02af4afa86d347d3e
420
+ a7ce2524995e668268028e9f7237dbfbae3cadd1
421
+ 407f1d56cdeccb0e313c15ddaac53b186acdbf0c
422
+ a8c0086c0ce76f960aedff7a7d28d9009751cc34
423
+ e28316232274fd9444562d8f7b5b6949072d0a2c
424
+ ce5ba5110238d225394df833987db22b197dc93d
425
+ 32f66b696f66911ca0c73e36ee32708d59124f32
426
+ 242c50a749fe607d1a652db0e06f453d5d7f80f4
427
+ 4375cd51b7ac544735b9c89df29db30369b0eb9b
428
+ 1458f8a2a10e49686cbc0b5e14a97acc3aa78a30
429
+ c6ce5c4febbfb715453373d447c4b5572f5fcada
430
+ 0c145a6bf87af0340fe06024ad8ccea391c9134f
431
+ b5d3ce4d0549d7802002f2d5e90f05ac35f5db5a
432
+ 7944055eb6cd49f12b5d42ca0b971eaa6dd51e07
433
+ 63c0d33115ed9eedd51c7f34177a113f6b40156b
434
+ f2ab65bb852cc93aaeb521f0f65fc2ffd14e996c
435
+ c7147ba3242871a59b80860245ab60c3d04c5ecb
436
+ d9accb542e2321181468e8f7e490114b30c1cf53
437
+ 8ae25fe05b21fc819243746ad7caf4555e11df0f
438
+ a3ddfaae625902b0394f854d6b341b21684638c4
439
+ 1a71c481191a57d4ae387450f040d1da83c10eb1
440
+ 7045e190e940ad597893d85b3336afa77cabe20c
441
+ 714db9c2b78eb2e4b26fb94c3927bf372a993900
442
+ 85ea5ac1300194927b58530756575dbc84dd46af
443
+ 18de5b9eb32fade90cb550ca65052bc1e0095a99
444
+ 667ae97a088538b0b321579c5b5bd12fa101e04a
445
+ da9609278b099c165aa343793bc2e03c2ed17752
446
+ 27bb197b70f0475abd00cc0db2ffa53de84c9e75
447
+ debde12bfd41f1960cacadf1239f1b50db2624d9
448
+ c7ed3ba86b0e3978955714855a42b4a7d8c67233
449
+ e5572a346b97cbaadbe68f0ec35a09d923a66383
450
+ a729d22dafc85162347b87dd530c05caf64ae2f3
451
+ c12d47630071fbbaa5b10507aa97f02c58aa37c7
452
+ 6d9afc0ab84073e890da12d0332a5987ab659d68
453
+ 240b251022182eb14ad96aa9f558150f8cb4c543
454
+ 000e009f6b1d954d827c9a550f3f24a5474ee82b
455
+ a8fd170d0ff3f6178900977ce422ddcded7c6c43
456
+ 8c8d04a5274fd92716fcf0926aea0c06e83e7987
457
+ 61e06a233a45c987979139488084ccd0012d466c
458
+ 6d5f7fa2062d3e5ca89760dc09b13a16199d1359
459
+ 7e84933971ea1853295b9d73e4b75f3478498c72
460
+ e10c7f72bc3de187cb7adfa31a1f098d0f47bbf9
461
+ f450b642e3e32641ef9878aed22f732d314a8c4a
462
+ ef17977a93067d945566356f538640febe56157b
463
+ c2d40cafa9bdfbf9d04d096d09a6aab9584c3ef0
464
+ 87b1981c3d51bf560e628fec4e65e4bc8f54566a
465
+ f9f7fc8e9da723e776abddb6f7e836fe72136eaf
466
+ c2e3a01f8e6f8a021a1551f72529f92c9a7703e4
467
+ d7dcb4acee7cca3e54b11ff196c0c26528e665ae
468
+ 3c2452d9487b0b6b5426fc7c502b4d8115236051
469
+ e430e7bb4e1316f3652bcf9ac93ecda4aae3729b
470
+ e21cec4d51eaca6fef39f717a12355853c8e25a8
471
+ 0a5c494f13f21e009f531c2a56543d274a8c5932
472
+ 3f68dbe78481050ae64297153361f374956140ce
473
+ a54ee0a7552c6d6a5fd5ff6e0b67ecf511a8777a
474
+ a1d08c47cbfe06de1206493bee12f301386725e2
475
+ a91d9dc6a9ad7fa6aca2c9ca4d9c7aefd1503585
476
+ 991ee479aee8194c495fc11e06f91f7b33809161
477
+ 39405d7bb73434ad12a0106c15cc194689eb4de5
478
+ 89275be2a434addb83a29a275b63113f4500e328
479
+ 24a1a847d4d5f74b57a02c5898af9364aa83debf
480
+ 6ef88240d12dc57a102450dd26ee7a0510a848df
481
+ 3299e38fb5351c11d9beba7400722773f3b74e6c
482
+ 99f61cef6386573c8cce688a30fa2cc82a1dc05d
483
+ a49f8c20eea4af67ec54408cca737bff98628769
484
+ 7cdffadd7c11226ad6973a1707e404fef96dd541
485
+ d853339b5945d07de64c5d8738e89259dbd40401
486
+ 137797fd34ea672ca506d1c8848d0da355baf7d9
487
+ 9b00d0ce1008b4c3765bf57817dd5241bdeb5c8b
488
+ 81984aa7d079c46380e4baaf49d078c86466edc7
489
+ 78e6ca8bc83669866fdf9fc5ecc19797f4011261
490
+ 70172cb5f244c48d2cb41621c35858ccfdf31997
491
+ a509b26bb05b83f9fbdff3465a2acfef5b35ae30
492
+ 694c620e98472213a53e932214054137e278a073
493
+ 67518afdc981945e4cbf620ff05c773934607a44
494
+ 2d3dd578113df1602cf753d2b11a4e802f616990
495
+ 4abddb8c6e0df6689eee21bfe27aa231d0ae8dc9
496
+ ee3ed04d53a5c7d1f60a5fc4d7c6832a7f32d3bf
497
+ f76269f3e3c431fccf5d9991a8a5da27977646fd
498
+ ba30d4f9339aca62283d1df7756b42158f637931
499
+ 31dc8c2da848a7eb21c8287e23990cb3ee8b6307
500
+ 68ac8fb1f847b3307be2a6a9a0fe66235a5e8c4f
501
+ 8ce0548b386ac1e48150a945cf36dbb6a0bd0ae7
502
+ 2b4439bdb73d8cdb6637d275f426f13135d415fd
503
+ 3b0b095afa3ef1b73a2bae29a5a131bd02c0e714
504
+ 9170bc3cbca5d8f82b02ae1e33128c62fa2a00a1
505
+ 4ecac34a02791711bc456edca64c086b9aef357b
506
+ d70512d3069e6532b7069fd0c8fda28d75324293
507
+ 9e39cf719ab85cc10326ed1d9df2273e75b67b89
508
+ 65c44ce096871da2588e1c140ac91ef771fbae97
509
+ a8949852e1a6258f3e7146d5a0b073861d12dd56
510
+ 4baf74306ea4d6d60d89c2575484dbb111cbac83
511
+ 4a410aafb82e10f1cfee2062b5cf2e038a3d12af
512
+ 7c7cf4f235cd2c455b2826e96803b1a6a47ba4f8
513
+ d42801a7b4d67e49ed3d417db4efc7dc6d4b5ce0
514
+ 9964dc1ba45079060e594be6429829042854b4ff
515
+ db275395dd0a2455ae378265850a90a3025fac09
516
+ 65f5e35193414c2998a1b5de2f959cc785f1fd6c
517
+ 19c0f1cd0bf5780a7c2a8abd5d5d8dcdbcf2fb86
518
+ 279e575b7c82e95beab30d37836e1e56176d7ee3
519
+ 3945e22c503109659c8d463d4674d153a6f5e8ec
520
+ e553763df924e731b9aecc68342af73ccf47ad2e
521
+ 128c6f4780cc59cfb76bde414ce42bbd544efacf
522
+ ab3ae67419bec16ebce20cdbcf76f2e8508b35fd
523
+ 42986dc0132012b6150eb6066fdc1047d57fbf29
524
+ 7ca2a2cf7fcf4afe8dc3ebb4e7f8f9a599d5748e
525
+ 7c478b9c7f099c8149ecaf11b917d41b5cb36011
526
+ 6cdb121b56dcc9cced06a26d1b11bdc907e4aaa6
527
+ 2d60a7c9131bb6044c9636d42f6888295a519dcc
528
+ 883b0ca02ebd243bb393bfc6144974539735d64a
529
+ d4c710c356bbc78529b427336e4bf7163a904239
530
+ 3952896b9156a43e6e2193cb3ae8a71a0cff6923
531
+ 79b92f4b2e09bb0bab7af1b036c03bcf075e2682
532
+ 6e0f11b530638be7f478b43a715dd3bcd6b17d04
533
+ e7d4a4d0b37b35569ce85c261d6c9ce9b57558d2
534
+ bad9a32c0b3f74e4f54de56f37f24a265f45fce9
535
+ 4ec3467fa91e3889a3ba2d695c863b5207ebe9c4
536
+ d8a1ee24cafd2ad6a648b4b62be7b06f446b1a89
537
+ d61dcadd8afce804e85d4b40ec5eacdf37f04fed
538
+ 1a15d2dc834fbfb276c67a2cd73d7e8cf650bfb2
539
+ 08d5f0581bd3f7196c90036e40f615c43c97eec5
540
+ 020a823e09ab8f8c2f13f78aba48cd5549848cb7
541
+ fd091455b9ed6ec71dc4eabf4b59ffb5650ea2fc
542
+ c0d25282a77168ec25c503cdba87a0b16f73e759
543
+ 19094aa75ff7ab5a9331eeefd36c15a201b0ab62
544
+ 2e74bfb1f3a9e72a00f6727067bb42cc6d8c4db1
545
+ e3cfe5af31c4c3e4cb9bc30ddae635241b476b19
546
+ d030d0a5f16a4087cc56137190fa0e7ddf19dacf
547
+ fb15d9bff157666a98e09d0e75cb0f05d9998e51
548
+ 64cbcdde35d49cb5220009a855561f6a440c91c9
549
+ faf5e36c67de12654252e4890b40297de6f0f18e
550
+ f7ae25006ab8f8e2beeb218acbc5d273376d54ec
551
+ 0b95bd0ff7b9d14421fba10a50634c26f3bb0692
552
+ 9db0f5d741239f0adf441c69f7037f1143c99fa1
553
+ 1a528a69a27510d5b3036ecef3f8ce416cc8a9b0
554
+ 3f0ba68515f730c5edcd6b5f7a2487672238b381
555
+ 28c9b5d17fd3e52e27b5b8d6d5338f823f8abe96
556
+ 52a4638c81b3feb5cbd2b66987b1c7fc1ca7ae59
557
+ 10af2962663aabf4b56357038b430adb7b2d0986
558
+ 65961456d11269a4191a41b0f0a0f2d92fcb6907
559
+ 46e7cc2ba0bf218cb004f58ebc249e5e72b8c29f
560
+ b616399a316a7816941a498c09de81c3ecdf0f03
561
+ 878cbf5db93ed95a2ddac0927543addd0d6105f6
562
+ 6d48e2be404813f7d346516d519369ede95d7226
563
+ c968e9268088153bcf51f3555b80f69e7f162db3
564
+ 405f21274bd606e89a0366cd8aa82e6dbaf8050b
565
+ 2b9872f1248cda295127c4374dbe49850b81d95b
566
+ c94ea8bfd1a74b0d93a2a207a1234b0ef1f73d0a
567
+ b0f4c1bd78e59b33cd73b510dac2b45e3cd735ef
568
+ 158c5fc595eb5ebdd337f44438d98d5581a87756
569
+ a210729626a48d3c75bf2adef15d856d0a9e5918
570
+ 248a68920a184395f2fb66fe69f7a2b1276e0f95
571
+ 8ceae55b2d091350328e94bb7e3ad1b2048efd6d
572
+ 26c43e258d65949742057d164454efac73bbb63b
573
+ cd76aa45608dd3370639d3ae4d2e774ea7c3e5a9
574
+ b645b284600692840ecb34473db3394bc354472f
575
+ 9caea797113b583b5ab74990ea22db63d14c2f99
576
+ 9e9394a307c29b74289f20464554131438b34216
577
+ 90519a813017e881d6d95e4df8952a393d1a7726
578
+ 7dc1bba5f4ccf529d19ed517880a10491df307a0
579
+ 653abdecd41eb6b1cc3315bd4a6e5819d1831df8
580
+ a644add72093f735a99ce94b304e91703f250b94
581
+ ca1385936bd95b3005b923bff4ff0077816e3d68
582
+ e85217770edae3f88d5114fec35166bf7a80e4d4
583
+ 47bbe920c329ac749fa3dcfb10570fdeacc6fc3f
584
+ dc36563ebbfbccc065d91fa24fe84f9b0402ed68
585
+ 1274f8b5947b2a5f87801d40503ef5b8c883771e
586
+ 694ec8ba0a9dc85fe62b5cb5041b71198936be89
587
+ c2fa24208e4bf7d91592ed094f88713be35fa708
588
+ 1ee394eff8bbe8488411ecb68712b0a6f08280f5
589
+ 2849071dfbc3e18241f7a5243d4ca06e4418174e
590
+ c0880199e5c76be3640005137c2c383f0c84b57c
591
+ f0814435ad279f8e908c65049775a8676ce15f94
592
+ 8669e9660c67b2489c0e4308eefe20b8fb3d2cf7
593
+ 1c61acd00fa431d425fc79b0c90ecefafecb3ace
594
+ 999e5dd9f1857d3f65650882fa2cf6d19ae3b9ee
595
+ 04e19a432042f4044bf0d51e3657f890f10cfaca
596
+ 91a2a3da8d7ab82cb4034056381a44c4848ff19e
597
+ 1c0a7f35b6eef0226ed7af5ad8ac87ce07fff38e
598
+ 3458f834c56fbafded76527b01578e5ef34b9b42
599
+ 80bb4f72ab3dc526c23a3dff758e7777cf1b3c09
600
+ 6c4bccb7aeb5aac67d498b8da720199c63e277c1
601
+ 9061d2012210b95c86401af9dacb0e63ac871657
602
+ fbeb39fdb55d24b827bdd578cd6a471a0a1063b4
603
+ 748b0badc59cdacb0717ac7a55a490f7e0ab4d71
604
+ 70952d29c9b7db955fbabde8800a629665a0d24e
605
+ a576c35a831b1e889631b757ed86916341fc7202
606
+ a5858355506446cb36f949d98bfa811e7d37e76b
607
+ a5f97dba2ae0b7949bea49a0e7068a1c6ac42ae5
608
+ 1c16e278550ac208d9aa1a65d0a9795f4132bd5e
609
+ 5a2cf867e368a77f135a855cd1de59ba5fde99a1
610
+ 132d7c4172cc25eb59c7745e6d74cc4a4dd88dd6
611
+ 061dc0ca6fa3ee55aa7e688910169e4e6c74257b
612
+ e2ee40eb4145cb1450572a7837ef544802b99866
613
+ b2b0c6a0f14f3b76df69046861fd04972ac9f3ee
614
+ dfc83bc2dc59d24775e3e8228beddf9e654167d1
615
+ 8ea9a921345fa2ea894bd9b953081f15713224cf
616
+ c6b2ae26499e736ac081af57a7b41c39a7b97fda
617
+ abac6c071e35ac30cfe3317089061124ac301495
618
+ 5e0f057dcb8b6c21806b379cd349d85598f5bf39
619
+ b4623e58d28e1d790a508d26b754a752f70c288b
620
+ 621f194999cbead9449bbd7222c6e8852c5043c0
621
+ 5d952c025f36694c06917bb1a5395fa13ccb84d1
622
+ a0fffbb1fe7c929f520de855ce045b840272cca6
623
+ e2e8fe4405767b62766efa00f95dc7b501e9eff3
624
+ d24495608f98c48e6f3030d4af691b009d09cf41
625
+ 826c9569ab9e52eb031dd692baf84337eb217cd6
626
+ 470ec40f14d3d077afc6702a1c1c0bb4baaeec57
627
+ 281237526c3d4125250aa204bd6798e16cca4bc3
628
+ 2fe0fa9e25453b1797f4cf786c40eddd64483d3f
629
+ d231ee5ec82309024acd028a83ae876d9ffcce94
630
+ 71732a91bd25ea50aec127f95b7f8b8609db3da3
631
+ 793ec3ff30c242c570c9a9e8c95d78b05c7489ed
632
+ 4b5cb898edd34436e4065c5d3de05c2ec7d95153
633
+ 23d0930474aee4957dac9571e06d40757b5535aa
634
+ 992a3b15640c2613b5481fbe2cf022178e5f3ff8
635
+ e8c0ce0985596758a82b71bdb6759c72af43d06d
636
+ 846bb87419fc959197879e04dc9c15f3723555d7
637
+ 8c6907fb70ecd74ffba960283bf596155a7fb273
638
+ 1cc3cd345edba8eedbcf183afd6e746b5b29a422
639
+ f3e48a4d193edb98933989cf54dfb46310ffdd9f
640
+ 30a4a422254a4026dfb77d2660467994b18b1eb3
641
+ 5f5ea4d8846ad79c33bb149e6acb853ac78b4247
642
+ c6d0ae6864fc9ac5307e23d283b1ca4b291b21e7
643
+ 2de834aa1cf63d1e6b7098c5528e4d021f131f00
644
+ 55e79f31165cd20502922ceda572d3b7db9cb41c
645
+ 6ee19b994fea7c9447b05e9dab49350e2f8c1377
646
+ de44cbbe8fd64ea13caaddab77560a48806c2180
647
+ a2619695901d714b44c3941aad3689a40abcf363
648
+ 485385e26c8fae0a7efd34ee11ba645662074a13
649
+ 8035d023b91b92978788fcdcaa6062c38883f4ae
650
+ 38a7dee7cd042726d64a95d3c5c3d341d656d68e
651
+ 0a280b46be0fde5d87ed47fc7e970e3ed494cad6
652
+ aafdf3a5bd71126c9ad07d93285966dc04d40c85
653
+ 93cb2f4e32053398d3602e0cbdaa12e8bca062ef
654
+ 4ab039bae14499bc4f432f9f20a2509fe9310fb0
655
+ 29c411d098ce2a631503bc168ebb0ee6f65df497
656
+ 88ce7a6fa029b6e8f51c92f1666b02a404b827e6
657
+ 4f98a1294468c67a563b48d1ab6b4766a6d899ec
658
+ ef0e221c7edde75caf0d5bedc0d93745890a854f
659
+ 94a1929f8845d841bb1f47667c1e489ab21bbc56
660
+ 4ab4b43b33178d53a5a445283d6be39ac57106de
661
+ 5ae9d4c70fa9dd4cc70037f4920ef15d4fc63d2f
662
+ 08af6504291f39451465dd1f1df6466e61c4595b
663
+ 23be51cd2399b9825facbac2a88475450c5927b9
664
+ 750af4d960982c1655e9edc08470aada3c72a9a8
665
+ 47f51c462114ca3590df18c3c96da04a217b79a1
666
+ e9e46f551280ffcc98b45c3c9b18085ed14f38ba
667
+ cb0ce99d40bb9d942aeaf08cbb83b075927a96d0
668
+ 62045a56ba933fab5e2bc61be05eb5bfb81a8527
669
+ 9112ffd4afcd36702f2e6ff7aafd653edf2557f6
670
+ b1fadf90fed9739d72e98b56727e471e070b85b3
671
+ 75fba14591fb8de7567bd2378b5c5c114bad77b1
672
+ 7e81e309cfbc2385f04c4d377c4562efac6ca238
673
+ 0a38f333c2a4adc64c5f8d074508632418074755
674
+ f4aa472201d2337eef2115dae23439e0a6dd9663
675
+ d1adc7acf92d644b3e1a821668a05b024974c350
676
+ 699fc0441179a3cf82b303cbe25bd5a3be551ea0
677
+ 697a6cba6628b2f233f2a1cf317fe8127e4d05eb
678
+ 397e0ec274130aed3bc1bdc461bad41c485f629c
679
+ 02aba4aeba128defbfa587e3f07efdef724666b7
680
+ 5e31470b18e9dd499b9f8787056cf0e68d52e055
681
+ 9507c23d0741682f71cde608ae517c0c1ad2a4f4
682
+ 16c115ba95f7b71292bf5c00a1d425a8586c551c
683
+ 42ef162afc768b86b881bd4c59beb8839149d76a
684
+ a7714cc78a381995633711d95465e883b613ddd9
685
+ 91519a0367b9eaa66b5ffe27964ca4c913093aed
686
+ e90d736f7f5e32ff845a898036c529518cce0c6b
687
+ 82b0860745881e030c57a3d1bbfea46bc404bce2
688
+ 26c3e814a69b6335ab65aeb2c4a1e97015595206
689
+ 7990c00e8ae3117587f54a880d7d20d0578d4646
690
+ 260d91a2ece614587559ea3bf37f76e4d5a48beb
691
+ bc19bf14032da3bd5d3e6b86fdddd47f80152747
692
+ 1badd9612877ba84a92b025096fca1e0a36f07e6
693
+ 4f339c24142d0442f20301c1992d523946d1c6d3
694
+ fc89b29738b18fe4c0ece96bf00f2cbe687e45db
695
+ ae3e7378f86cac99783c3de50d0c073e79a92759
696
+ a1e6044b7e31b86d42b6dfb7ddaa1eb6bac2070a
697
+ 948ccfbc7690989a96170839cc5d622e12e0b044
698
+ 9ec16c5ec0fd561efdf57572fe22e3e768ecfdc4
699
+ c1849b79963362d71d09ff4cea2c46f9b3a03d89
700
+ e82a29a4c2fcb1ecaed942c6fb550a14b916345a
701
+ b8412b898cf77763bdb3da689bb1bc9d10447116
702
+ 147a073799722bed54c3606c8833cdd58b1aa1dd
703
+ 79f4216bdb44dc618f168d2a5061481350c9a38b
704
+ c4352c74759634af80f1f6acc69c55261dd12acb
705
+ b6a68adc1d771af97938d64d3c21ef4fcc99cfb8
706
+ b5f8a569e73948a0930d18622740f52ab91c1a42
707
+ fb784ea50559ccf087521510e7760473038cef2e
708
+ b7aadd7612b6a1970dedfdc175ec4780a8732703
709
+ 0146da64eabaab5d7f53e1ce58aeb9e74dea18f8
710
+ 7465d748cb38e50921b446953ef27b0c0fb6abe1
711
+ 4cc4e5eb162c622f786e1c98a9e00237f5687ee6
712
+ a95f85c1672b2ff74f860a5980d83b440715deae
713
+ afb99fceb6c0532a769b61a81e8dfeb7cb70a86a
714
+ 8a7b29224fce56d21e0b4d8b83cb42c32a4a2e29
715
+ 2a4c040c4c53d763d1263d8cf797e0b672c154fc
716
+ a81140222a3c6e2323cb290f353d595686473491
717
+ 642db49c7519de4227b0dca5b23144945bbf54ad
718
+ 40df1682f34e463ba031f077e211a1e8eb1b7e0c
719
+ cb778e62e3d6b15a836e50d65a18a269a8a82577
720
+ 3aed3c2855ec616b87c4cf79a69298ba45c427a0
721
+ f18f929339d5ab26ce8e26e716da4ad095474768
722
+ 8e03a13f5223f71d02f875b4fe4e48cdc1ea3738
723
+ 4443f53a766617ae7c30c48d8cb55d6fdc3ceb30
724
+ f38b2e2284f48100513689571ce9d41cff63bd4c
725
+ a5182de1c12d3ff131a5dedc6130e02b43c3b267
726
+ 9054ed7b6b3eca10003d19098e5d3c51a8dc071e
727
+ 2f0833d2eed57049454b3e0f41dc02eb7587bcb9
728
+ a9883044091acb92e2edc709d5136af372d06ebf
729
+ 1360b846750bd2c7e31cfe015c77c5968a9b541c
730
+ 156253c33dee7a50df8e9e5e78adcef72705f3c9
731
+ 42580982bcfa232a30d39a26fbcd605cb041e092
732
+ 5ea3d3524c6c824950289855e33037576e741d30
733
+ 87c21fd886502206ada74a652082ec8dbb0fe7c7
734
+ 958b77776ac602ff78ddee5ecc758ee170cb5fda
735
+ 0baa5e5fe65401be934349bd1f067b31a4a0f0d0
736
+ 0f18a06bd539d1de1e3abf38469e1d14030ed41b
737
+ cc06d014fb7fc7f5d0fae8c3576c134a281ca14c
738
+ cae072ed5710b9cea48c8cb0b011dc3a9cdceacf
739
+ cc7928b6593cb03aa125a5865684da7fc0405d74
740
+ 9f71dfac529fa72a12235f016cd481b02192c3bc
741
+ 2f77ee6c43941f768b9771bca5a02332d89bf80b
742
+ cb6bd932f7a9e1c4845bd0d974f8983f2d5d6968
743
+ a3b808778e2f90b6605b09b68cd7f3eb4659477c
744
+ 7e7f7e823c05955c57123af1b61dabeaa5221825
745
+ 784321dfdb51fa207c790a3f670f0022fd575775
746
+ 69891ae41f6320ec437455913c5dd6d76a0241d0
747
+ a243d306ec4c022a4199b6e160bdeba677415fc8
748
+ 1990eae2d51f66f9b5dd3b2d2beaa17f2b95599f
749
+ 4d55ea7163e490d3f37218482269961898c62a87
750
+ 4d33676bdf0c6738fa3088d5e972ceeadd3730e9
751
+ 54aefea5f3c14105ae08f09aa60ba5f6917b1b88
752
+ d0fb72f2df7282c349193c0ea47af281034a2c32
753
+ 284f81ec4d1297d3949ed95a114f4c10011abf40
754
+ 83fec7b4d265b21ae38e07c6e1046416b7758993
755
+ f78bc049eba41b15d9e2ea28bff38e508b0e71bc
756
+ bb940fa349ab09d69edcb5f3a8fe96e55cdb69b8
757
+ 4dce40768f628700555244e91a69c5775d6caf6c
758
+ d176f57c12f30fae319ccd5b50b3096837767ed5
759
+ 1bd87d9bd116cca4f00aa031cab25897d35418bf
760
+ 684d8d97b04fdbce1a08fffb59e1e280318cdfb7
761
+ 99b1b03fc906723790db2ebd04ecc51b8ed52052
762
+ ce7ff18c9588042aaf62c8c71c69f769a16c4a7a
763
+ ee14d42f2b34f4bb5bf90d8c813934aa5d6b5e01
764
+ d7f730f7658fb4af7d492e848fb759d031726e34
765
+ caefbcb40174cb97c8361dfaa7899beb20202509
766
+ fbc427e1bc2cf82ac3756c8c7de4249b52e56505
767
+ a2e4de47a027a36757d181f61e2d3fa6dde7274a
768
+ 85b17ae766f1da36b8ed0556a932d63bec08c785
769
+ 36ebbab9aeba7a8a04ceb800b2e445a85e4b2c0a
770
+ 5826b9a1cce4a960cbf4516004b194c988312730
771
+ a06ecf2bf25af0a6b32be1d6a82ba618d9ecbb33
772
+ 8f03971de78085457c1440e3ca545ae5cbb5230a
773
+ 06588a8ab74f068ec61b89de9ca03a28f5ebd6f4
774
+ 72bd7e434c944937912039c7cf79c07bd40241f4
775
+ 14b5d1ee3b508505b96a3f403f1b6685e110c3f5
776
+ 6ce0eec1ba71291ba928d4a825e582c919a2457a
777
+ c25eaa87d7ff1d1fd503bfb7049a41bbf282e916
778
+ 0ca9829040ed3d37f3df6341e28becc8df839409
779
+ 4e516c3549d4aa6a057dadc9f9f6f9aeabfe35db
780
+ 8a63d4ed82617bb5f3da2ab351138b4690c9e03b
781
+ 0956df18b019953eac5eeaf6eca49674af37e52a
782
+ a8dedb9efe2e9bfe658503702a0602fbefcc3316
783
+ c0787c279f755fe76464ca4fbc94e24add71e3ac
784
+ b11cae312129d1e47a4102f87ad8e1f0781d34c7
785
+ 248e4202dbe0d45e76e930b614578206b3dbc383
786
+ a0dc9ceccd24357326241c97c07df17c93e77420
787
+ 5468413e75a18f8d7acb2d26c2b80bddfc9adb99
788
+ 7c81d09ca7a80c686ba8530986cb53e555eb60a9
789
+ 8df13df883dbd7e8944d8098b74ebf3aeb4b735f
790
+ e2ee2a92b8f493b2960c4e1ba2abf4f2a54c6758
791
+ c470e5a7568645a10488f402443f3701f69403a4
792
+ 23e3897002ff686867b2372767d5d8f121cc9b4a
793
+ f1f670d0fe617fb374b15bcc20110b89b6082aa4
794
+ b17028b6a57a1301be1bb2021cf51d6fe4bdd354
795
+ d3c8813d44913745f4ec4253e048af17d4cb159f
796
+ b1b3a8940587229a063dc836cb0422065ac0d292
797
+ ff7672f15b344e93c02d0d3b9676b8070a735e93
798
+ 331073170c761735eab3c9a516903016c2aad8dd
799
+ 07605b39c50fbc320453c583ee749ae4f97126d3
800
+ e710845e4cc7eb6a1d99073dfbf6f9278c24bfa8
801
+ 0cce9dd80952ff900e8704e6115f9c1bacae894a
802
+ 2ffdb3f488210d4ebbe41759618bd8c6d15878bc
803
+ 16d1cc466220c90c009bad3f09c2a085bdd47d5a
804
+ 1a71e84c0c599408ec18a189dbd779d5e20d4e21
805
+ 51de15d048a6f3b0330e8da198b2d17260ce8c85
806
+ 36ac969d234f196366b404c9c714c3b8d30ddf6a
807
+ 244d6af0cf929f993a2ed2de0ede4f57d501eade
808
+ a834e49430e3b3b1cd596dc1338a028e7166643e
809
+ 2e489b53225f71cc5b73f9aaef5c692737c0f6bf
810
+ 1d52cef8af071ed110d3ef8feb3e4b275dfddd01
811
+ ea0e27967a6c62875355c5f423e4962835c5921a
812
+ 02535aacbfed4c3ab00a0945d59933dab54f6fd1
813
+ 0cf6cbdeca67c729a260b7c1f5710b7a1e0aefa5
814
+ ca42191efe091ea06d25dceef9ebd84df8ce75e4
815
+ c5001aeae4fe17f8b7ccc1d6c604727ae63c35e7
816
+ 730b9668fda289b194a3b66a53fd3745ef42ca32
817
+ 3207c07bc7ace3a01ad233641f1df91ab37a505e
818
+ de657634cef20a388d43127a184619105d110a27
819
+ dfe850a4b3c6c002dbee134a112f16f8e1b974c5
820
+ 1b0e7715e01a62130ac573c38834b09274a7a866
821
+ 99c6160a2f6e22b5040bb47a279f81b4224fb222
822
+ ceadf8419c256716569dce2c60d98dd703bf2cb1
823
+ d8fb13c8444f71e7f309d7ccdd7ef329a47a4df3
824
+ 75805d5f1f22bc6fcdff850c88a4fcce7dc3e17f
825
+ e6bb6b913b34b30af0e19a93bca4f55b39579f88
826
+ c29d27094de54106cc903c2e0dfeb89cdcf9ae02
827
+ 41ae19f40a339b6b47fceee00f512d849df292be
828
+ 04090c2dadbd1d446a8364df894344687131f841
829
+ ba47d6e2e838b11290d702d1fc03261d27ba59d5
830
+ ebd463e2b4b89a626e16b43071b06f3145cfb661
831
+ 78141478f00ac19912fa2b283e8c91e30eb3a7c6
832
+ d8a4f3fba1b67bb6848489e45a92e9c1229ff7d2
833
+ 929b032a966f563e8401285e4d96850b17f640da
834
+ 82a3799090db99bdf611599094170b85bd4eee4e
835
+ 620fdb835eb7e095e9a34f8a165843f81fe50328
836
+ 66f89413b6f050fb903d58b36ec961461145af82
837
+ fb824fa4ce932e860604ac21db4b555c6ad1114e
838
+ d8c925f283216521073497659088f4ba707311c9
839
+ a28c2815223f89026b6a198415a1291cd67eca0a
840
+ 8201b77f669191dd01caacbea1e3b5ffbab92962
841
+ dbfd44e667bdeeb17295ab40d123ddae70d3daff
842
+ f324f1736d24f14c7685df0f2a2cc4bb20999fa4
843
+ 296f977687e8ce959a2e38129ce1c0d31e755d8d
844
+ e4888dafd50eaf43e1476701bd26bf940865d973
845
+ 17ca1470986faac5115d246d3f9b78244b7215eb
846
+ 0904469f246fecf43062b2863bc81f730a96b20e
847
+ 09b7e506802fe6fa4a12154e322dddbc34553f9c
848
+ b526d3ce8d4649e96446e1e8947b674001fe16a5
849
+ 46665024a071b4916afcae4b9ed3cec0aaeabc7b
850
+ 36fee230f41e1fc89a26b1b7bc7e884862dbf56f
851
+ 702806939cff2095b2ff97a08d84bc14d1dfc5ae
852
+ 30ea107c7831a846dfe6828947249489468f3ef7
853
+ 8d51b266df630345c667bdbc07f172b906e627af
854
+ 3691b1bffd90518b4017ccefd8c15ffaa8d87d6d
855
+ 6c81ff344b4285b42f2733cadf42536addd736d2
856
+ 6c15f6261e0d7d09ca59071955ce30d09bbe97ae
857
+ aa3acca1a17c375731214851c56020878929a068
858
+ c4cf20cc2e3665ba0b7d948683bfa1e82aa9b7e2
859
+ 9ef570f878a8c2d9460a99ca523b835535de67d5
860
+ 7d76f893313ec0b855d1dd6ce9b8fc9bc77723cb
861
+ e7ad68df97b2c9bcdf6e56cc017301f84a7f9b4a
862
+ 8f5a3860948e5dc213ed825fb4715f0ffa013ce3
863
+ 27e795d99164a2372106c9e1f118cc19258e41a2
864
+ fb438b2cad9b7583f4eda4fbe6fe9e9cd1f59f10
865
+ 6c6e755b03472223c69700bb166d81d9adf080b1
866
+ 8cfbb990201cb91fff3db779885041d2b5c52c1e
867
+ eef5130bd17ede5cacf8be5881eab0c09a538bda
868
+ f20d8a304a9009a79a54867664bce33473947272
869
+ aef5dcb164dba680b436bbb37faeeccbbc4fe2b4
870
+ b412608a7f30af28fb8615e4b522b7dcecabe212
871
+ 799415d8ea5094bd6cca8c178d6d8531827da191
872
+ ef5ba9f7f4c954dc6208e9a47fdaa730602fa27c
873
+ 6204780ab854a5443a52c343534637fc227dd70b
874
+ 58733bf4d2489d1823a432b2f515f22fa835a88b
875
+ a8f30d02868c8ffc924271d9da99e0c180477a1c
876
+ c8baff658f6506e04d7f530d9b266ba2d4b632b7
877
+ 87a31d871a336bc60987492515a20ef25d18d0d4
878
+ e2929ddb475b033444f85c3cf7e5ca38e84ed7e6
879
+ c7fb0295ad6226798e65332c841f6a1508eb9efe
880
+ db80674c14610f0b964fb574ac32c6984cd226f5
881
+ ebed652b9c7ae1784ab032b2023445e8b8cbaa41
882
+ 08ac1c4c2c7589f889b2bce3687a724d0c636c40
883
+ dae2a0e1c908135eebc98a0db33ca435ebe7ad5a
884
+ 3d415472346209c9e90706dfd313728a0ea15003
885
+ df08670661d8887644542806a8d69046e3ba87ab
886
+ 32a96ae444a08d6ae828b34539aec76a835a95e4
887
+ 4a0465f87d082b8e9a22608da161f232e8d6f464
888
+ c7f47a4f22bcd6f11e6ee97e9687b5e917d9e495
889
+ 9b4dec196b29bcc98a377d6f433638a85177e0c9
890
+ c0f1425ba0cdac23bc342587ce6ea6cb53515c55
891
+ ca6f373a6c76d4a4284240fe5e88c130bd56d27c
892
+ 785966c05fb5fe10addeca3a86f1857329957fb0
893
+ fd71a64340425384294a115d3a42bc8069ac9f67
894
+ 08784cd3a744ca0750c746910124a6056d46f608
895
+ ee9284abb97ecdc3ed78a4807303124652924051
896
+ c9cfae108e2aaea3b12010dfd0404f7bbffa5c2a
897
+ d7a63c81f8bed7df99b942d88a380c100e74accd
898
+ 23fc6eff1bbf238513e2f9c76e40762f01b1737d
899
+ 485c7afff53fcb4f694a5b3cfdc09c372cf73e18
900
+ 8656b25529d3e5aabde19eb42d10aec5d8af2088
901
+ fe954e108708531e155eadf4945fff5e432c57b3
902
+ 0f5fe6ee00187bde832acb092e42431f9fa8430c
903
+ 8827ce43536f7246e21f643fdcc1b1ad44c05a12
904
+ 869e1a290cb6ce44eada26c00d5abee0e5c2ecd5
905
+ a2215dc789a33e1ab3be1dfcc03e8f7f02d046d5
906
+ 62a233d2e55b159001ed622fb96b9444fce9c11d
907
+ 26fff6559df5149b98c3366e7c01236daaf2b1d1
908
+ 115e024f021871b307a7a315aef720bbffe1d54c
909
+ 19719df575d3ae0d8c93c037f7f1972b9e10f1ba
910
+ 3628f33e8ef1350912bab8d4ae467c7e1f3056fd
911
+ e123e08e23278c95e399b3b11da411325135da21
912
+ 77b3598df08e6f3a2b4ae157904e30d5aa2ad49a
913
+ e071ff877d67787d0a6582ac3dcbcb627dec9ac4
914
+ 722a05a34115832ebdfa990a99bd999d097a0ce5
915
+ 3dcebec3361c047d19cf639879437ed5b769e7f2
916
+ 8f37fd4e1147e623fe6f8cc6d190c304467d999e
917
+ 5339c690ad044e082f8a31bfe929099d7e75531d
918
+ 82fd9658604cefb93728b198e73889872ce7d70a
919
+ 804983224e3f5cccbd52b26bebc53b88369c448c
920
+ 562f9fdd5811793c11970c856d21c7f0c32118b8
921
+ 5914ca61115649643f88ae110eaf3da4b112e6e4
922
+ f44b0a2d303b725a7f5c82048d7423858e78e490
923
+ de5e6430a7c1166ca82500ab7fb82cb95cc643c1
924
+ c174eab1c3615c3ba5dbc0c6c30ac67ab6b47024
925
+ aae0341ca8ab04c9c169f4dde3e2e943d758422a
926
+ 517dc966b1379d84a9ef741ff9ca43e281868c60
927
+ 436fd9441cf9517b6e8b5162db78031228b18d9a
928
+ cd7e1db2eb4709309b43cc400d6619aff480484b
929
+ 5c100f2e25d49a90b25685b9d3bb17a35e325374
930
+ 362892feaf8dbf44a0429d3676f9b5e4ea6a46a2
931
+ ca8c5f96adddb61025107907704ec344143b0088
932
+ 8c97077f3dc6794837f887a8d57bc8d3c05e8b4b
933
+ 04ba3c53b4068a8bcf31bbfc674d520ab2843a2d
934
+ c91c677ceb1093b393d46dd21252147c3ddecd1f
935
+ d85824d0d1dbc389c30ba584837d82e85c5bcd37
936
+ c0f7e29dfb195770d68e6ee608c7129e72a89e23
937
+ e55510a4c7ea27d0e47137479fcb16562f8d380f
938
+ 8845f1c8a8b45987a6fe69bcd89060ba38475d2d
939
+ 8ebf95f844971fbacb819e2e05fea4e27402a34c
940
+ 5be6327602aabcb3fafaf439f69ebc621601d30a
941
+ 698560f44a2c58c87988498dcbe51e30ea62c989
942
+ 29f215bf015e848c5af9a9c70e1e3e052016704f
943
+ 6211582a40d5a1d67e930e337ea11f1b3538ef5e
944
+ 80cb64b8ae5710be8044127b678bbc0e010e79a2
945
+ 7e613e66f3b7da299b8f4689cfa31da7bb381e31
946
+ 128131f6fd6e7bb018806ed5f150a207ae8c7e69
947
+ f686c1c3a2fce19f177aafc281d6c724977a6dfe
948
+ 56f58b9bd5e4a5c6aec7f2c5a4a04a702fc3f2dc
949
+ e63e6ef318e5cc205518f7fc052da7020742f55a
950
+ c19c8562df56700121a61f5cdbc8525a46197e1f
951
+ 7ffa78b92966e11b0142829ae17c871b9f6b5c15
952
+ 426952f1145f112142141f26556313924ce7465a
953
+ f975e857f57f0f6d96ed50006a7b4e126edf1f1a
954
+ 8a6220895e1d634d0aa0f41ce6882c98d7b495d0
955
+ 12da0f4b955b911a893158bd3beb9b24f1a0043e
956
+ ff8441521f15f11db3c60850a1ee551b81661fef
957
+ 0b88599d7b1e25e59f2da8338520ec3325de9337
958
+ 0fda61a11326021d7ff0071b6bd8b2b3517100c8
959
+ acdaf288f8a96f77e2c34104fadf26c04307f5fc
960
+ 16d04e701ed59f32ea3c4226b553b6f0f50c7426
961
+ 7b759405d39047b5aa0f0c22d91c3d254fbaeba1
962
+ facb5a7732d083c66484c9b3dbb274ff1d6a1ee1
963
+ f959116e0606392633e8d8eaeb710664e4532c6c
964
+ febbd51aa5181f74d56f3d0e01d38e264444f825
965
+ 90ffbc94fcd43cdbd2e54f5cad75d2a7d659bdd8
966
+ 61cfcbd1b8ef945165acef5e7145762bb510453d
967
+ 5477b6eb53ccc404db0ac820d3d858052bdebbe2
968
+ 4c6156e3830087141b0014005bf955f1a87e1edc
969
+ 12dc55dc446574144eb863292c3565736ce0bfc3
970
+ a761ce0dc6d89ad3170a3b69e3d2c71bfd014b8e
971
+ 8cf55dd9b1bd7a4c8350c81e469d92ec956af62a
972
+ 8671360c5d830f38316ccc4f63362ded7a2d20a6
973
+ 97f1a15d8196c514517e76f1d80571fa769e28b3
974
+ 85b2c0a31be506ef27e0ca124be5a89c628de120
975
+ 935dfa6867b975280d63f75cdef228372adc40ef
976
+ 63367984bfa6dcb0ae560d7bab812c622481920c
977
+ ec10a4353082865abdb5697560390653c3902065
978
+ b7974f532d25aa2eda5e16e5dc58d3f726373c03
979
+ f804d65a6009874a0c4d555b6e9d8d14cbf935ef
980
+ cf251f22dbe2c976d340eaa8469e1ea21ff88a42
981
+ 6998dad2b81635da9c328ef829a8b1125393c38b
982
+ 2a073e0f2e510318e83c16ad9141f5c1a31cf6a2
983
+ eb6f4e52212ccb96b2aa08e0af5949dc8c67a024
984
+ 09b9f830b520e68635c45a604641874e0f2bfeb0
985
+ 17a33c4bc2856e52acf16f3f86dd7053e340ffc5
986
+ 81f4e9eee7d046992f4091cd2d82a6a82981b354
987
+ 5a6746c9041d494e8f794e4ecfb6a7c941f5ccce
988
+ 5249fba450a5865325c2b47ce5fac5a585b2ca23
989
+ e35df1cddab6e311e0b4f0b732c555c51e8a739d
990
+ 8f95ac3d57280ec506907f000e60b9bcb065b4bf
991
+ 2750ae3dac18bcf9eecdf9127e5aedaeac19a67e
992
+ dc4d88520f9221eea943cdc54bd89e21e52677ca
993
+ bdfc42f3dce77e9e964ba2922c19faba2ca563ee
994
+ c3b349b83e4fa2389ee59ea9ca036001b358ca02
995
+ 3c992e03d64ea763d4b6db96e3371143294172b8
996
+ f40f581bb9a644dc31feeea1bdc3dd6bbc42ccca
997
+ d59c8256b9451b83457299244fa9f81d0369081f
998
+ b015c20c7868a98a3cee9878553502c708fd96a0
999
+ b6e30268a7f110d767dac9144454d2c6fe49eb34
1000
+ dbfc2a5e7753d96913593c41db73a32dac062ff8
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/configs/bf16.conf ADDED
@@ -0,0 +1,9 @@
1
+ # The format of this config file is 'key = value'.
2
+ # The key has the format 'model.scenario.key'. Value is mostly int64_t.
3
+ # Model may be '*' as a wildcard. In that case the value applies to all models.
4
+ # All times are in milliseconds
5
+
6
+ *.Server.target_qps = 28
7
+ *.Server.target_latency = 20000
8
+ *.Server.min_query_count = 49152
9
+ *.Offline.min_query_count = 98304
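
The comment block in this conf file describes the 'model.scenario.key = value' format. As a rough, hypothetical sketch only (this is not the MLPerf LoadGen parser; the helper names below are made up for illustration), such a file could be read and a wildcard key resolved like this:

# Hypothetical sketch of reading a 'model.scenario.key = value' config file.
# '*' in the model position matches any model; values are kept as strings.
def load_conf(path):
    entries = {}
    with open(path) as f:
        for line in f:
            line = line.split('#', 1)[0].strip()   # drop comments and blank lines
            if not line:
                continue
            key, value = (part.strip() for part in line.split('=', 1))
            entries[key] = value
    return entries

def lookup(entries, model, scenario, key):
    # Prefer an exact model match, then fall back to the '*' wildcard entry.
    for candidate in (f"{model}.{scenario}.{key}", f"*.{scenario}.{key}"):
        if candidate in entries:
            return entries[candidate]
    return None

# e.g. lookup(load_conf("configs/bf16.conf"), "gptj", "Server", "target_qps") -> "28" (as a string)

The fp8-99 and fp8-99.9 variants below follow the same format and only change the target values.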
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/configs/fp8-99.9.conf ADDED
@@ -0,0 +1,9 @@
1
+ # The format of this config file is 'key = value'.
2
+ # The key has the format 'model.scenario.key'. Value is mostly int64_t.
3
+ # Model may be '*' as a wildcard. In that case the value applies to all models.
4
+ # All times are in milliseconds
5
+
6
+ *.Server.target_qps = 77.7
7
+ *.Server.target_latency = 20000
8
+ *.Server.min_query_count = 98304
9
+ *.Offline.min_query_count = 786432
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/configs/fp8-99.conf ADDED
@@ -0,0 +1,9 @@
1
+ # The format of this config file is 'key = value'.
2
+ # The key has the format 'model.scenario.key'. Value is mostly int64_t.
3
+ # Model may be '*' as a wildcard. In that case the value applies to all models.
4
+ # All times are in milliseconds
5
+
6
+ *.Server.target_qps = 77.7
7
+ *.Server.target_latency = 20000
8
+ *.Server.min_query_count = 98304
9
+ *.Offline.min_query_count = 786432
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/dataset.py ADDED
@@ -0,0 +1,90 @@
1
+ from transformers import AutoTokenizer, BatchEncoding
2
+ from torch.nn.functional import pad
3
+
4
+ import utils
5
+ import torch
6
+
7
+ PROMPT_DICT = {
8
+ "prompt_input": (
9
+ "Below is an instruction that describes a task, paired with an input that provides further context. "
10
+ "Write a response that appropriately completes the request.\n\n"
11
+ "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:"
12
+ ),
13
+ "prompt_no_input": (
14
+ "Below is an instruction that describes a task. "
15
+ "Write a response that appropriately completes the request.\n\n"
16
+ "### Instruction:\n{instruction}\n\n### Response:"
17
+ ),
18
+ }
19
+
20
+
21
+ class Dataset():
22
+ def __init__(self, model_path, dataset_path, total_count_override=None, perf_count_override=None, add_padding=True, fake_data=False):
23
+ print("Constructing QSL")
24
+
25
+ self.model_path = model_path
26
+ self.dataset_path = dataset_path
27
+ self.add_padding = add_padding
28
+ self.fake_data = fake_data
29
+
30
+ self.tokenizer = AutoTokenizer.from_pretrained(
31
+ self.model_path,
32
+ model_max_length=2048,
33
+ padding_side="left",
34
+ use_fast=True,)
35
+ self.tokenizer.pad_token = self.tokenizer.eos_token
36
+
37
+ self.list_data_dict = utils.jload(self.dataset_path)
38
+
39
+ prompt_input, prompt_no_input = PROMPT_DICT["prompt_input"], PROMPT_DICT["prompt_no_input"]
40
+ self.sources = [prompt_input.format_map(
41
+ example) for example in self.list_data_dict]
42
+ self.targets = [
43
+ f"{example['output']}" for example in self.list_data_dict]
44
+
45
+ self.source_encoded_input_ids, self.source_encoded_attn_masks = self.encode_samples()
46
+
47
+ self.count = total_count_override or len(self.sources)
48
+ self.perf_count = perf_count_override or self.count
49
+
50
+ def encode_samples(self):
51
+ def pad_tensor(tensor, value=0):
52
+ max_length = 1919
53
+ return pad(tensor, (max_length - tensor.shape[-1], 0), value=value)
54
+
55
+ print("Encoding Samples")
56
+
57
+ max_length = 1919
58
+ min_length = 30
59
+ total_samples = len(self.sources)
60
+
61
+ source_encoded_input_ids = []
62
+ source_encoded_attn_masks = []
63
+
64
+ for i in range(total_samples):
65
+ if not self.fake_data:
66
+ source_encoded = self.tokenizer(self.sources[i], return_tensors="pt",
67
+ padding=True, truncation=True,
68
+ max_length=max_length)
69
+ else:
70
+ # Hack to generate a deterministic semi-random sequence without using random.*
71
+ length = min_length + len(self.sources[i]) % (max_length - min_length)
72
+ source_encoded = BatchEncoding({
73
+ 'input_ids': torch.ones((1, length), dtype=torch.int64),
74
+ 'attention_mask': torch.ones((1, length), dtype=torch.int64)})
75
+ if self.add_padding:
76
+ source_encoded.input_ids = pad_tensor(source_encoded.input_ids, self.tokenizer.pad_token_id)
77
+ source_encoded.attention_mask = pad_tensor(source_encoded.attention_mask)
78
+ source_encoded_input_ids.append(source_encoded.input_ids)
79
+ source_encoded_attn_masks.append(source_encoded.attention_mask)
80
+
81
+ return source_encoded_input_ids, source_encoded_attn_masks
82
+
83
+ def LoadSamplesToRam(self, sample_list):
84
+ pass
85
+
86
+ def UnloadSamplesFromRam(self, sample_list):
87
+ pass
88
+
89
+ def __del__(self):
90
+ print("Finished destroying QSL.")
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/habana_generation_utils.py ADDED
@@ -0,0 +1,543 @@
1
+ #!/usr/bin/env python3
2
+ ###############################################################################
3
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
4
+ ###############################################################################
5
+ import time
6
+ import os
7
+ import glob
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from enum import Enum
11
+ import habana_frameworks.torch.core as htcore
12
+
13
+ from collections import UserDict
14
+
15
+
16
+ def boolean(string):
17
+ char = string.lower()[0]
18
+ assert char == 't' or char == 'f', f"Invalid value: {string} - it should start with either 't' or 'f'"
19
+ return char == 't'
20
+
21
+
22
+ def flip(dictionary):
23
+ return {v: k for k, v in dictionary.items()}
24
+
25
+
26
+ def unwrap_ds(model):
27
+ if hasattr(model, 'module'):
28
+ return model.module
29
+ return model
30
+
31
+
32
+ def defined(v):
33
+ return v is not None
34
+
35
+
36
+ class Option:
37
+ def __init__(self, opt_type, default=None, help=None, is_custom=False):
38
+ self.opt_type = opt_type
39
+ self.default = default
40
+ self.is_custom = is_custom
41
+ self.help = help
42
+
43
+ def describe(self, name):
44
+ type_str = FLIPPED_SUPPORTED_TYPES[self.opt_type]
45
+ default_str = f'={self.default}' if defined(self.default) else ''
46
+ custom_str = ' [custom]' if self.is_custom else ''
47
+ help_str = f'\n\t{self.help}' if self.help else ''
48
+ return f'{name}:{type_str}{default_str}{custom_str}{help_str}'
49
+
50
+
51
+ class CustomOption(Option):
52
+ def __init__(self, opt_type, **kwargs):
53
+ super().__init__(opt_type, **kwargs, is_custom=True)
54
+
55
+
56
+ SUPPORTED_TYPES = {
57
+ 'int': int,
58
+ 'bool': boolean,
59
+ 'float': float,
60
+ }
61
+ FLIPPED_SUPPORTED_TYPES = flip(SUPPORTED_TYPES)
62
+
63
+ OPTIONS = {
64
+ # HF options
65
+ 'max_length': Option(int, default=128, help='Maximum input + output length. Overridden by max_new_tokens.'),
66
+ 'max_new_tokens': Option(int, help='Maximum number of tokens to generate.'),
67
+ 'min_length': Option(int, help='Minimum input + output length. Overridden by min_new_tokens.'),
68
+ 'min_new_tokens': Option(int, help='Minimum number of tokens to generate.'),
69
+
70
+ 'num_beams': Option(int, default=1, help='Number of beams. When num_beams=1 greedy_search is used, otherwise beam_search.'),
71
+ 'early_stopping': Option(boolean, default=False, help='Exit beam-search when N hypotheses are found'),
72
+ 'early_stopping_delay': Option(int, default=1, help='Determines how many iterations to schedule before checking for early exit condition'),
73
+ 'do_sample': Option(boolean, default=False, help='Enable sampling. Affects both greedy_search and beam_search.'),
74
+ 'temperature': Option(float, help='Value > 1.0 increase sampling randomness. Value < 1.0 makes tokens with best score more likely to be selected.'),
75
+ 'top_k': Option(int, help='Limit sampling to top_k best tokens at each step.'),
76
+ 'top_p': Option(float, help='Limit sampling to a minimal set of tokens S such as P(S) >= top_p.'),
77
+ 'repetition_penalty': Option(float, help='Penalize repeating tokens. Value > 1 makes tokens that have already appeared less likely.'),
78
+ 'no_repeat_ngram_size': Option(int, help='Forbid ngrams that have already appeared from reappearing.'),
79
+ 'length_penalty': Option(float, default=1.0, help='Applied as exponent to beam length. Value > 1.0 encourages longer sequences (because of log used in scoring). Value < 0.0 encourages shorter sequences. Beam-search only.'),
80
+ 'use_cache': Option(boolean, default=True, help='Run with KV-cache enabled.'),
81
+
82
+ # Generic HPU options
83
+ 'use_graphs': CustomOption(boolean, default=True, help='Use HPU graphs if possible.'),
84
+ 'ignore_eos': CustomOption(boolean, default=True, help='Run greedy_search for full max_length to avoid device<>CPU synchronization.'),
85
+ 'max_iterations': CustomOption(int, help='Limit number of iterations. Useful for profiling and debugging.'),
86
+
87
+ # Model specific HPU options
88
+ 'static_shapes': CustomOption(boolean, help='Run with static shapes to avoid graph recompilations.'),
89
+ 'bucket_width': CustomOption(int, help='Pad shapes to a multiple of bucket width when static_shapes are used.'),
90
+ 'max_input_length': CustomOption(int, help='Maximum length of input when static_shapes are used.'),
91
+ 'trim_logits': CustomOption(boolean, help='Calculate logits only for the last token in the initial run of the model.'),
92
+ 'limit_graphs': CustomOption(boolean, help='Use hpu graphs only for iterations > 0.'),
93
+ 'reuse_cache': CustomOption(boolean, help='Reuse kv-cache memory between prompts.'),
94
+ 'kv_cache_fp8': CustomOption(boolean, default=False, help='store kv-cache in float8 when kv-cache is used'),
95
+
96
+ 'use_position_ids': CustomOption(boolean, default=True, help='Use position ids in GPT-J'),
97
+ 'kv_cache_margin': CustomOption(int, help='Update only last K entries in KV-cache. Requires reuse_cache.'),
98
+ }
99
+
100
+ MIN_INF = float('-inf')
101
+
102
+
103
+ def custom_options():
104
+ return [k for k, v in OPTIONS.items() if v.is_custom]
105
+
106
+
107
+ def generate_option_help():
108
+ result = 'Options need to be specified in the form of KV1,KV2,[...] where each KV is either KEY_N=VALUE_N or KEY_N:TYPE_N=VALUE_N. '
109
+ result += '\nKnown options:'
110
+ for name, op in OPTIONS.items():
111
+ result = result + '\n ' + op.describe(name)
112
+ result += '\nOptions that are not listed above but are supported by the HF API can be passed by explicitly specifying their type. For example: penalty_alpha:float=0.5 . Note: this is only supported in "vanilla" and "compatibility" generation modes.'
113
+ result += '\nOptions marked as "custom" are only used when running in "optimized" generation mode.'
114
+ return result
115
+
116
+
117
+ def parse_key_type_value(ktv):
118
+ if '=' in ktv:
119
+ # Full key/type/value
120
+ # key[:type]=value
121
+ kt, value = ktv.split('=')
122
+ kt = kt.split(':')
123
+ name = kt[0]
124
+ if len(kt) > 1:
125
+ opt_type = kt[1]
126
+ assert opt_type in SUPPORTED_TYPES, f'Unsupported type: {opt_type}. Supported types: {list(SUPPORTED_TYPES.keys())}'
127
+ opt_type = SUPPORTED_TYPES[opt_type]
128
+ else:
129
+ assert name in OPTIONS, f'Cannot deduce type! Unknown option:{name}! Please specify type or use one of the following options: {list(OPTIONS.keys())}'
130
+ opt_type = OPTIONS[name].opt_type
131
+ return (name, opt_type(value))
132
+ else:
133
+ # Boolean shorthand
134
+ # [!]key
135
+ if ktv.startswith('!'):
136
+ return (ktv[1:], False)
137
+ else:
138
+ return (ktv, True)
139
+
140
+
141
+ def parse_options(string, default_values={}):
142
+ if string is None:
143
+ return GenerationOptions(default_values)
144
+ kvs = [parse_key_type_value(ktv) for ktv in string.split(',')]
145
+ return GenerationOptions(default_values=default_values, **dict(kvs))
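# Editor's note (illustrative only, not part of this file): given the definitions above,
# an options string in the form described by generate_option_help() parses like so:
#
#   opts = parse_options('max_new_tokens=128,num_beams:int=4,!do_sample,static_shapes')
#   opts.max_new_tokens  # -> 128   (type deduced from OPTIONS)
#   opts.num_beams       # -> 4     (explicit ':int' annotation)
#   opts.do_sample       # -> False ('!key' is the boolean-False shorthand)
#   opts.static_shapes   # -> True  ('key' alone is the boolean-True shorthand)
#   # Unspecified options fall back to their OPTIONS defaults, e.g. opts.use_cache -> True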
146
+
147
+
148
+ class GenerationOptions(dict):
149
+ def __init__(self, default_values={}, **args):
150
+ super().__init__(self, **args)
151
+ self.set_defaults(default_values)
152
+
153
+ def filter(self, *keywords):
154
+ result = GenerationOptions(**self)
155
+ for k in keywords:
156
+ result.pop(k, None)
157
+ return result
158
+
159
+ def set_defaults(self, default_values):
160
+ for k, v in default_values.items():
161
+ if k not in self:
162
+ self[k] = v
163
+ for k, v in OPTIONS.items():
164
+ if defined(v.default) and k not in self:
165
+ self[k] = v.default
166
+
167
+ def __getattr__(self, key):
168
+ if key in self.keys():
169
+ return self[key]
170
+ return None
171
+
172
+ def set(self, key, value):
173
+ self[key] = value
174
+
175
+ def print(self):
176
+ print("Generation options:")
177
+ for k, v in sorted(self.items()):
178
+ print(' ', f'{k}={v}')
179
+
180
+
181
+ def fast_topk(tensor, k, dim):
182
+ min_inf = torch.tensor(MIN_INF, dtype=tensor.dtype, device=tensor.device)
183
+ best = []
184
+ for i in range(k):
185
+ value, index = torch.max(tensor, dim=dim)
186
+ best.append((value.unsqueeze(-1), index.unsqueeze(-1)))
187
+ if (i + 1 < k):
188
+ tensor.scatter_(dim, index.unsqueeze(-1), min_inf.unsqueeze(0).expand(tensor.size(0), 1))
189
+ best_value, best_index = zip(*best)
190
+ best_value = torch.cat([b for b in best_value], dim=-1)
191
+ best_index = torch.cat([b for b in best_index], dim=-1)
192
+ return best_value, best_index
193
+
194
+
195
+ if os.environ.get('TOPK_ALGORITHM', 'FAST') == 'NATIVE':
196
+ TOPK_IMPL = torch.topk
197
+ else:
198
+ TOPK_IMPL = fast_topk
199
+
200
+
201
+ class SelectionBeam():
202
+ def __init__(self, batch_size, beam_size):
203
+ self.batch_size = batch_size
204
+ self.beam_size = beam_size
205
+
206
+ def __call__(self, logits, eos_token_id):
207
+ eos_logits = logits[:, eos_token_id].clone()
208
+ logits[:, eos_token_id] = MIN_INF
209
+ logits = logits.view(self.batch_size, -1)
210
+ topk = TOPK_IMPL(logits, k=self.beam_size, dim=-1)
211
+ return (*topk, eos_logits)
212
+
213
+
214
+ def get_device(model):
215
+ if hasattr(model, 'device'):
216
+ return model.device
217
+ if hasattr(model, 'module'):
218
+ return model.module.device
219
+ assert False, 'Cannot extract device!'
220
+ return None
221
+
222
+
223
+ def is_on_hpu(obj):
224
+ return str(get_device(obj)).startswith('hpu')
225
+
226
+
227
+ @torch.no_grad()
228
+ def generate_on_prepared_input(model,
229
+ options,
230
+ model_inputs,
231
+ max_length,
232
+ input_length):
233
+ if options.use_cache and options.reuse_cache:
234
+ model_inputs['reuse_cache'] = True
235
+ bs, _ = model_inputs['input_ids'].shape
236
+ unwrap_ds(model).allocate_kv_cache(bs * options.num_beams, max_length, options.kv_cache_fp8)
237
+
238
+ device = get_device(model)
239
+ model_inputs = move(model_inputs, device)
240
+
241
+ initial_ids = model_inputs['input_ids']
242
+ bs = initial_ids.shape[0]
243
+ selection_algorithm = SelectionBeam(bs, options.num_beams)
244
+ beam_trace = beam_search(model, options, selection_algorithm, max_length, input_length, model_inputs)
245
+ return initial_ids.cpu(), move(beam_trace, 'cpu')
246
+
247
+
248
+ def calculate_input_padding(input_length, options):
249
+ if not options.static_shapes:
250
+ return 0
251
+ if defined(options.bucket_width):
252
+ return round_up(input_length, options.bucket_width) - input_length
253
+ if defined(options.max_input_length):
254
+ return options.max_input_length - input_length
255
+ assert False, "Running with static_shapes requires setting either 'bucket_width' or 'max_input_length'"
256
+
257
+
258
+ def calculate_max_length(input_length, options):
259
+ if defined(options.max_new_tokens) and defined(options.bucket_width):
260
+ return round_up(input_length + options.max_new_tokens, options.bucket_width)
261
+ if defined(options.max_new_tokens) and defined(options.max_input_length):
262
+ return options.max_input_length + options.max_new_tokens
263
+ if defined(options.max_input_length):
264
+ assert options.max_length >= options.max_input_length, \
265
+ f"max_input_length={options.max_input_length} is bigger then max_length={options.max_length}! Either increase max_length or specify max_new_tokens."
266
+ return options.max_length
267
+
268
+
269
+ def prepare_decoder_only_input_without_moving(pad_token_id, options, model_args):
270
+ input_ids = model_args['input_ids']
271
+ attention_mask = model_args['attention_mask']
272
+
273
+ input_ids = input_ids.to(torch.int32)
274
+ attention_mask = attention_mask.to(torch.bfloat16)
275
+
276
+ input_length = input_ids.shape[-1]
277
+ input_padding = calculate_input_padding(input_length, options)
278
+ max_length = calculate_max_length(input_length, options)
279
+
280
+ if options.static_shapes:
281
+ model_args['token_idx'] = torch.tensor(input_length)
282
+ if input_padding > 0:
283
+ input_ids = F.pad(input_ids, (0, input_padding), value=pad_token_id)
284
+ attention_mask = F.pad(attention_mask, (0, input_padding), value=0)
285
+
286
+ position_ids = attention_mask.int().cumsum(-1) - 1
287
+ start_end = torch.full((input_ids.shape[0], 2), input_length, dtype=torch.int32)
288
+ start_end[:, 0] -= position_ids[:, -1].to(torch.int32)
289
+
290
+ attention_mask = (1.0 - attention_mask) * torch.finfo(attention_mask.dtype).min
291
+ attention_mask = attention_mask.unsqueeze(1)
292
+
293
+ model_args['input_ids'] = input_ids
294
+ model_args['attention_mask'] = attention_mask
295
+ model_args['position_ids'] = position_ids
296
+ model_args['start_end'] = start_end
297
+ model_args['use_cache'] = options.use_cache
298
+ if options.trim_logits:
299
+ model_args['trim_logits'] = True
300
+
301
+ return model_args, max_length, input_length
302
+
303
+
304
+ def round_up(n, multiple):
305
+ return (n + multiple - 1) // multiple * multiple
306
+
307
+
308
+ def calc_iterations(input_length, max_length, options):
309
+ if defined(options.max_new_tokens):
310
+ iterations = options.max_new_tokens
311
+ else:
312
+ iterations = max_length - input_length
313
+ if defined(options.max_iterations):
314
+ iterations = min(iterations, options.max_iterations)
315
+ return range(max(iterations, 0))
316
+
317
+
318
+ @torch.no_grad()
319
+ def beam_search(model,
320
+ options,
321
+ selection_algorithm,
322
+ max_length,
323
+ input_length,
324
+ model_input):
325
+
326
+ if model.config.is_encoder_decoder:
327
+ input_ids_key = 'decoder_input_ids'
328
+ attention_mask_key = 'decoder_attention_mask'
329
+ else:
330
+ input_ids_key = 'input_ids'
331
+ attention_mask_key = 'attention_mask'
332
+ past_key = 'past_key_values'
333
+
334
+ input_ids = model_input[input_ids_key]
335
+ attention_mask = model_input[attention_mask_key]
336
+
337
+ token_idx = model_input.get('token_idx', None)
338
+ position_ids = model_input.pop('position_ids')
339
+
340
+ MIN_LENGTH = 30
341
+ MAX_LENGTH = 128
342
+ bs = input_ids.shape[0]
343
+ beam_scores = torch.zeros((bs,), device=input_ids.device, dtype=torch.float32)
344
+ beam_trace_scores = torch.zeros((MAX_LENGTH, bs * options.num_beams), device=input_ids.device, dtype=torch.float32)
345
+ beam_trace_indices = torch.zeros((MAX_LENGTH, bs * options.num_beams), device=input_ids.device, dtype=torch.int32)
346
+ beam_trace_tokens = torch.zeros((MAX_LENGTH, bs * options.num_beams), device=input_ids.device, dtype=torch.int32)
347
+ beam_trace_eos = torch.zeros((MAX_LENGTH, bs * options.num_beams), device=input_ids.device, dtype=torch.float32)
348
+ beam_trace_idx = torch.tensor(0, device=input_ids.device)
349
+
350
+ total_eos_tokens = torch.zeros((1), device=input_ids.device, dtype=torch.int32).repeat(bs)
351
+ max_eos_tokens = torch.tensor(options.num_beams, device=input_ids.device, dtype=torch.int32).repeat(bs)
352
+
353
+ model_input['kv_cache_shape'] = (bs * options.num_beams, input_ids.shape[-1])
354
+
355
+ if options.early_stopping:
356
+ checks = [None] * options.early_stopping_delay
357
+
358
+ start = torch.full([bs], input_length, dtype=torch.int32, device=input_ids.device)
359
+ end = torch.full([bs], input_length, dtype=torch.int32, device=input_ids.device)
360
+ mul = torch.tensor([[64, 16, 4, 1]], dtype=torch.int32, device=input_ids.device)
361
+
362
+ htcore.mark_step()
363
+
364
+ for i in calc_iterations(input_length, max_length, options):
365
+ first_step = (i == 0)
366
+
367
+ embed_positions = model.transformer.embed_positions.repeat(position_ids.shape[0], 1, 1)
368
+ repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
369
+ sincos = torch.gather(embed_positions, 1, repeated_position_ids)
370
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
371
+ output_size = 2 * sin.shape[2]
372
+
373
+ model_input['sin'] = torch.repeat_interleave(sin, 2, dim=2, output_size=output_size).unsqueeze(2)
374
+ model_input['cos'] = torch.repeat_interleave(cos, 2, dim=2, output_size=output_size).unsqueeze(2)
375
+
376
+ model_output = model(**model_input)
377
+
378
+ logits = model_output['logits']
379
+ if token_idx is None or logits.shape[-2] == 1:
380
+ next_token_logits = logits[:, -1, :].unsqueeze(-2)
381
+ else:
382
+ next_token_logits = logits.index_select(-2, token_idx - 1)
383
+
384
+ next_token_logits = next_token_logits.squeeze(-2)
385
+ vocab_size = next_token_logits.shape[-1]
386
+
387
+ if i < MIN_LENGTH:
388
+ next_token_logits[:, model.config.eos_token_id] = MIN_INF
389
+
390
+ next_token_logits = F.log_softmax(next_token_logits, dim=-1, dtype=torch.float32) + beam_scores.unsqueeze(-1)
391
+ next_token_values, next_token_indices, eos_scores = selection_algorithm(next_token_logits, model.config.eos_token_id)
392
+ beam_scores = next_token_values.flatten()
393
+ beam_indices = next_token_indices.div(vocab_size, rounding_mode='floor').flatten().to(torch.int32)
394
+ beam_tokens = next_token_indices.remainder(vocab_size).flatten().to(torch.int32)
395
+
396
+ if first_step:
397
+ model_input[past_key] = unwrap_ds(model).reorder_kv_cache_first_token(model_input['kv_cache_shape'])
398
+ else:
399
+ indices = beam_indices.view(bs, options.num_beams)
400
+ indices = torch.sum(indices * mul, axis=-1).to(torch.uint8)
401
+ end.add_(1)
402
+ model_input[past_key] = unwrap_ds(model).reorder_kv_cache_next_token(start, end, indices, model_input['kv_cache_shape'])
403
+
404
+ if options.early_stopping and i >= MIN_LENGTH:
405
+ bs_beam_scores = beam_scores.reshape((bs, -1))
406
+ bs_eos_scores = eos_scores.reshape((bs, -1))
407
+ scores = torch.cat([bs_beam_scores, bs_eos_scores], dim=-1)
408
+ best_indices = torch.topk(scores, options.num_beams)[1]
409
+ eos_tokens = (best_indices >= options.num_beams).sum(dim=-1, dtype=torch.int32)
410
+ total_eos_tokens.add_(eos_tokens)
411
+ is_finished = (total_eos_tokens >= max_eos_tokens)
412
+ end = torch.logical_not(is_finished).to(torch.int32) * end
413
+ cur_check_idx = i % options.early_stopping_delay
414
+ checks[cur_check_idx] = is_finished.all()
415
+
416
+ if first_step:
417
+ eos_scores = eos_scores.repeat_interleave(options.num_beams, dim=0, output_size=options.num_beams * bs)
418
+ beam_trace_scores.index_copy_(0, beam_trace_idx, beam_scores.unsqueeze(0))
419
+ beam_trace_indices.index_copy_(0, beam_trace_idx, beam_indices.unsqueeze(0))
420
+ beam_trace_tokens.index_copy_(0, beam_trace_idx, beam_tokens.unsqueeze(0))
421
+ beam_trace_eos.index_copy_(0, beam_trace_idx, eos_scores.unsqueeze(0))
422
+ beam_trace_idx.add_(1)
423
+
424
+ if first_step:
425
+ attention_mask = torch.repeat_interleave(
426
+ attention_mask, options.num_beams, dim=0, output_size=options.num_beams * bs
427
+ )
428
+ attention_mask.index_fill_(2, token_idx, 0)
429
+
430
+ next_tokens = beam_tokens.unsqueeze(-1)
431
+
432
+ token_idx.add_(1)
433
+
434
+ model_input[input_ids_key] = next_tokens
435
+ model_input[attention_mask_key] = attention_mask
436
+
437
+ if first_step:
438
+ model_input["start_end"] = None
439
+
440
+ if first_step:
441
+ position_ids = position_ids[:, -1].unsqueeze(-1)
442
+ position_ids = position_ids.repeat_interleave(options.num_beams, dim=0, output_size=options.num_beams * bs)
443
+ else:
444
+ position_ids.add_(1)
445
+
446
+ if options.early_stopping and i >= MIN_LENGTH:
447
+ next_check_idx = (i + 1) % options.early_stopping_delay
448
+ all_done = checks[next_check_idx]
449
+ if all_done is not None and all_done.cpu().item():
450
+ break
451
+
452
+ return (beam_trace_idx, beam_trace_scores, beam_trace_indices, beam_trace_tokens, beam_trace_eos)
453
+
454
+
455
+ def finalize_beams(initial_ids, beam_trace, model_config, length_penalty):
456
+ beam_trace_idx, beam_trace_scores, beam_trace_indices, beam_trace_tokens, beam_trace_eos = beam_trace
457
+
458
+ bs = initial_ids.shape[0]
459
+ num_beams = beam_trace_scores.shape[1] // bs
460
+
461
+ beam_trace_idx = beam_trace_idx.item()
462
+ beam_trace_scores = beam_trace_scores[:beam_trace_idx, :].reshape(beam_trace_idx, bs, -1)
463
+ beam_trace_indices = beam_trace_indices[:beam_trace_idx, :].reshape(beam_trace_idx, bs, -1)
464
+ beam_trace_tokens = beam_trace_tokens[:beam_trace_idx, :].reshape(beam_trace_idx, bs, -1)
465
+ beam_trace_eos = beam_trace_eos[:beam_trace_idx, :].reshape(beam_trace_idx, bs, -1)
466
+
467
+ input_lengths = torch.tensor(initial_ids.size(-1)) - torch.eq(initial_ids, model_config.eos_token_id).sum(-1)
468
+
469
+ results = []
470
+ for batch in range(bs):
471
+ best_score = (False, MIN_INF)
472
+ best_beam = 0
473
+ best_step = 0
474
+ total_finished = 0
475
+ for step in range(beam_trace_idx):
476
+ #b_len = initial_ids.shape[-1] + step
477
+ b_len = input_lengths[batch] + step
478
+ p_scores = torch.cat([beam_trace_scores[step, batch], beam_trace_eos[step, batch]])
479
+ scores = p_scores / (b_len ** length_penalty)
480
+ top_scores, top_beams = torch.sort(scores, dim=-1, descending=True, stable=True)
481
+ # print(batch, step, top_scores.numpy().tolist(), top_beams.numpy().tolist())
482
+ for beam in top_beams[:num_beams]:
483
+ beam = beam.item()
484
+ finished = beam >= num_beams
485
+ score = (finished, scores[beam])
486
+ total_finished += finished
487
+ # print("\t", beam, score)
488
+ if score > best_score or (not best_score[0] and beam == 0):
489
+ best_beam = beam
490
+ best_score = score
491
+ best_step = step
492
+ # print('new best', score, 'vs', best_score)
493
+ if total_finished >= num_beams:
494
+ break
495
+
496
+ idx = best_beam
497
+ tokens = []
498
+ for step in range(best_step, -1, -1):
499
+ if idx >= num_beams:
500
+ tokens.append(model_config.eos_token_id)
501
+ idx = idx - num_beams
502
+ else:
503
+ tokens.append(beam_trace_tokens[step, batch, idx].item())
504
+ idx = beam_trace_indices[step, batch, idx].item()
505
+ tokens.reverse()
506
+ results.append(tokens)
507
+
508
+ max_length = max(len(r) for r in results)
509
+ results = [torch.tensor(r) for r in results]
510
+ results = torch.cat([expand_if_needed(r, max_length, model_config.pad_token_id).unsqueeze(0) for r in results], dim=0)
511
+ results = torch.cat([initial_ids, results], dim=-1)
512
+
513
+ return results
514
+
515
+
516
+ def map_tensors(obj, fn):
517
+ constructor = type(obj)
518
+ if isinstance(obj, tuple):
519
+ return constructor(map_tensors(v, fn) for v in obj)
520
+ if isinstance(obj, list):
521
+ return constructor([map_tensors(v, fn) for v in obj])
522
+ if isinstance(obj, dict) or isinstance(obj, UserDict):
523
+ return constructor({k: map_tensors(v, fn) for k, v in obj.items()})
524
+ if isinstance(obj, torch.Tensor):
525
+ return fn(obj)
526
+ return obj
527
+
528
+
529
+ def move(obj, device):
530
+ return map_tensors(obj, lambda t: t.to(device))
531
+
532
+
533
+ def expand_if_needed(tensor, new_size, value, dim=-1):
534
+ orig_len = tensor.shape[dim]
535
+ padding_len = new_size - orig_len
536
+ if padding_len > 0:
537
+ if dim == -1:
538
+ return F.pad(tensor, (0, padding_len), value=value)
539
+ elif dim == -2:
540
+ return F.pad(tensor, (0, 0, 0, padding_len), value=value)
541
+ else:
542
+ assert False, f'Unsupported dim value: {dim}'
543
+ return tensor
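
The helpers closing habana_generation_utils.py (`map_tensors`, `move`, `expand_if_needed`) are generic tensor utilities used by the backend. A minimal usage sketch, assuming the module is importable (it pulls in habana_frameworks at import time, so this only runs where the Habana software stack is installed):

```python
# Minimal sketch: map_tensors walks nested containers and applies a function to
# every tensor, move sends them to a device, expand_if_needed right-pads along
# the chosen dim. All names come from habana_generation_utils.py above.
import torch
import habana_generation_utils as hgu

batch = {"input_ids": torch.tensor([[11, 12, 13]], dtype=torch.int32),
         "masks": [torch.ones(1, 3), torch.ones(1, 3)]}

cpu_batch = hgu.move(batch, "cpu")                      # same structure, tensors on CPU
padded = hgu.expand_if_needed(batch["input_ids"], 5, value=0)
print(padded)                                           # tensor([[11, 12, 13, 0, 0]], dtype=torch.int32)
```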
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/hgu_options.py ADDED
@@ -0,0 +1,31 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+
5
+ import habana_generation_utils as hgu
6
+
7
+
8
+ default_options = {
9
+ "early_stopping": True,
10
+ "early_stopping_delay": 2, # schedule an extra step before checking early_stopping, i.e. schedule-0, skip-check-1, schedule-1, check-0, schedule-0, check-1
11
+ "max_iterations": 128,
12
+ "num_beams": 4,
13
+ "static_shapes": True,
14
+ "use_cache": True,
15
+ "use_graphs": True,
16
+ "limit_graphs": False,
17
+ "use_rolling_position_ids": True,
18
+ "reuse_cache": True,
19
+ "kv_cache_fp8": False,
20
+ "trim_logits": True,
21
+ "kv_cache_margin": 129,
22
+ }
23
+
24
+
25
+ def get_options_dict(options_str: str = None) -> dict:
26
+ options = {}
27
+ if options_str is not None:
28
+ options = dict(
29
+ [hgu.parse_key_type_value(ktv) for ktv in options_str.split(',')]
30
+ )
31
+ return {**default_options, **options}
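
The option string passed via `--options` is split on commas and each entry is parsed by `hgu.parse_key_type_value` (defined in habana_generation_utils.py); the merge itself is a plain dict union in which user-supplied entries win. A sketch of just the merge step, using a trimmed copy of the defaults and illustrative override values:

```python
# Sketch of the merge in get_options_dict: {**default_options, **options} lets
# user entries override defaults while untouched keys keep their values.
# default_options here is a trimmed copy; the override values are illustrative only.
default_options = {"num_beams": 4, "use_cache": True, "max_iterations": 128}

user_overrides = {"num_beams": 2, "max_iterations": 64}
merged = {**default_options, **user_overrides}

assert merged["num_beams"] == 2        # user value replaces the default of 4
assert merged["use_cache"] is True     # untouched keys keep their defaults
```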
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/main.py ADDED
@@ -0,0 +1,117 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+
5
+ import os
6
+ os.environ.setdefault('PT_HPU_INFERENCE_MODE', '1')
7
+
8
+ import argparse
9
+ import mlperf_loadgen as lg
10
+ import sys
11
+
12
+ sys.path.insert(0, os.getcwd())
13
+
14
+ scenario_map = {
15
+ "Offline": lg.TestScenario.Offline,
16
+ "Server": lg.TestScenario.Server
17
+ }
18
+
19
+ def get_args():
20
+ parser = argparse.ArgumentParser()
21
+ parser.add_argument("--scenario", choices=["Offline", "Server"], default="Offline", help="Scenario")
22
+ parser.add_argument("--model-path", default="/mnt/weka/data/pytorch/gpt-j", help="")
23
+ parser.add_argument("--dataset-path", default="/mnt/weka/data/pytorch/gpt-j/cnn_eval.json", help="")
24
+ parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
25
+ parser.add_argument("--dtype", choices=["bfloat16", "float32", "float8"], default="bfloat16",
26
+ help="data type of the model, choose from bfloat16, float32 and float8")
27
+ parser.add_argument("--device", type=str, choices=["cpu", "cuda", "hpu", "socket"],
28
+ default="hpu", help="device to run the inference on")
29
+ parser.add_argument("--mlperf_conf", default="mlperf.conf", help="mlperf rules config")
30
+ parser.add_argument("--user_conf", default="user.conf",
31
+ help="user config for user LoadGen settings such as target QPS")
32
+ parser.add_argument("--max_examples", type=int, default=13368,
33
+ help="Maximum number of examples to consider (not limited by default)")
34
+ parser.add_argument("--num_workers", type=int, default=1)
35
+ parser.add_argument("--batch_size", type=int, default=12)
36
+ parser.add_argument("--quantization_file", "-qf", type=str,
37
+ help="Read quantization configuration from a file")
38
+ parser.add_argument("--log_path", default="build/logs")
39
+ parser.add_argument("--options", type=str, default='',
40
+ help="Coma-seperated list of options used in generation")
41
+ parser.add_argument("--profile", action='store_true', help="Enable profiling")
42
+ parser.add_argument("--profile_type", type=str, choices=["tb", "hltv"], default='tb', help="Profiling format")
43
+ parser.add_argument("--profile_tokens", type=int, default=5, help="Number of tokens to profile")
44
+ parser.add_argument("--help_options", action="store_true", help="Show detailed option help")
45
+ parser.add_argument("--fake_device", action='store_true', help="Enable dummy device with estimated delay")
46
+ parser.add_argument("--fake_dataset", action='store_true', help="Enable dummy dataset")
47
+ parser.add_argument("--stdout", action="store_true", help="Print logs to stdout instead of a file")
48
+ parser.add_argument('--enable-tensorboard-logging', action='store_true')
49
+ parser.add_argument('--eager', action='store_true')
50
+ args = parser.parse_args()
51
+ return args
52
+
53
+
54
+ def main():
55
+ args = get_args()
56
+ if args.eager:
57
+ os.environ['PT_HPU_LAZY_MODE'] = '0'
58
+
59
+ # These imports need to be placed after setting PT_HPU_LAZY_MODE=0 when we're running eager mode
60
+ from hgu_options import get_options_dict
61
+ import habana_generation_utils as hgu
62
+
63
+ if args.num_workers != 1:
64
+ assert args.device != 'hpu', "In order to run more than 1 worker, you need to set device to 'socket'"
65
+ if args.help_options is True:
66
+ print(hgu.generate_option_help())
67
+ sys.exit(0)
68
+
69
+ if args.scenario == "Offline":
70
+ if args.device == "socket":
71
+ from socket_backend import SUT_Offline
72
+ sut = SUT_Offline(args)
73
+ else:
74
+ from backend import SUT_Offline
75
+ options = get_options_dict(args.options)
76
+ sut = SUT_Offline(args, options)
77
+ else:
78
+ if args.device == "socket":
79
+ from socket_backend import SUT_Server
80
+ sut = SUT_Server(args)
81
+ else:
82
+ from backend import SUT_Server
83
+ options = get_options_dict(args.options)
84
+ sut = SUT_Server(args, options)
85
+
86
+ settings = lg.TestSettings()
87
+ settings.scenario = scenario_map[args.scenario]
88
+ # Need to update the conf
89
+ settings.FromConfig(args.mlperf_conf, "gptj", args.scenario)
90
+ settings.FromConfig(args.user_conf, "gptj", args.scenario)
91
+
92
+ if args.accuracy:
93
+ settings.mode = lg.TestMode.AccuracyOnly
94
+ else:
95
+ settings.mode = lg.TestMode.PerformanceOnly
96
+ os.makedirs(args.log_path, exist_ok=True)
97
+ log_output_settings = lg.LogOutputSettings()
98
+ log_output_settings.outdir = args.log_path
99
+ log_output_settings.copy_summary_to_stdout = True
100
+ log_settings = lg.LogSettings()
101
+ log_settings.log_output = log_output_settings
102
+ log_settings.enable_trace = True
103
+
104
+ lg.StartTestWithLogSettings(sut.sut, sut.qsl, settings, log_settings)
105
+
106
+ print("Test Done!")
107
+
108
+ print("Destroying SUT...")
109
+ sut.close_log_file()
110
+ lg.DestroySUT(sut.sut)
111
+
112
+ print("Destroying QSL...")
113
+ lg.DestroyQSL(sut.qsl)
114
+
115
+
116
+ if __name__ == "__main__":
117
+ main()
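
For reference, the LoadGen calls in `main()` apply mlperf.conf first and user.conf second, so for the same ("gptj", scenario) keys the user.conf values take effect. A standalone sketch of that flow (assumes mlperf_loadgen is installed and both config files are present in the working directory):

```python
# Minimal sketch of the settings precedence used in main(); not a full harness.
import mlperf_loadgen as lg

settings = lg.TestSettings()
settings.scenario = lg.TestScenario.Offline
settings.FromConfig("mlperf.conf", "gptj", "Offline")  # benchmark rules, seeds, min counts
settings.FromConfig("user.conf", "gptj", "Offline")    # user overrides, e.g. Offline.target_qps
settings.mode = lg.TestMode.PerformanceOnly            # or lg.TestMode.AccuracyOnly
```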
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/mlperf.conf ADDED
@@ -0,0 +1,64 @@
1
+ # The format of this config file is 'key = value'.
2
+ # The key has the format 'model.scenario.key'. Value is mostly int64_t.
3
+ # Model may be '*' as a wildcard. In that case the value applies to all models.
4
+ # All times are in milliseconds
5
+
6
+ # Set performance_sample_count for each model.
7
+ # User can optionally set this to higher values in user.conf.
8
+ resnet50.*.performance_sample_count_override = 1024
9
+ ssd-mobilenet.*.performance_sample_count_override = 256
10
+ retinanet.*.performance_sample_count_override = 64
11
+ bert.*.performance_sample_count_override = 10833
12
+ dlrm.*.performance_sample_count_override = 204800
13
+ rnnt.*.performance_sample_count_override = 2513
14
+ # set to 0 to let entire sample set to be performance sample
15
+ 3d-unet.*.performance_sample_count_override = 0
16
+
17
+ # Set seeds. The seeds will be distributed two weeks before the submission.
18
+ *.*.qsl_rng_seed = 148687905518835231
19
+ *.*.sample_index_rng_seed = 520418551913322573
20
+ *.*.schedule_rng_seed = 811580660758947900
21
+ # Set seeds for TEST_05. The seeds will be distributed two weeks before the submission.
22
+ *.*.test05_qsl_rng_seed = 793197339507417767
23
+ *.*.test05_sample_index_rng_seed = 255610748586851044
24
+ *.*.test05_schedule_rng_seed = 352213341366340113
25
+
26
+
27
+ *.SingleStream.target_latency_percentile = 90
28
+ *.SingleStream.min_duration = 600000
29
+ #*.SingleStream.min_query_count = 1024
30
+
31
+ *.MultiStream.target_latency_percentile = 99
32
+ *.MultiStream.samples_per_query = 8
33
+ *.MultiStream.min_duration = 600000
34
+ #*.MultiStream.min_query_count = 270336
35
+ *.MultiStream.min_query_count = 662
36
+ retinanet.MultiStream.target_latency = 528
37
+
38
+ # 3D-UNet uses equal issue mode
39
+ 3d-unet.*.sample_concatenate_permutation = 1
40
+
41
+ *.Server.target_latency = 10
42
+ *.Server.target_latency_percentile = 99
43
+ *.Server.target_duration = 0
44
+ *.Server.min_duration = 600000
45
+ #*.Server.min_query_count = 270336
46
+ resnet50.Server.target_latency = 15
47
+ retinanet.Server.target_latency = 100
48
+ bert.Server.target_latency = 130
49
+ dlrm.Server.target_latency = 60
50
+ rnnt.Server.target_latency = 1000
51
+ gptj.Server.target_latency = 20000
52
+
53
+ *.Offline.target_latency_percentile = 90
54
+ *.Offline.min_duration = 600000
55
+ # In Offline scenario, we always have one query. But LoadGen maps this to
56
+ # min_sample_count internally in Offline scenario, so set this to 24576 since
57
+ # the rule requires that Offline scenario run for at least 24576 samples.
58
+ *.Offline.min_query_count = 24576
59
+
60
+ # These fields should be defined and overridden by user.conf.
61
+ *.SingleStream.target_latency = 10
62
+ *.MultiStream.target_latency = 80
63
+ *.Server.target_qps = 1.0
64
+ *.Offline.target_qps = 1.0
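
The `model.scenario.key` entries above use `'*'` as a wildcard for the model or scenario, with model-specific entries taking precedence over wildcard ones. The sketch below is illustrative only (the real resolution happens inside LoadGen when FromConfig parses the file); the values are the Server target_latency entries from this file:

```python
# Toy resolver for 'model.scenario.key' entries; the most specific match wins.
def resolve(conf, model, scenario, key):
    for m, s in ((model, scenario), (model, "*"), ("*", scenario), ("*", "*")):
        if (m, s, key) in conf:
            return conf[(m, s, key)]
    return None

conf = {
    ("*", "Server", "target_latency"): 10,        # *.Server.target_latency = 10
    ("gptj", "Server", "target_latency"): 20000,  # gptj.Server.target_latency = 20000
}
assert resolve(conf, "gptj", "Server", "target_latency") == 20000      # model-specific wins
assert resolve(conf, "some_model", "Server", "target_latency") == 10   # falls back to '*'
```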
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/modeling_gptj.py ADDED
@@ -0,0 +1,782 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ ###############################################################################
17
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
18
+ ###############################################################################
19
+ # Changes:
20
+ # - remove dead code (functions and tensors unused in MLPerf GPT-J benchmark)
21
+ # - remove training support
22
+ # - remove float16 support
23
+ # - remove device-parallelism support
24
+ # - use apply_rotary_pos_emb kernel on HPU
25
+ # - remove duplicated operations (for example: calculate sin and cos only once)
26
+ # - reshape tensors from 4D to 3D for better performance
27
+ # - use optimized softmax
28
+ # - adjust the code to HPU graphs
29
+ # - use optimized kernels for KV cache reorder
30
+ # - introduce support for fp8 KV cache
31
+ # - remove unnecessary int64 usage (use int32 or bfloat16)
32
+
33
+ from typing import Optional, Tuple, Union
34
+ import numpy as np
35
+
36
+ import torch
37
+ import torch.fx
38
+ import torch.utils.checkpoint
39
+ from torch import nn
40
+
41
+ try:
42
+ from habana_frameworks.torch.hpex.kernels import apply_rotary_pos_emb as apply_rotary_pos_emb_hpu
43
+ from habana_frameworks.torch.hpex.kernels import RotaryPosEmbeddingMode
44
+ except ImportError:
45
+ print("Not using HPU kernel for apply_rotary_pos_emb")
46
+ apply_rotary_pos_emb_hpu = None
47
+
48
+ from habana_frameworks.torch.hpex.kernels import CustomSoftmax as FastSoftmax
49
+
50
+ try:
51
+ in_place_interleave_hpu = torch.ops.hpu.in_place_interleave_
52
+ except AttributeError:
53
+ print(f"Not using HPU kernel for in_place_interleave_")
54
+ in_place_interleave_hpu = None
55
+
56
+ __package__ = 'transformers.models.gptj'
57
+
58
+ from ...activations import ACT2FN
59
+ from ...modeling_outputs import (
60
+ BaseModelOutputWithPast,
61
+ CausalLMOutputWithPast,
62
+ )
63
+ from ...modeling_utils import PreTrainedModel
64
+ from .configuration_gptj import GPTJConfig
65
+
66
+
67
+
68
+ def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
69
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
70
+ sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.float), inv_freq).float()
71
+ return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
72
+
73
+
74
+ def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
75
+ x1 = x[:, :, :, ::2]
76
+ x2 = x[:, :, :, 1::2]
77
+ x = torch.stack((-x2, x1), dim=-1)
78
+ return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
79
+
80
+
81
+ def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
82
+ if apply_rotary_pos_emb_hpu is None:
83
+ return (tensor * cos) + (rotate_every_two(tensor) * sin)
84
+ else:
85
+ return apply_rotary_pos_emb_hpu(tensor, cos, sin, None, 0, RotaryPosEmbeddingMode.PAIRWISE)
86
+
87
+ class Matmul(nn.Module):
88
+ def __init__(self):
89
+ super().__init__()
90
+
91
+ def forward(self, x, y):
92
+ return torch.matmul(x, y)
93
+
94
+ class BatchMatmul(nn.Module):
95
+ def __init__(self):
96
+ super().__init__()
97
+
98
+ def forward(self, x, y):
99
+ return torch.bmm(x,y)
100
+
101
+ class CacheUpdateFp8(nn.Module):
102
+ def __init__(self):
103
+ super().__init__()
104
+
105
+ def forward(self, prev, cur, dim, idx):
106
+ orig_cur = cur
107
+ cur_fp8, amax = torch.ops.hpu.cast_to_fp8_v2(cur,None,False, False)
108
+ if prev.shape[0] != cur_fp8.shape[0]:
109
+ assert prev.shape[0] % cur_fp8.shape[0] == 0, f'Cannot update kv-cache. BatchSize changed! {prev.shape[0]} vs {cur_fp8.shape[0]}'
110
+ # Repeat to accommodate bs/beam changes
111
+ repeats = (prev.shape[0] // cur_fp8.shape[0], 1, 1, 1)
112
+ cur_fp8 = torch.ops.hpu.fp8_repeat_v2(cur_fp8, repeats)
113
+ assert prev.shape == cur_fp8.shape, f'Cannot update kv-cache. BatchSize changed! {prev.shape[0]} vs {cur_fp8.shape[0]}'
114
+ # Initialize
115
+ torch.ops.hpu.fp8_copy_(prev, cur_fp8)
116
+ return orig_cur
117
+ else:
118
+ assert cur_fp8.shape[2] == 1, f'Cannot update kv-cache. Unsupported shapes. prev:{prev.shape} cur:{cur_fp8.shape}'
119
+ torch.ops.hpu.fp8_index_copy_(prev, dim, idx - 1, cur_fp8)
120
+ prev_bf16 = torch.ops.hpu.cast_from_fp8(prev, None, cur.dtype)
121
+ return prev_bf16
122
+
123
+ class CacheUpdate(nn.Module):
124
+ def __init__(self):
125
+ super().__init__()
126
+
127
+ def forward(self, prev, cur, dim, idx):
128
+ orig_cur = cur
129
+ if prev.shape[0] != cur.shape[0]:
130
+ assert prev.shape[0] % cur.shape[0] == 0, f'Cannot update kv-cache. BatchSize changed! {prev.shape[0]} vs {cur.shape[0]}'
131
+ # Repeat to accommodate bs/beam changes
132
+ cur = cur.repeat(prev.shape[0] // cur.shape[0], 1, 1, 1)
133
+ assert prev.shape == cur.shape, f'Cannot update kv-cache. BatchSize changed! {prev.shape[0]} vs {cur.shape[0]}'
134
+ # Initialize
135
+ prev.copy_(cur)
136
+ return orig_cur
137
+ else:
138
+ assert cur.shape[2] == 1, f'Cannot update kv-cache. Unsupported shapes. prev:{prev.shape} cur:{cur.shape}'
139
+ return prev.index_copy_(dim, idx - 1, cur)
140
+
141
+
142
+ class GPTJAttention(nn.Module):
143
+ def __init__(self, config):
144
+ super().__init__()
145
+ self.matmul_qk = BatchMatmul()
146
+ self.matmul_av = Matmul()
147
+
148
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
149
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
150
+
151
+ self.past_key = {}
152
+ self.past_value = {}
153
+ self.kv_cache_fp8 = False
154
+ self.v_update = CacheUpdate()
155
+ self.k_update = CacheUpdate()
156
+
157
+ self.embed_dim = config.hidden_size
158
+ self.num_attention_heads = config.num_attention_heads
159
+ self.head_dim = self.embed_dim // self.num_attention_heads
160
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
161
+ raise ValueError(
162
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
163
+ f" `num_attention_heads`: {self.num_attention_heads})."
164
+ )
165
+ self.register_buffer("inv_scale_attn",
166
+ torch.rsqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype()),
167
+ persistent=False)
168
+ self.inv_scale_attn_scalar = 1.0 / np.sqrt(self.head_dim)
169
+
170
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
171
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
172
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
173
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
174
+ self.rotary_dim = config.rotary_dim
175
+
176
+ def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary):
177
+ """
178
+ Splits hidden dim into attn_head_size and num_attention_heads
179
+ """
180
+ new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
181
+ tensor = tensor.view(new_shape)
182
+ if rotary:
183
+ return tensor
184
+ if len(tensor.shape) == 5:
185
+ return tensor.permute(0, 1, 3, 2, 4) # (batch, blocks, head, block_length, head_features)
186
+ elif len(tensor.shape) == 4:
187
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
188
+ else:
189
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
190
+
191
+ def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
192
+ """
193
+ Merges attn_head_size dim and num_attn_heads dim into hidden dim
194
+ """
195
+ if len(tensor.shape) == 5:
196
+ tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
197
+ elif len(tensor.shape) == 4:
198
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
199
+ else:
200
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
201
+ new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
202
+ return tensor.view(new_shape)
203
+
204
+ def _attn(
205
+ self,
206
+ query,
207
+ key,
208
+ value,
209
+ attention_mask=None,
210
+ start_end=None,
211
+ ):
212
+ batch_size, query_len, key_len = query.shape[0], query.shape[-2], key.shape[-2]
213
+
214
+ # Reshape to 3D tensors
215
+ query = query.reshape((batch_size * self.num_attention_heads, query_len, self.head_dim))
216
+ key = key.reshape((batch_size * self.num_attention_heads, key_len, self.head_dim))
217
+ value = value.reshape((batch_size * self.num_attention_heads, key_len, self.head_dim))
218
+
219
+ attn_weights = self.matmul_qk(query, key.transpose(-1, -2))
220
+
221
+ if query_len == 1:
222
+ # next token
223
+ attn_weights = attn_weights * self.inv_scale_attn
224
+ attn_weights = attn_weights + attention_mask
225
+
226
+ attn_weights = FastSoftmax.apply(attn_weights, 2) # optimized softmax (no LUTs)
227
+
228
+ else:
229
+ # first token
230
+ attn_weights = torch.ops.hpu.scaled_masked_triangular_softmax(
231
+ attn_weights,
232
+ start_end,
233
+ self.inv_scale_attn_scalar,
234
+ self.num_attention_heads,
235
+ False, # don't use max
236
+ 1 # optimized softmax (no LUTs)
237
+ )
238
+
239
+ attn_output = self.matmul_av(attn_weights, value)
240
+
241
+ # Reshape back to 4D tensors
242
+ attn_output = attn_output.reshape((batch_size, self.num_attention_heads) + attn_output.shape[1:])
243
+ attn_weights = attn_weights.reshape((batch_size, self.num_attention_heads) + attn_weights.shape[1:])
244
+
245
+ return attn_output, attn_weights
246
+
247
+
248
+ def allocate_kv_cache(self, batch_size, seq_len, kv_cache_fp8):
249
+ if (batch_size, seq_len) not in self.past_key.keys():
250
+ device = self.k_proj.weight.device
251
+ dtype = self.k_proj.weight.dtype
252
+ shape = (batch_size, self.num_attention_heads, seq_len, self.head_dim)
253
+ past_key = torch.empty(shape, dtype=dtype, device=device)
254
+ past_value = torch.empty(shape, dtype=dtype, device=device)
255
+ if kv_cache_fp8:
256
+ self.kv_cache_fp8 = True
257
+ self.past_value[(batch_size, seq_len)], amax = torch.ops.hpu.cast_to_fp8_v2(past_value, None, False, False)
258
+ self.past_key[(batch_size, seq_len)], amax = torch.ops.hpu.cast_to_fp8_v2(past_key, None, False, False)
259
+ self.v_update = CacheUpdateFp8()
260
+ self.k_update = CacheUpdateFp8()
261
+
262
+ import habana_frameworks.torch.core as htcore
263
+ htcore.mark_step()
264
+ else:
265
+ self.past_key[(batch_size, seq_len)] = past_key
266
+ self.past_value[(batch_size, seq_len)] = past_value
267
+
268
+ def reorder_first_token(self, tensor):
269
+ if in_place_interleave_hpu is not None:
270
+ in_place_interleave_hpu(tensor)
271
+ else:
272
+ shape = tensor.shape
273
+ l = []
274
+ NUM_BEAMS=4
275
+ for i in range(shape[0] // NUM_BEAMS):
276
+ val = tensor[i, :, :, :].clone()
277
+ for i in range(NUM_BEAMS):
278
+ l.append(val)
279
+ updated = torch.cat(l, 0)
280
+ updated = torch.reshape(updated, shape)
281
+ tensor.copy_(updated)
282
+
283
+ def reorder_kv_cache_first_token(self, kv_cache_shape):
284
+ if self.past_key is None or kv_cache_shape not in self.past_key.keys():
285
+ return (None, None)
286
+
287
+ self.reorder_first_token(self.past_key[kv_cache_shape])
288
+ self.reorder_first_token(self.past_value[kv_cache_shape])
289
+
290
+ return (self.past_key[kv_cache_shape].shape, self.past_value[kv_cache_shape].shape)
291
+
292
+ def reorder_kv_cache_next_token(self, start, end, beam_idx, kv_cache_shape):
293
+ if self.past_key is None or kv_cache_shape not in self.past_key.keys():
294
+ return (None, None)
295
+
296
+ if self.kv_cache_fp8:
297
+ torch.ops.hpu.fp8_kv_reorder_(self.past_key[kv_cache_shape], start, end, beam_idx)
298
+ torch.ops.hpu.fp8_kv_reorder_(self.past_value[kv_cache_shape], start, end, beam_idx)
299
+ else:
300
+ torch.ops.hpu.kv_reorder_(self.past_key[kv_cache_shape], start, end, beam_idx)
301
+ torch.ops.hpu.kv_reorder_(self.past_value[kv_cache_shape], start, end, beam_idx)
302
+
303
+ return (self.past_key[kv_cache_shape].shape, self.past_value[kv_cache_shape].shape)
304
+
305
+ def forward(
306
+ self,
307
+ hidden_states: torch.FloatTensor,
308
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
309
+ attention_mask: Optional[torch.FloatTensor] = None,
310
+ use_cache: Optional[bool] = False,
311
+ output_attentions: Optional[bool] = False,
312
+ token_idx: Optional[torch.Tensor] = None,
313
+ reuse_cache: Optional[bool] = False,
314
+ kv_cache_shape: Tuple[int, int] = None,
315
+ sin: Optional[torch.Tensor] = None,
316
+ cos: Optional[torch.Tensor] = None,
317
+ start_end: Optional[torch.Tensor] = None,
318
+ ) -> Union[
319
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
320
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
321
+ ]:
322
+ query = self.q_proj(hidden_states)
323
+ key = self.k_proj(hidden_states)
324
+ value = self.v_proj(hidden_states)
325
+
326
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
327
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
328
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)
329
+
330
+ k_rot = key[:, :, :, : self.rotary_dim]
331
+ k_pass = key[:, :, :, self.rotary_dim :]
332
+
333
+ q_rot = query[:, :, :, : self.rotary_dim]
334
+ q_pass = query[:, :, :, self.rotary_dim :]
335
+
336
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
337
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
338
+
339
+ key = torch.cat([k_rot, k_pass], dim=-1)
340
+ query = torch.cat([q_rot, q_pass], dim=-1)
341
+
342
+ key = key.permute(0, 2, 1, 3)
343
+ query = query.permute(0, 2, 1, 3)
344
+
345
+ if layer_past is not None or reuse_cache:
346
+ if reuse_cache:
347
+ past_key, past_value = self.past_key[kv_cache_shape], self.past_value[kv_cache_shape]
348
+ else:
349
+ past_key, past_value = layer_past
350
+
351
+ key = self.k_update(past_key, key, -2, token_idx)
352
+ value = self.v_update(past_value, value, -2, token_idx)
353
+
354
+ if use_cache is True:
355
+ if reuse_cache:
356
+ present = (key.shape, value.shape)
357
+ else:
358
+ present = (key, value)
359
+ else:
360
+ present = None
361
+
362
+ # compute self-attention: V x Softmax(QK^T)
363
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, start_end)
364
+
365
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
366
+ attn_output = self.out_proj(attn_output)
367
+ attn_output = self.resid_dropout(attn_output)
368
+
369
+ outputs = (attn_output, present)
370
+ if output_attentions:
371
+ outputs += (attn_weights,)
372
+
373
+ return outputs # a, present, (attentions)
374
+
375
+
376
+ class GPTJMLP(nn.Module):
377
+ def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
378
+ super().__init__()
379
+ embed_dim = config.n_embd
380
+
381
+ self.fc_in = nn.Linear(embed_dim, intermediate_size)
382
+ self.fc_out = nn.Linear(intermediate_size, embed_dim)
383
+
384
+ self.act = ACT2FN["quick_gelu"]
385
+ self.dropout = nn.Dropout(config.resid_pdrop)
386
+
387
+ def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
388
+ hidden_states = self.fc_in(hidden_states)
389
+ hidden_states = self.act(hidden_states)
390
+ hidden_states = self.fc_out(hidden_states)
391
+ hidden_states = self.dropout(hidden_states)
392
+ return hidden_states
393
+
394
+
395
+ class GPTJBlock(nn.Module):
396
+ def __init__(self, config):
397
+ super().__init__()
398
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
399
+ self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
400
+ self.attn = GPTJAttention(config)
401
+ self.mlp = GPTJMLP(inner_dim, config)
402
+
403
+ def allocate_kv_cache(self, batch_size, seq_len, kv_cache_fp8):
404
+ self.attn.allocate_kv_cache(batch_size, seq_len, kv_cache_fp8)
405
+
406
+ def reorder_kv_cache_first_token(self, kv_cache_shape):
407
+ return self.attn.reorder_kv_cache_first_token(kv_cache_shape)
408
+
409
+ def reorder_kv_cache_next_token(self, start, end, beam_idx, kv_cache_shape):
410
+ return self.attn.reorder_kv_cache_next_token(start, end, beam_idx, kv_cache_shape)
411
+
412
+ def forward(
413
+ self,
414
+ hidden_states: Optional[torch.FloatTensor],
415
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
416
+ attention_mask: Optional[torch.FloatTensor] = None,
417
+ use_cache: Optional[bool] = False,
418
+ output_attentions: Optional[bool] = False,
419
+ token_idx: Optional[torch.Tensor] = None,
420
+ reuse_cache: Optional[bool] = None,
421
+ kv_cache_shape: Tuple[int, int] = None,
422
+ sin: Optional[torch.Tensor] = None,
423
+ cos: Optional[torch.Tensor] = None,
424
+ start_end: Optional[torch.Tensor] = None,
425
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
426
+ residual = hidden_states
427
+ hidden_states = self.ln_1(hidden_states)
428
+ attn_outputs = self.attn(
429
+ hidden_states=hidden_states,
430
+ layer_past=layer_past,
431
+ attention_mask=attention_mask,
432
+ use_cache=use_cache,
433
+ output_attentions=output_attentions,
434
+ token_idx=token_idx,
435
+ reuse_cache=reuse_cache,
436
+ kv_cache_shape=kv_cache_shape,
437
+ sin=sin,
438
+ cos=cos,
439
+ start_end=start_end,
440
+ )
441
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
442
+ outputs = attn_outputs[1:]
443
+
444
+ feed_forward_hidden_states = self.mlp(hidden_states)
445
+ hidden_states = attn_output + feed_forward_hidden_states + residual
446
+
447
+ if use_cache:
448
+ outputs = (hidden_states,) + outputs
449
+ else:
450
+ outputs = (hidden_states,) + outputs[1:]
451
+
452
+ return outputs # hidden_states, present, (attentions)
453
+
454
+
455
+ class GPTJPreTrainedModel(PreTrainedModel):
456
+ """
457
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
458
+ models.
459
+ """
460
+
461
+ config_class = GPTJConfig
462
+ base_model_prefix = "transformer"
463
+ is_parallelizable = True
464
+ _no_split_modules = ["GPTJBlock"]
465
+ _skip_keys_device_placement = "past_key_values"
466
+
467
+ def __init__(self, *inputs, **kwargs):
468
+ super().__init__(*inputs, **kwargs)
469
+
470
+ def _init_weights(self, module):
471
+ """Initialize the weights."""
472
+ if isinstance(module, (nn.Linear,)):
473
+ # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
474
+ # cf https://github.com/pytorch/pytorch/pull/5617
475
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
476
+ if module.bias is not None:
477
+ module.bias.data.zero_()
478
+ elif isinstance(module, nn.Embedding):
479
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
480
+ if module.padding_idx is not None:
481
+ module.weight.data[module.padding_idx].zero_()
482
+ elif isinstance(module, nn.LayerNorm):
483
+ module.bias.data.zero_()
484
+ module.weight.data.fill_(1.0)
485
+
486
+
487
+ class GPTJModel(GPTJPreTrainedModel):
488
+ config_class = GPTJConfig
489
+ base_model_prefix = "transformer"
490
+ is_parallelizable = True
491
+ _no_split_modules = ["GPTJBlock"]
492
+ _skip_keys_device_placement = "past_key_values"
493
+
494
+ def __init__(self, config):
495
+ super().__init__(config)
496
+
497
+ self.embed_dim = config.n_embd
498
+ self.vocab_size = config.vocab_size
499
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim, dtype=torch.bfloat16)
500
+ self.drop = nn.Dropout(config.embd_pdrop)
501
+ self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)])
502
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
503
+ self.register_buffer("embed_positions",
504
+ create_sinusoidal_positions(self.config.max_position_embeddings, self.config.rotary_dim),
505
+ persistent=False)
506
+ # Initialize weights and apply final processing
507
+ self.post_init()
508
+
509
+ def _init_weights(self, module):
510
+ """Initialize the weights."""
511
+ if isinstance(module, (nn.Linear,)):
512
+ # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
513
+ # cf https://github.com/pytorch/pytorch/pull/5617
514
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
515
+ if module.bias is not None:
516
+ module.bias.data.zero_()
517
+ elif isinstance(module, nn.Embedding):
518
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
519
+ if module.padding_idx is not None:
520
+ module.weight.data[module.padding_idx].zero_()
521
+ elif isinstance(module, nn.LayerNorm):
522
+ module.bias.data.zero_()
523
+ module.weight.data.fill_(1.0)
524
+
525
+ def get_input_embeddings(self):
526
+ return self.wte
527
+
528
+ def set_input_embeddings(self, new_embeddings):
529
+ self.wte = new_embeddings
530
+
531
+ def allocate_kv_cache(self, batch_size, seq_len, kv_cache_fp8):
532
+ for layer in self.h:
533
+ layer.allocate_kv_cache(batch_size, seq_len, kv_cache_fp8)
534
+
535
+ def reorder_kv_cache_first_token(self, kv_cache_shape):
536
+ return tuple(layer.reorder_kv_cache_first_token(kv_cache_shape) for layer in self.h)
537
+
538
+ def reorder_kv_cache_next_token(self, start, end, beam_idx, kv_cache_shape):
539
+ return tuple(layer.reorder_kv_cache_next_token(start, end, beam_idx, kv_cache_shape) for layer in self.h)
540
+
541
+ def forward(
542
+ self,
543
+ input_ids: Optional[torch.LongTensor] = None,
544
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
545
+ attention_mask: Optional[torch.FloatTensor] = None,
546
+ token_type_ids: Optional[torch.LongTensor] = None,
547
+ inputs_embeds: Optional[torch.FloatTensor] = None,
548
+ use_cache: Optional[bool] = None,
549
+ output_attentions: Optional[bool] = None,
550
+ output_hidden_states: Optional[bool] = None,
551
+ return_dict: Optional[bool] = None,
552
+ token_idx: Optional[torch.Tensor] = None,
553
+ reuse_cache: Optional[bool] = None,
554
+ kv_cache_shape: Tuple[int, int] = None,
555
+ sin: Optional[torch.Tensor] = None,
556
+ cos: Optional[torch.Tensor] = None,
557
+ start_end: Optional[torch.Tensor] = None,
558
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
559
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
560
+ output_hidden_states = (
561
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
562
+ )
563
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
564
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
565
+
566
+ if input_ids is not None and inputs_embeds is not None:
567
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
568
+ elif input_ids is not None:
569
+ input_shape = input_ids.size()
570
+ input_ids = input_ids.view(-1, input_shape[-1])
571
+ batch_size = input_ids.shape[0]
572
+ elif inputs_embeds is not None:
573
+ input_shape = inputs_embeds.size()[:-1]
574
+ batch_size = inputs_embeds.shape[0]
575
+ else:
576
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
577
+
578
+ if token_type_ids is not None:
579
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
580
+
581
+ if past_key_values is None:
582
+ past_key_values = tuple([None] * len(self.h))
583
+
584
+ # Attention mask.
585
+ if attention_mask is not None:
586
+ # TODO: try to get this value from GPTJAttention instead of hard-coding it
587
+ num_attention_heads = 16
588
+ attention_mask = torch.repeat_interleave(
589
+ attention_mask, num_attention_heads, 0, output_size=num_attention_heads*batch_size)
590
+
591
+ if inputs_embeds is None:
592
+ inputs_embeds = self.wte(input_ids)
593
+
594
+ hidden_states = inputs_embeds
595
+
596
+ if token_type_ids is not None:
597
+ token_type_embeds = self.wte(token_type_ids)
598
+ hidden_states = hidden_states + token_type_embeds
599
+
600
+ hidden_states = self.drop(hidden_states)
601
+
602
+ output_shape = input_shape + (hidden_states.size(-1),)
603
+
604
+ presents = () if use_cache else None
605
+ all_self_attentions = () if output_attentions else None
606
+ all_hidden_states = () if output_hidden_states else None
607
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
608
+ if output_hidden_states:
609
+ all_hidden_states = all_hidden_states + (hidden_states,)
610
+
611
+ outputs = block(
612
+ hidden_states=hidden_states,
613
+ layer_past=layer_past,
614
+ attention_mask=attention_mask,
615
+ use_cache=use_cache,
616
+ output_attentions=output_attentions,
617
+ token_idx=token_idx,
618
+ reuse_cache=reuse_cache,
619
+ kv_cache_shape=kv_cache_shape,
620
+ sin=sin,
621
+ cos=cos,
622
+ start_end=start_end,
623
+ )
624
+
625
+ hidden_states = outputs[0]
626
+ if use_cache is True:
627
+ presents = presents + (outputs[1],)
628
+
629
+ if output_attentions:
630
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
631
+
632
+ hidden_states = self.ln_f(hidden_states)
633
+
634
+ hidden_states = hidden_states.view(output_shape)
635
+ # Add last hidden state
636
+ if output_hidden_states:
637
+ all_hidden_states = all_hidden_states + (hidden_states,)
638
+
639
+ if not return_dict:
640
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
641
+
642
+ return BaseModelOutputWithPast(
643
+ last_hidden_state=hidden_states,
644
+ past_key_values=presents,
645
+ hidden_states=all_hidden_states,
646
+ attentions=all_self_attentions,
647
+ )
648
+
649
+
650
+ class GPTJForCausalLM(GPTJPreTrainedModel):
651
+ _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias", r"h\.\d+\.attn\.bias"]
652
+ _tied_weights_keys = ["lm_head.weight"]
653
+
654
+ def __init__(self, config):
655
+ super().__init__(config)
656
+ self.transformer = GPTJModel(config)
657
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
658
+
659
+ # Initialize weights and apply final processing
660
+ self.post_init()
661
+
662
+ def get_output_embeddings(self):
663
+ return self.lm_head
664
+
665
+ def set_output_embeddings(self, new_embeddings):
666
+ self.lm_head = new_embeddings
667
+
668
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
669
+ token_type_ids = kwargs.get("token_type_ids", None)
670
+ # only last token for inputs_ids if past is defined in kwargs
671
+ if past_key_values:
672
+ input_ids = input_ids[:, -1].unsqueeze(-1)
673
+ if token_type_ids is not None:
674
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
675
+
676
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
677
+ if inputs_embeds is not None and past_key_values is None:
678
+ model_inputs = {"inputs_embeds": inputs_embeds}
679
+ else:
680
+ model_inputs = {"input_ids": input_ids}
681
+
682
+ model_inputs.update(
683
+ {
684
+ "past_key_values": past_key_values,
685
+ "use_cache": kwargs.get("use_cache"),
686
+ "token_type_ids": token_type_ids,
687
+ }
688
+ )
689
+
690
+ return model_inputs
691
+
692
+ def allocate_kv_cache(self, batch_size, seq_len, kv_cache_fp8):
693
+ self.transformer.allocate_kv_cache(batch_size, seq_len, kv_cache_fp8)
694
+
695
+ def reorder_kv_cache_first_token(self, kv_cache_shape):
696
+ return self.transformer.reorder_kv_cache_first_token(kv_cache_shape)
697
+
698
+ def reorder_kv_cache_next_token(self, start, end, beam_idx, kv_cache_shape):
699
+ return self.transformer.reorder_kv_cache_next_token(start, end, beam_idx, kv_cache_shape)
700
+
701
+ def forward(
702
+ self,
703
+ input_ids: Optional[torch.LongTensor] = None,
704
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
705
+ attention_mask: Optional[torch.FloatTensor] = None,
706
+ token_type_ids: Optional[torch.LongTensor] = None,
707
+ inputs_embeds: Optional[torch.FloatTensor] = None,
708
+ use_cache: Optional[bool] = None,
709
+ output_attentions: Optional[bool] = None,
710
+ output_hidden_states: Optional[bool] = None,
711
+ return_dict: Optional[bool] = None,
712
+ token_idx: Optional[torch.Tensor] = None,
713
+ reuse_cache: Optional[bool] = None,
714
+ trim_logits: Optional[bool] = None,
715
+ kv_cache_shape: Tuple[int, int] = None,
716
+ sin: Optional[torch.Tensor] = None,
717
+ cos: Optional[torch.Tensor] = None,
718
+ start_end: Optional[torch.Tensor] = None,
719
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
720
+ r"""
721
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
722
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
723
+ `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
724
+ are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
725
+ """
726
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
727
+
728
+ transformer_outputs = self.transformer(
729
+ input_ids,
730
+ past_key_values=past_key_values,
731
+ attention_mask=attention_mask,
732
+ token_type_ids=token_type_ids,
733
+ inputs_embeds=inputs_embeds,
734
+ use_cache=use_cache,
735
+ output_attentions=output_attentions,
736
+ output_hidden_states=output_hidden_states,
737
+ return_dict=return_dict,
738
+ token_idx=token_idx,
739
+ reuse_cache=reuse_cache,
740
+ kv_cache_shape=kv_cache_shape,
741
+ sin=sin,
742
+ cos=cos,
743
+ start_end=start_end,
744
+ )
745
+ hidden_states = transformer_outputs[0]
746
+ _, seq_len, _ = hidden_states.shape
747
+ if seq_len > 1 and trim_logits:
748
+ if token_idx is not None:
749
+ hidden_states = hidden_states.index_select(1, token_idx - 1)
750
+ else:
751
+ hidden_states = hidden_states[:, -1, :]
752
+
753
+ # make sure sampling in fp16 works correctly and
754
+ # compute loss in fp32 to match with mesh-tf version
755
+ # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
756
+ lm_logits = self.lm_head(hidden_states)
757
+
758
+ if not return_dict:
759
+ output = (lm_logits,) + transformer_outputs[1:]
760
+ return output
761
+
762
+ return CausalLMOutputWithPast(
763
+ logits=lm_logits,
764
+ past_key_values=transformer_outputs.past_key_values,
765
+ hidden_states=transformer_outputs.hidden_states,
766
+ attentions=transformer_outputs.attentions,
767
+ )
768
+
769
+ @staticmethod
770
+ def _reorder_cache(
771
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
772
+ ) -> Tuple[Tuple[torch.Tensor]]:
773
+ """
774
+ This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
775
+ [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
776
+ beam_idx at every generation step.
777
+ """
778
+ return tuple(
779
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
780
+ for layer_past in past_key_values
781
+ )
782
+
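
When the HPU `apply_rotary_pos_emb` kernel is unavailable, the code above falls back to the pure-PyTorch path `(tensor * cos) + (rotate_every_two(tensor) * sin)`. A self-contained sketch of that fallback with small tensors; the helpers are re-declared here so it runs without the Habana stack, and rotary_dim=64 with 2048 positions are the usual GPT-J 6B config values:

```python
import torch

def create_sinusoidal_positions(num_pos, dim):           # as in modeling_gptj.py above
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
    sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.float), inv_freq).float()
    return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)

def rotate_every_two(x):                                  # pairwise (-x2, x1) interleave
    x1, x2 = x[..., ::2], x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)

rotary_dim, num_pos = 64, 2048
embed_positions = create_sinusoidal_positions(num_pos, rotary_dim)   # (2048, 64): sin half | cos half
sin, cos = torch.split(embed_positions[:4].unsqueeze(0), rotary_dim // 2, dim=-1)
sin = torch.repeat_interleave(sin, 2, dim=-1).unsqueeze(2)            # (1, 4, 1, 64), as built in beam_search
cos = torch.repeat_interleave(cos, 2, dim=-1).unsqueeze(2)

q_rot = torch.randn(1, 4, 16, rotary_dim)                 # (batch, seq, heads, rotary_dim)
q_embedded = (q_rot * cos) + (rotate_every_two(q_rot) * sin)          # CPU fallback path
print(q_embedded.shape)                                   # torch.Size([1, 4, 16, 64])
```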
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/prepare-calibration.py ADDED
@@ -0,0 +1,59 @@
1
+ import os
2
+ import sys
3
+ import json
4
+ from argparse import ArgumentParser
5
+ from datasets import load_dataset
6
+
7
+ def get_args():
8
+ parser = ArgumentParser()
9
+ parser.add_argument("--calibration-list-file", required=True, help="Path to calibration list")
10
+ parser.add_argument("--output-dir", help="Output directory", default="calibration-data")
11
+
12
+ return parser.parse_args()
13
+
14
+ dataset_id='cnn_dailymail'
15
+ version='3.0.0'
16
+ split='train'
17
+
18
+ instruction_template="Summarize the following news article:"
19
+
20
+ def check_path(path):
21
+ return os.path.exists(path)
22
+
23
+ def prepare_calibration_data(calibration_list_file, output_dir):
24
+ if not check_path(calibration_list_file):
25
+ print("Calibration list file not found: {}".format(calibration_list_file))
26
+ sys.exit(1)
27
+
28
+ dataset = load_dataset(dataset_id, name=version, split=split)
29
+ train = dict((x['id'], x) for x in dataset)
30
+
31
+
32
+ with open(calibration_list_file, 'r') as fid:
33
+ calibration_ids = fid.read().splitlines()
34
+
35
+ inputs = []
36
+ for id in calibration_ids:
37
+ calibration_sample = train[id]
38
+ x = dict()
39
+ x["instruction"] = instruction_template
40
+ x["input"] = calibration_sample["article"]
41
+ x["output"] = calibration_sample["highlights"]
42
+ inputs.append(x)
43
+
44
+ if not os.path.isdir(output_dir):
45
+ os.makedirs(output_dir)
46
+
47
+ output_path = os.path.join(output_dir,"cnn_dailymail_calibration.json")
48
+ with open(output_path, 'w') as write_f:
49
+ json.dump(inputs, write_f, indent=4, ensure_ascii=False)
50
+
51
+ print("Calibration data saved at {}".format(output_path))
52
+
53
+ def main():
54
+
55
+ args = get_args()
56
+ prepare_calibration_data(args.calibration_list_file, args.output_dir)
57
+
58
+ if __name__=="__main__":
59
+ main()
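
Each selected calibration sample is written as an instruction/input/output record, and cnn_dailymail_calibration.json is a list of such objects. A sketch of one record's shape (field values are placeholders, not real dataset text):

```python
import json

record = {
    "instruction": "Summarize the following news article:",   # instruction_template above
    "input": "<article text from the cnn_dailymail train split>",
    "output": "<reference highlights for that article>",
}
print(json.dumps([record], indent=4, ensure_ascii=False))
```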
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/quantization/configuration/config.py ADDED
@@ -0,0 +1,69 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+
5
+ import json
6
+ from os import path
7
+
8
+
9
+ # Configuration Aux strings
10
+ class CFGS:
11
+ ON = "on"
12
+ OFF = "off"
13
+ QUANTIZATION = "quantization"
14
+ MEASUREMENTS_PATH = "measurements_path"
15
+ BACKOFF_FACTOR = "backoff_factor"
16
+
17
+ # QuantConfig class
18
+ class QuantConfig:
19
+ def __init__(self):
20
+ self._quantization_enabled = False
21
+ self._measurements_path = ""
22
+ self._backoff_factor = 1.0
23
+
24
+
25
+ @property
26
+ def quantization_enabled(self):
27
+ return self._quantization_enabled
28
+
29
+ @quantization_enabled.setter
30
+ def quantization_enabled(self, val):
31
+ self._quantization_enabled = val
32
+
33
+ @property
34
+ def measurements_path(self):
35
+ return self._measurements_path
36
+
37
+ @measurements_path.setter
38
+ def measurements_path(self, path):
39
+ self._measurements_path = path
40
+
41
+ @property
42
+ def backoff_factor(self):
43
+ return self._backoff_factor
44
+
45
+ @backoff_factor.setter
46
+ def backoff_factor(self, bo_factor):
47
+ self._backoff_factor = bo_factor
48
+
49
+
50
+ def parse_quant_config(json_file_path : str) -> QuantConfig:
51
+ quant_config = QuantConfig()
52
+ if not path.isfile(json_file_path):
53
+ print("Quantization configuration file not found. Path - {}".format(
54
+ json_file_path))
55
+ else:
56
+ with open(json_file_path, 'r') as f:
57
+ quant_cfg_json = json.load(f)
58
+ if CFGS.QUANTIZATION in quant_cfg_json and quant_cfg_json[CFGS.QUANTIZATION] == CFGS.ON:
59
+ quant_config.quantization_enabled = True
60
+ if CFGS.BACKOFF_FACTOR in quant_cfg_json:
61
+ quant_config.backoff_factor = quant_cfg_json[CFGS.BACKOFF_FACTOR]
62
+ if CFGS.MEASUREMENTS_PATH in quant_cfg_json:
63
+ measurements_path = quant_cfg_json[CFGS.MEASUREMENTS_PATH]
64
+ if '$' in measurements_path:
65
+ print("Env var detected in path, expanding it")
66
+ measurements_path = path.expandvars(measurements_path)
67
+ quant_config.measurements_path = measurements_path
68
+
69
+ return quant_config
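
`parse_quant_config` expects a small JSON file with the keys listed in CFGS: "quantization" set to "on"/"off", an optional "backoff_factor", and a "measurements_path" in which `$VAR` references are expanded. A minimal sketch of writing and parsing such a file (values are illustrative; it assumes the quantization package is importable from the gpt-j directory):

```python
import json
import tempfile
from quantization.configuration.config import parse_quant_config

cfg = {
    "quantization": "on",                              # CFGS.QUANTIZATION / CFGS.ON
    "backoff_factor": 0.5,                             # illustrative value only
    "measurements_path": "$HOME/gptj-measurements",    # '$' triggers os.path.expandvars
}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(cfg, f)

quant_config = parse_quant_config(f.name)
assert quant_config.quantization_enabled is True
assert quant_config.backoff_factor == 0.5
print(quant_config.measurements_path)                  # expanded path
```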
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/socket_worker.py ADDED
@@ -0,0 +1,268 @@
1
+ #!/usr/bin/env python3
2
+
3
+ ###############################################################################
4
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
5
+ ###############################################################################
6
+
7
+ import argparse
8
+ import os
9
+ import torch
10
+ import time
11
+ import random
12
+ import threading
13
+ import queue
14
+ from contextlib import contextmanager, nullcontext
15
+ from torch.utils.tensorboard import SummaryWriter
16
+
17
+ import habana_generation_utils as hgu
18
+ import modeling_gptj as hpu_modeling_gptj
19
+ import quantization.quantize as quantize
20
+ from hgu_options import get_options_dict
21
+
22
+ import socket_utils
23
+ from dataset import Dataset
24
+
25
+
26
+ MIN_NEW_TOKENS = 30
27
+ MAX_NEW_TOKENS = 128
28
+
29
+
30
+ def fatal(e):
31
+ import traceback
32
+ traceback.print_exc()
33
+ print("EXCEPTION:", e, flush=True)
34
+ os._exit(1)
35
+
36
+
37
+ def get_fake_delay(dtype: str) -> dict:
38
+ class FakeDelayDict(dict):
39
+ def __getitem__(self, length: int) -> int:
40
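+ # 'length' includes headroom reserved for generated tokens; strip it and pick the
+ # smallest measured input-length bucket that still covers the prompt.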
+ key = min([key for key in self.keys() if key >= length - MAX_NEW_TOKENS - 1])
41
+ return dict.__getitem__(self, key)
42
+
43
+ # dict {
44
+ # input_length: average processing time on real device [us]
45
+ # }
46
+ if dtype == 'float8':
47
+ return FakeDelayDict({
48
+ 1919: 207946,
49
+ 1663: 177573,
50
+ 1407: 162134,
51
+ 1151: 141677,
52
+ 1023: 144127,
53
+ 895: 105898,
54
+ 767: 94835,
55
+ 639: 79685,
56
+ 511: 63538
57
+ })
58
+ else:
59
+ return FakeDelayDict({
60
+ 1919: 418798,
61
+ 1663: 367299,
62
+ 1407: 337564,
63
+ 1151: 292790,
64
+ 1023: 289867,
65
+ 895: 234328,
66
+ 767: 211056,
67
+ 639: 156582,
68
+ 511: 143436
69
+ })
70
+
71
+
72
+ def get_args():
73
+ parser = argparse.ArgumentParser()
74
+ parser.add_argument("--socket", type=str, required=True, help="Unix socket to connect to")
75
+ parser.add_argument("--quantization_file", "-qf", type=str,
76
+ help="Read quantization configuration from a file")
77
+ parser.add_argument("--model-path", required=True, help="Path to model checkpoint")
78
+ parser.add_argument("--dtype", choices=["bfloat16", "float32", "float8"], required=True,
79
+ help="data type of the model, choose from bfloat16, float32 and float8")
80
+ parser.add_argument("--dataset-path", required=True, help="")
81
+ parser.add_argument("--max_examples", type=int, required=True, help="Maximum number of examples to consider (not limited by default)")
82
+ parser.add_argument("--options", type=str, required=True,
83
+ help="Coma-seperated list of options used in generation")
84
+ parser.add_argument("--fake_device", action='store_true', help="Enable dummy device with estimated delay")
85
+ parser.add_argument("--fake_dataset", action='store_true', help="Enable dummy dataset")
86
+ parser.add_argument('--eager', action='store_true')
87
+ parser.add_argument('--enable-tensorboard-logging', action='store_true')
88
+ args = parser.parse_args()
89
+ return args
90
+
91
+
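+ # Producer/consumer split: the main thread receives query batches over the socket and
+ # enqueues prepared inputs, while a worker thread drains the queue and runs the pipeline.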
92
+ def handle(sock, prepare_input_func, pipeline_func, finalize_beams_func, options):
93
+ pipeline_queue = queue.Queue()
94
+ thread = threading.Thread(target=run_pipeline, args=(pipeline_queue, pipeline_func, finalize_beams_func))
95
+ thread.start()
96
+
97
+ while True:
98
+ try:
99
+ data = socket_utils.receive(sock)
100
+ if data is None:
101
+ break
102
+ pipeline_input = prepare_input_func(data, options)
103
+ pipeline_queue.put(pipeline_input)
104
+ except Exception as e:
105
+ fatal(e)
106
+
107
+ pipeline_queue.put(None)
108
+ thread.join()
109
+
110
+
111
+ def prepare_input(data, options):
112
+ batch, new_options, batch_size = data
113
+ options.update(new_options)
114
+
115
+ req_ids = [b[0][0] for b in batch]
116
+ sample_ids = [b[0][1] for b in batch]
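+ # Pad the batch by repeating the first sample so every batch reaches the fixed batch size.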
117
+ while len(sample_ids) < batch_size:
118
+ sample_ids.append(sample_ids[0])
119
+
120
+ def getter(src):
121
+ def get(idx):
122
+ if idx != -1:
123
+ return src[idx]
124
+ else:
125
+ return torch.ones((1, 1), dtype=src[0].dtype)
126
+ return get
127
+
128
+ src_input_ids = getter(dataset.source_encoded_input_ids)
129
+ src_attn_masks = getter(dataset.source_encoded_attn_masks)
130
+ input_ids = [src_input_ids(id) for id in sample_ids]
131
+ attention_mask = [src_attn_masks(id) for id in sample_ids]
132
+ batch, max_input_length = align_batch(input_ids, attention_mask, dataset.tokenizer.pad_token_id, options.max_input_length)
133
+
134
+ options.set('max_input_length', max_input_length + MAX_NEW_TOKENS + 1)
135
+ options.set('max_length', max_input_length + MAX_NEW_TOKENS + 1)
136
+ options.set('min_length', max_input_length + MIN_NEW_TOKENS)
137
+
138
+ batch, max_length, input_length = hgu.prepare_decoder_only_input_without_moving(dataset.tokenizer.pad_token_id, options, batch)
139
+ return (batch, options, max_length, input_length, req_ids)
140
+
141
+ @contextmanager
142
+ def tensorboard_logger():
143
+ global tb_counter, local_rank
144
+ t_start = time.time()
145
+ yield
146
+ t_end = time.time()
147
+ tb_writer.add_scalar(f'worker number {local_rank}, batch_time [seconds]', t_end - t_start, tb_counter)
148
+ tb_counter += 1
149
+
150
+ def run_pipeline(pipeline_queue, pipeline_func, finalize_beams_func):
151
+ try:
152
+ with torch.inference_mode():
153
+ thread = None
154
+ while True:
155
+ items = pipeline_queue.get()
156
+ if items is None:
157
+ break
158
+
159
+ batch, options, max_length, input_length, req_ids = items
160
+ with tensorboard_logger() if tb_writer else nullcontext():
161
+ initial_ids, beam_trace = pipeline_func(batch, options, max_length, input_length)
162
+ thread = threading.Thread(target=finalize_beams_func, args=(initial_ids, beam_trace, max_length, req_ids))
163
+ thread.start()
164
+ thread.join()
165
+ except Exception as e:
166
+ fatal(e)
167
+
168
+
169
+ def finalize_beams(initial_ids, beam_trace, max_input_length, req_ids):
170
+ try:
171
+ output = hgu.finalize_beams(initial_ids, beam_trace, model.config, options.length_penalty)
172
+
173
+ response = []
174
+ for req_id, output in zip(req_ids, output):
175
+ response.append((req_id, output[max_input_length:].numpy().tobytes()))
176
+ socket_utils.send(sock, response)
177
+ except Exception as e:
178
+ fatal(e)
179
+
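+ # Left-pad every sequence to a common length so the batch can be concatenated;
+ # attention masks are padded with zeros so the padding positions are ignored.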
180
+ def left_pad(tensor, max_len, value):
181
+ return torch.nn.functional.pad(tensor, (max_len - tensor.size(-1), 0), value=value)
182
+
183
+
184
+ def align_batch(input_ids, attention_mask, pad_token_id, max_length=None):
185
+ input_lengths = [t.size(-1) for t in input_ids]
186
+ if max_length is None:
187
+ max_length = max(input_lengths)
188
+ input_ids = [left_pad(t, max_length, pad_token_id) for t in input_ids]
189
+ attention_mask = [left_pad(t, max_length, 0) for t in attention_mask]
190
+ return {"input_ids": torch.cat(input_ids), "attention_mask": torch.cat(attention_mask)}, max_length
191
+
192
+
193
+ if __name__ == "__main__":
194
+ args = get_args()
195
+
196
+ tb_writer, tb_counter = (SummaryWriter(), 0) if args.enable_tensorboard_logging else (None, None)
197
+
198
+ dataset = Dataset(args.model_path, args.dataset_path, total_count_override=args.max_examples, add_padding=False, fake_data=args.fake_dataset)
199
+ options = get_options_dict(args.options)
200
+ options = hgu.GenerationOptions(**options)
201
+ hgu_pipeline = None
202
+ device = torch.device("cpu")
203
+
204
+ if not args.fake_device:
205
+ if int(os.environ.get('OMPI_COMM_WORLD_SIZE', 1)) > 1:
206
+ local_rank = os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK', "0")
207
+ os.environ["HLS_MODULE_ID"] = local_rank
208
+
209
+ import habana_frameworks.torch.core as htcore
210
+ device = torch.device('hpu')
211
+
212
+ print("Loading PyTorch model...")
213
+ model_path = args.model_path
214
+
215
+ model = hpu_modeling_gptj.GPTJForCausalLM.from_pretrained(
216
+ model_path,
217
+ low_cpu_mem_usage=True,
218
+ torch_dtype=torch.bfloat16
219
+ )
220
+
221
+ if model.config.pad_token_id is None:
222
+ model.config.pad_token_id = model.config.eos_token_id
223
+
224
+ model.to(torch.bfloat16)
225
+ model.to(device)
226
+
227
+ if not args.eager:
228
+ import habana_frameworks.torch.hpu.graphs as htgraphs
229
+ model = htgraphs.wrap_in_hpu_graph(model)
230
+
231
+ if args.quantization_file:
232
+ model = quantize.setup_quantization(model, args.quantization_file)
233
+
234
+ def pipeline(batch, options, max_length, input_length):
235
+ return hgu.generate_on_prepared_input(model, options, batch, max_length, input_length)
236
+
237
+ prepare_input_func = prepare_input
238
+ pipeline_func = pipeline
239
+ finalize_beams_func = finalize_beams
240
+ else:
241
+ fake_delay_dict = get_fake_delay(args.dtype)
242
+
243
+ def fake_pipeline(batch, *args):
244
+ batch_size, length = batch['input_ids'].shape
245
+ fake_delay = fake_delay_dict[length] * random.uniform(0.9, 1.1)
246
+ total_fake_delay = batch_size * fake_delay / 1e6
247
+ time.sleep(total_fake_delay / 10)
248
+ return batch['input_ids'], None
249
+
250
+ def fake_finalize_beams(initial_ids, _, max_input_length, req_ids):
251
+ try:
252
+ output = initial_ids.repeat(1, 2)
253
+ response = []
254
+ for req_id, output in zip(req_ids, output):
255
+ response.append((req_id, output[max_input_length:].numpy().tobytes()))
256
+ socket_utils.send(sock, response)
257
+ except Exception as e:
258
+ fatal(e)
259
+
260
+ prepare_input_func = prepare_input
261
+ pipeline_func = fake_pipeline
262
+ finalize_beams_func = fake_finalize_beams
263
+
264
+ if args.dtype == "float8":
265
+ options.kv_cache_fp8 = True
266
+
267
+ sock = socket_utils.connect(args.socket)
268
+ handle(sock, prepare_input_func, pipeline_func, finalize_beams_func, options)
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/gpt-j/user.conf ADDED
@@ -0,0 +1,11 @@
1
+ # The format of this config file is 'key = value'.
2
+ # The key has the format 'model.scenario.key'. Value is mostly int64_t.
3
+ # Model may be '*' as a wildcard. In that case the value applies to all models.
4
+ # All times are in milliseconds
5
+
6
+ # TODO: We need to fine-tune this value so that we get the maximum possible
7
+ # server utilization, while still reaching the QOS criteria
8
+ *.Server.target_qps = 11
9
+
10
+ *.Server.min_query_count = 24576
11
+ *.Server.target_latency = 20000
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/prepare_and_check_submission.py ADDED
@@ -0,0 +1,189 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ import argparse
5
+ import yaml
6
+ import typing
7
+ import subprocess
8
+ import logging
9
+ import sys
10
+ import json
11
+ import shutil
12
+ import re
13
+ import os
14
+ from pathlib import Path
15
+ import time
16
+
17
+ scenarios_config = yaml.full_load(open("scenarios.yaml"))
18
+ logging.basicConfig(level=logging.INFO)
19
+
20
+ modes = ["Server", "Offline"]
21
+ system_desc_id = "HLS-Gaudi2-PT"
22
+ implementation_id = "PyTorch"
23
+
24
+
25
+ def get_configuration(scenarios) -> typing.List[typing.Tuple[str, str]]:
26
+ runs = []
27
+ for scenario in scenarios:
28
+ if scenario in scenarios_config["scenarios"]:
29
+ for mode in modes:
30
+ runs.append((scenario, mode))
31
+ else:
32
+ try:
33
+ scenario, mode = scenario.split("_")
34
+ assert mode in modes
35
+ runs.append((scenario, mode))
36
+ except:
37
+ logging.error(
38
+ f"Scenario {scenario} not supported, see scenarios.yaml to view supported scenarios"
39
+ )
40
+ exit()
41
+ return runs
42
+
43
+
44
+ def get_args():
45
+ """Parse commandline."""
46
+ parser = argparse.ArgumentParser()
47
+ parser.add_argument(
48
+ "scenarios",
49
+ nargs="+",
50
+ help="List of scenarios e.g. gpt-j_Server or gpt-j separated by space, to run all possible scenarios set first element to 'all'",
51
+ )
52
+ parser.add_argument(
53
+ "--output-dir",
54
+ required=False,
55
+ default="./results",
56
+ help="Path to save results folder in",
57
+ )
58
+ parser.add_argument(
59
+ "--mlperf-path", required=True, help="Path to mlperf inference directory"
60
+ )
61
+ parser.add_argument("--systems-dir-path", required=True)
62
+ parser.add_argument("--measurements-dir-path", required=True)
63
+ args = parser.parse_args()
64
+ return args
65
+
66
+
67
+ def main():
68
+ args = get_args()
69
+
70
+ configuration = get_configuration(args.scenarios)
71
+
72
+ output_dir = Path(args.output_dir).absolute()
73
+ logs_dir = output_dir / "logs"
74
+ # for reference https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#563-inference
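+ # Resulting layout (sketch): submission/closed/Intel-HabanaLabs/{code,results,systems,measurements}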
75
+ submission_dir = output_dir / "submission"
76
+ submission_dir.mkdir(exist_ok=True)
77
+
78
+ division_dir = submission_dir / "closed"
79
+ division_dir.mkdir(exist_ok=True)
80
+ company_dir = division_dir / "Intel-HabanaLabs"
81
+ company_dir.mkdir(exist_ok=True)
82
+
83
+ code_dir = company_dir / "code"
84
+ code_dir.mkdir(exist_ok=True)
85
+
86
+ results_dir = company_dir / "results"
87
+ results_dir.mkdir(exist_ok=True)
88
+
89
+ systems_dir = company_dir / "systems"
90
+ systems_dir.mkdir(exist_ok=True)
91
+
92
+ measurements_dir = company_dir / "measurements"
93
+ measurements_dir.mkdir(exist_ok=True)
94
+
95
+ mlperf_path = Path(args.mlperf_path)
96
+ # for each run
97
+ for scenario, mode in configuration:
98
+ benchmark = scenarios_config["scenarios"][scenario]["benchmark"]
99
+
100
+ # systems dir
101
+ shutil.copyfile(
102
+ f"{args.systems_dir_path}/{system_desc_id}.json",
103
+ systems_dir / f"{system_desc_id}.json",
104
+ )
105
+
106
+ # code dir
107
+ current_dir = os.getcwd()
108
+ shutil.copytree(
109
+ current_dir,
110
+ code_dir / benchmark,
111
+ ignore=shutil.ignore_patterns(
112
+ ".graph_dumps", "__pycache__", ".gitignore", "internal", output_dir, "results"
113
+ ),
114
+ dirs_exist_ok=True,
115
+ )
116
+ # move general README.md out of benchmark to code directory
117
+ shutil.move(
118
+ code_dir / benchmark / "README.md",
119
+ code_dir / "README.md"
120
+ )
121
+
122
+ # measurements dir
123
+ measurements_dir_path = Path(args.measurements_dir_path)
124
+ Path(measurements_dir / system_desc_id / benchmark / mode).mkdir(
125
+ exist_ok=True, parents=True
126
+ )
127
+ shutil.copytree(
128
+ measurements_dir_path / benchmark,
129
+ measurements_dir / system_desc_id / benchmark,
130
+ dirs_exist_ok=True,
131
+ )
132
+ code_dir_path = Path(scenarios_config["scenarios"][scenario]["code_dir"])
133
+ shutil.copyfile(
134
+ code_dir_path / "mlperf.conf",
135
+ measurements_dir / system_desc_id / benchmark / mode / "mlperf.conf",
136
+ )
137
+ shutil.copyfile(
138
+ measurements_dir_path / "calibration_process.md",
139
+ measurements_dir / system_desc_id / benchmark / mode / "calibration_process.md",
140
+ )
141
+ if benchmark == "gptj-99.9":
142
+ config_file = "fp8-99.9.conf"
143
+ else:
144
+ config_file = "fp8-99.conf"
145
+
146
+ shutil.copyfile(
147
+ code_dir_path / "configs" / config_file,
148
+ measurements_dir / system_desc_id / benchmark / mode / "user.conf",
149
+ )
150
+
151
+ # results dir
152
+ shutil.copytree(
153
+ logs_dir / scenario / mode / "accuracy",
154
+ results_dir / system_desc_id / benchmark / mode / "accuracy",
155
+ ignore=shutil.ignore_patterns("mlperf_log_trace.json"),
156
+ )
157
+ shutil.copytree(
158
+ logs_dir / scenario / mode / "performance",
159
+ results_dir / system_desc_id / benchmark / mode / "performance",
160
+ ignore=shutil.ignore_patterns(
161
+ "mlperf_log_trace.json", "mlperf_log_accuracy.json"
162
+ ),
163
+ )
164
+
165
+ #truncate accuracy logs
166
+ accuracy_logs_backup = output_dir / "backup"
167
+ command = f"python {mlperf_path / 'tools/submission/truncate_accuracy_log.py'} --input {submission_dir} --submitter Intel-HabanaLabs --backup {accuracy_logs_backup}"
168
+ try:
169
+ subprocess.run(command, check=True, shell=True)
170
+ except subprocess.CalledProcessError as e:
171
+ sys.exit("Failed truncating logs")
172
+
173
+ # submission checker
174
+ command = f"python {mlperf_path / 'tools/submission/submission_checker.py'} --input {submission_dir} --csv {output_dir / 'summary.csv'}"
175
+ try:
176
+ subprocess.run(command, check=True, shell=True)
177
+ except subprocess.CalledProcessError as e:
178
+ sys.exit("Submission checker failed")
179
+
180
+ # zip submission folder
181
+ command = f"tar -cvzf {output_dir}/submission.gz -C {os.path.dirname(submission_dir)} {os.path.basename(submission_dir)}"
182
+ try:
183
+ subprocess.run(command, check=True, shell=True)
184
+ except subprocess.CalledProcessError as e:
185
+ sys.exit("Failed packaging submission folder")
186
+
187
+
188
+ if __name__ == "__main__":
189
+ main()
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/run_mlperf_scenarios.py ADDED
@@ -0,0 +1,241 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ import argparse
5
+ import yaml
6
+ import typing
7
+ import subprocess
8
+ import logging
9
+ import sys
10
+ import json
11
+ import shutil
12
+ import re
13
+ import os
14
+ from pathlib import Path
15
+ import time
16
+
17
+ scenarios_config = yaml.full_load(open("scenarios.yaml"))
18
+ logging.basicConfig(level=logging.INFO)
19
+ modes = ["Server", "Offline"]
20
+ units_map = {"Server": "Queries/s", "Offline": "Samples/s"}
21
+
22
+
23
+ def get_configuration(scenarios) -> typing.List[typing.Tuple[str, str]]:
24
+ runs = []
25
+ for scenario in scenarios:
26
+ if scenario in scenarios_config["scenarios"]:
27
+ for mode in modes:
28
+ runs.append((scenario, mode))
29
+ else:
30
+ try:
31
+ scenario, mode = scenario.split("_")
32
+ assert mode in modes
33
+ runs.append((scenario, mode))
34
+ except:
35
+ logging.error(
36
+ f"Scenario {scenario} not supported, see scenarios.yaml to view supported scenarios"
37
+ )
38
+ exit()
39
+ return runs
40
+
41
+
42
+ def get_args():
43
+ """Parse commandline."""
44
+ parser = argparse.ArgumentParser()
45
+ parser.add_argument(
46
+ "scenarios",
47
+ nargs="+",
48
+ help="List of scenarios e.g. gpt-j_Server or gpt-j separated by space, to run all possible scenarios set first element to 'all'",
49
+ )
50
+ parser.add_argument(
51
+ "--output-dir",
52
+ required=False,
53
+ default="./results",
54
+ help="Path to save results folder in",
55
+ )
56
+ parser.add_argument(
57
+ "--mlperf-path", help="Path to mlperf inference directory"
58
+ )
59
+ parser.add_argument("--mode", type=str, choices=["full", "perf", "acc"], default="full", help="dev options to shorten test time")
60
+ parser.add_argument("--eager", action="store_true", help="Eager mode enabled")
61
+ args = parser.parse_args()
62
+ return args
63
+
64
+
65
+ def run_inference(base_dir, command, mode, accuracy, scenario):
66
+ args = get_args()
67
+ command += f" --scenario {mode}"
68
+ if accuracy:
69
+ command += " --accuracy"
70
+ if args.eager:
71
+ command += " --eager"
72
+ logging.info(command)
73
+ try:
74
+ subprocess.run(command, check=True, shell=True, cwd=base_dir)
75
+ except subprocess.CalledProcessError as e:
76
+ sys.exit(f"Failed running {scenario}_{mode}")
77
+
78
+
79
+ def evaluate(base_dir):
80
+ start_time = time.time()
81
+ # Assuming script naming convention is consistent between models
82
+ command = "python evaluation.py | tee -a ./build/logs/accuracy.txt"
83
+ logging.info(command)
84
+ try:
85
+ subprocess.run(command, check=True, shell=True, cwd=base_dir)
86
+ except subprocess.CalledProcessError as e:
87
+ sys.exit(f"Failed evaluating {base_dir}")
88
+ return time.time() - start_time
89
+
90
+
91
+ def verify_thresholds(benchmark, results: typing.Dict[str, typing.Any]):
92
+ error = ""
93
+ valid = True
94
+ thresholds = scenarios_config["benchmarks"][benchmark]
95
+ for metric, threshold in thresholds.items():
96
+ if results[metric] < threshold:
97
+ error += f"{metric} "
98
+ valid = False
99
+ results["valid"] = valid
100
+ results["error"] = error
101
+ return results
102
+
103
+
104
+ def get_results(accuracy_path, benchmark):
105
+ text = open(accuracy_path / "accuracy.txt").readlines()
106
+ results = None
107
+ for line in text:
108
+ object_results = re.match(r"(\{.*?\})", line)
109
+ if object_results is not None:
110
+ results = yaml.full_load(object_results.group(1))
111
+ if results is None:
112
+ return sys.exit(f"No metrics found for {benchmark}")
113
+ results = verify_thresholds(benchmark, results)
114
+ return results
115
+
116
+
117
+ def get_performance(performance_path, mode):
118
+ perf = {}
119
+ text = open(performance_path / "mlperf_log_summary.txt").read()
120
+ perf_pattern = (
121
+ "Samples per second: (.+?)\n"
122
+ if mode == "Offline"
123
+ else "Scheduled samples per second : (.+?)\n"
124
+ )
125
+ validity_pattern = "Result is : (.+?)\n"
126
+ perf['samples_per_seconds'] = re.search(perf_pattern, text).group(1)
127
+ perf['validity'] = re.search(validity_pattern, text).group(1)
128
+
129
+ return perf
130
+
131
+ def verify_performance(perf_validity, results: typing.Dict[str, typing.Any]):
132
+ if perf_validity == "INVALID":
133
+ results["valid"] = False
134
+ results["error"] = "invalid"
135
+ return results
136
+
137
+ def write_summary(output_dir, summary):
138
+ summary_json_path = f"{output_dir}/summary.json"
139
+ all_summaries = []
140
+ if os.path.exists(summary_json_path):
141
+ with open(summary_json_path) as summary_file:
142
+ try:
143
+ all_summaries = json.load(summary_file)
144
+ except json.JSONDecodeError:
145
+ all_summaries = []
146
+ all_summaries.append(summary)
147
+ logging.info(f"Writing summary to {summary_json_path}")
148
+ with open(summary_json_path, mode="w") as summary_file:
149
+ json.dump(all_summaries, summary_file)
150
+
151
+
152
+ def main():
153
+ args = get_args()
154
+ configuration = get_configuration(args.scenarios)
155
+ output_dir = Path(args.output_dir).absolute()
156
+ logging.info(f"Saving results to {output_dir}")
157
+ output_dir.mkdir(exist_ok=True)
158
+ for scenario, mode in configuration:
159
+ logging.info(f"Running {scenario} {mode}")
160
+ base_dir = Path(scenarios_config["scenarios"][scenario]["code_dir"])
161
+ benchmark = scenarios_config["scenarios"][scenario]["benchmark"]
162
+ command = scenarios_config["scenarios"][scenario]["command"]
163
+
164
+ # logs are saved in the code/<model> dir
165
+ logs_path = base_dir / "build" / "logs"
166
+
167
+ # start timer
168
+ total_time = 0
169
+ start = time.time()
170
+ if args.mode == "perf":
171
+ # copy audit.config to get accuracy logs from performance mode
172
+ # this is equivalent to running compliance TEST01
173
+ shutil.copyfile("accuracy_from_perf.config", base_dir / "audit.config")
174
+
175
+ accuracy_path = output_dir / "logs" / scenario / mode / "compliance" / "TEST01"
176
+ # logs from performance are the same as accuracy in this mode
177
+ performance_path = accuracy_path
178
+
179
+ run_inference(base_dir, command, mode, False, scenario)
180
+ evaluation_time = evaluate(base_dir)
181
+ # move logs
182
+ shutil.move(logs_path, accuracy_path)
183
+ # remove audit
184
+ os.remove(base_dir / "audit.config")
185
+ else:
186
+ # run accuracy
187
+ logging.info("Running accuracy")
188
+ run_inference(base_dir, command, mode, True, scenario)
189
+ evaluation_time = evaluate(base_dir)
190
+ accuracy_path = output_dir / "logs" / scenario / mode / "accuracy"
191
+ shutil.move(logs_path, accuracy_path)
192
+ if args.mode != "acc":
193
+ logging.info("Running performance")
194
+ run_inference(base_dir, command, mode, False, scenario)
195
+ performance_path = (
196
+ output_dir / "logs" / scenario / mode / "performance" / "run_1"
197
+ )
198
+ shutil.move(logs_path, performance_path)
199
+
200
+ # get summary
201
+ precision = scenarios_config["scenarios"][scenario]["precision"]
202
+ batch_size = scenarios_config["scenarios"][scenario]["batch_size"]
203
+ total_time = time.time() - start
204
+ results = get_results(accuracy_path, benchmark)
205
+ units = units_map[mode]
206
+ if args.mode != "acc":
207
+ perf = get_performance(performance_path, mode)
208
+ performance = perf['samples_per_seconds']
209
+ results = verify_performance(perf['validity'], results)
210
+ else:
211
+ performance = None
212
+ if "gptj" in scenario:
213
+ thresholds = scenarios_config["benchmarks"]["gptj"]
214
+ results["accuracy"] = (
215
+ min(
216
+ results["rouge1"] / thresholds["rouge1"],
217
+ results["rouge2"] / thresholds["rouge2"],
218
+ results["rougeL"] / thresholds["rougeL"],
219
+ )
220
+ * 100
221
+ )
222
+ summary = {
223
+ "model": benchmark,
224
+ "scenario": scenario,
225
+ "units": units,
226
+ "performance": performance,
227
+ "batch_size": batch_size,
228
+ "precision": precision,
229
+ "iterations": results["gen_num"],
230
+ "dataset": scenarios_config["scenarios"][scenario]["dataset"],
231
+ "total_time": total_time,
232
+ "eval_time": evaluation_time,
233
+ "warmup_time": 0,
234
+ **results,
235
+ }
236
+ write_summary(output_dir, summary)
237
+ shutil.rmtree(base_dir / "build")
238
+
239
+
240
+ if __name__ == "__main__":
241
+ main()
docker/bloom13b/Model-References/MLPERF3.1/Inference/code/scenarios.yaml ADDED
@@ -0,0 +1,38 @@
1
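+ # Accuracy thresholds per benchmark (ROUGE metrics and generated length) used to validate runs;
+ # the scenario entries below define how each configuration is launched.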
+ benchmarks:
2
+ gptj-99:
3
+ "rouge1": 42.556635
4
+ "rouge2": 19.922265
5
+ "rougeL": 29.688219
6
+ "gen_len": 3615191
7
+ gptj-99.9:
8
+ "rouge1": 42.9435135
9
+ "rouge2": 20.1033765
10
+ "rougeL": 29.9581119
11
+ "gen_len": 3615191
12
+ gptj:
13
+ "rouge1": 42.9865
14
+ "rouge2": 20.1235
15
+ "rougeL": 29.9881
16
+ "gen_len": 3615191
17
+ scenarios:
18
+ gptj-99.9-bf16:
19
+ dataset: cnn_dailymail
20
+ code_dir: gpt-j
21
+ benchmark: gptj-99.9
22
+ command: python main.py --device socket --num_workers 8 --user_conf configs/bf16.conf
23
+ precision: bf16
24
+ batch_size: 12
25
+ gptj-99-fp8:
26
+ dataset: cnn_dailymail
27
+ code_dir: gpt-j
28
+ benchmark: gptj-99
29
+ command: PT_USE_FP8_143=1 UPDATE_MME_OUTPUT_PRECISION_FILTER="v_proj,matmul_av" ENABLE_EXPERIMENTAL_FLAGS=true python main.py -qf quantization/configuration/examples/quant_on.json --device socket --num_workers 8 --user_conf configs/fp8-99.conf --dtype float8
30
+ precision: fp8
31
+ batch_size: 32
32
+ gptj-99.9-fp8:
33
+ dataset: cnn_dailymail
34
+ code_dir: gpt-j
35
+ benchmark: gptj-99.9
36
+ command: PT_USE_FP8_143=1 UPDATE_MME_OUTPUT_PRECISION_FILTER="v_proj,matmul_av" ENABLE_EXPERIMENTAL_FLAGS=true python main.py -qf quantization/configuration/examples/quant_on.json --device socket --num_workers 8 --user_conf configs/fp8-99.conf --dtype float8
37
+ precision: fp8
38
+ batch_size: 32
docker/bloom13b/Model-References/MLPERF3.1/Inference/measurements/gptj-99.9/Offline/HLS-Gaudi2-PT_PyTorch_Offline.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "input_data_types": "int32",
3
+ "retraining": "No",
4
+ "starting_weights_filename": "https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx",
5
+ "weight_data_types": "fp8-E4M3",
6
+ "weight_transformations": "quantization"
7
+ }
docker/bloom13b/Model-References/MLPERF3.1/Inference/measurements/gptj-99.9/Server/HLS-Gaudi2-PT_PyTorch_Server.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "input_data_types": "int32",
3
+ "retraining": "No",
4
+ "starting_weights_filename": "https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx",
5
+ "weight_data_types": "fp8-E4M3",
6
+ "weight_transformations": "quantization"
7
+ }
docker/bloom13b/Model-References/MLPERF3.1/Inference/measurements/gptj-99.9/Server/README.md ADDED
@@ -0,0 +1,25 @@
1
+ # Steps to run gptj-99.9 Server
2
+
3
+ ### Environment setup
4
+ To set up the environment, follow the steps described in [/closed/Intel-HabanaLabs/code/README.md](../../../code/README.md)
5
+
6
+ ### Commands
7
+ Run the following commands from the [/closed/Intel-HabanaLabs/code/](../../../code/) directory.
8
+
9
+ #### Run accuracy
10
+ ```bash
11
+ source gptj-99.9/functions.sh
12
+ build_mlperf_inference --output-dir <output_dir> --submission gptj-99.9-fp8_Server --mode acc
13
+ ```
14
+
15
+ #### Run performance
16
+ ```bash
17
+ source gptj-99.9/functions.sh
18
+ build_mlperf_inference --output-dir <output_dir> --submission gptj-99.9-fp8_Server --mode perf
19
+ ```
20
+
21
+ ### Results
22
+
23
+ You can find the logs under /output_dir/logs/gptj-99.9-fp8/Server
24
+
25
+ For more details, see [/closed/Intel-HabanaLabs/code/README.md](../../../code/README.md)
docker/bloom13b/Model-References/MLPERF3.1/Inference/systems/HLS-Gaudi2-PT.json ADDED
@@ -0,0 +1,38 @@
1
+ {
2
+ "submitter": "Intel-HabanaLabs",
3
+ "division": "closed",
4
+ "status": "available",
5
+ "system_name": "HLS-Gaudi2-PT",
6
+ "system_type": "datacenter",
7
+ "number_of_nodes": "1",
8
+ "host_processors_per_node": "2",
9
+ "host_processor_model_name": "Intel(R) Xeon(R) Platinum 8380",
10
+ "host_processor_core_count": "40",
11
+ "host_processor_vcpu_count": "80",
12
+ "host_processor_frequency": "2.3 GHz",
13
+ "host_processor_caches": "L1d cache: 3.8 MiB, L1i cache: 2.5 MiB, L2 cache: 100 MiB, L3 cache: 120 MiB",
14
+ "host_processor_interconnect": "UPI",
15
+ "host_memory_capacity": "1024 GB",
16
+ "host_memory_configuration": "DDR4-3200",
17
+ "host_storage_type": "Weka",
18
+ "host_storage_capacity": "1 PB",
19
+ "host_networking": "2x Mellanox ConnectX-5 Ex 100Gb/s Ethernet",
20
+ "host_networking_topology": "L3 Fat Tree",
21
+ "accelerators_per_node": "8",
22
+ "accelerator_model_name": "Intel® Gaudi® 2 AI Accelerator",
23
+ "accelerator_host_interconnect": "4x PCIe 4.0 x16",
24
+ "accelerator_frequency": "1800MHz",
25
+ "accelerator_on-chip_memories": "6",
26
+ "accelerator_memory_configuration": "HBM2E",
27
+ "accelerator_memory_capacity": "96 GB",
28
+ "accelerator_interconnect": "24x 100Gb/s Ethernet",
29
+ "accelerator_interconnect_topology": "10x L3 Fat Tree",
30
+ "cooling": "Air-cooled",
31
+ "hw_notes": "",
32
+ "framework": "PyTorch 2.0.1a0",
33
+ "other_software_stack": "synapseAI 1.12.98",
34
+ "operating_system": "Ubuntu 20.04",
35
+ "sw_notes": "",
36
+ "system_type_detail": "",
37
+ "host_networking_card_count": "N/A"
38
+ }
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/HLS-Gaudi2-TF/defaults.cfg ADDED
@@ -0,0 +1,40 @@
1
+ #!/bin/bash
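+ # Default environment for BERT phase-2 pretraining on HLS-Gaudi2 (dataset paths, hyperparameters,
+ # runtime flags); individual values can be overridden via launch_bert_hvd.sh command line options.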
2
+ DATESTAMP=`date +'%y%m%d%H%M%S'`
3
+ export INPUT_FILES_DIR_UNPACKED=/root/datasets/tensorflow_bert/unpacked_data
4
+ export INPUT_FILES_DIR_PACKED=/root/datasets/tensorflow_bert/packed_data_500
5
+ export EVAL_FILES_DIR=/root/datasets/tensorflow_bert/eval_dataset
6
+ export INITIAL_CHECKPOINT=/root/datasets/tensorflow_bert/checkpoint/model.ckpt-28252
7
+ export BERT_CONFIG_DIR=/root/datasets/tensorflow_bert/checkpoint
8
+ export OUTPUT_DIR=/tmp/bert_pretrain/phase_2
9
+ export LOG_DIR=/tmp/bert_pretrain/phase_2
10
+ export TRAIN_BATCH_SIZE=28
11
+ export EVAL_BATCH_SIZE=125
12
+ export MAX_EVAL_STEPS=10
13
+ export NUM_DIST_EVAL_WORKERS=8
14
+ export TRAIN_STEPS=6700
15
+ export WARMUP_STEPS=0
16
+ export LEARNING_RATE=0.000425
17
+ export LAMB_BETA_1=0.9
18
+ export LAMB_BETA_2=0.999
19
+ export EPSILON=1e-06
20
+ export LAMB_WEIGHT_DECAY_RATE=0.01
21
+ export LAMB_LEARNING_RATE_DECAY_POLY_POWER=1.0
22
+ export NUM_ACCUMULATION_STEPS=2
23
+ export SAMPLES_START_EVAL=0
24
+ export SAVE_CHECKPOINTS_STEPS=335
25
+ export PACKED_DATA=True
26
+ export USE_HOROVOD=True
27
+ export HLS_TYPE="HLS2"
28
+ export NUM_WORKERS_TOTAL=8
29
+ export TF_CPU_RUNTIME_FALLBACK=forbid
30
+ export TF_HCCL_MEMORY_ALLOWANCE_MB=1536
31
+ export HABANA_INITIAL_WORKSPACE_SIZE_MB=4600
32
+ export CPU_BIND_TYPE=cpu
33
+ export USE_LIGHTWEIGHT_CHECKPOINT=True
34
+ export DO_TRAIN=True
35
+ export DO_EVAL=True
36
+ export USE_ASYNC_CHECKPOINTING=True
37
+ export EXPERIMENTAL_SLACK=True
38
+ export SIGNALING_FROM_GRAPH=0
39
+
40
+ unset MPI_TCP_INCLUDE
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/HLS-Gaudi2-TF/launch_bert_hvd.sh ADDED
@@ -0,0 +1,611 @@
1
+ #!/bin/bash
2
+
3
+ DEBUG=${DEBUG:-0}
4
+ if [[ $DEBUG -eq 1 ]]; then
5
+ set -x
6
+ env
7
+ fi
8
+
9
+ # Basic paths
10
+ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
11
+ export BASE_PATH="$( cd "$(dirname "$(readlink -f ${SCRIPT_DIR}/defaults.cfg)" )" && pwd)"
12
+ exit_code=0
13
+
14
+ OMPI_PREFIX=$(which mpirun)
15
+ export OMPI_PREFIX=$(dirname $(dirname ${OMPI_PREFIX}) )
16
+
17
+ function help()
18
+ {
19
+ echo "Usage:"
20
+ echo "$0 [ -key1 value1 -key2 value2 .... -keyn valuen ]"
21
+ echo "-c | --config Configuration file path (defaults to ./defaults.cfg)"
22
+ echo "-hf | --hostfile Host file path, 'localhost' is used if no file is provided"
23
+ echo "-u | --use_horovod Enable (0) or disable (1) horovod use"
24
+ echo "-ws | --warmup_steps"
25
+ echo "-lr | --learning_rate"
26
+ echo "-st | --stop_threshold"
27
+ echo "-acs | --num_accumul_steps"
28
+ echo "-tbs | --train_batchsize"
29
+ echo "-ebs | --eval_batchsize"
30
+ echo "-ts | --train_steps"
31
+ echo "-lb1 | --lamb_beta_1"
32
+ echo "-lb2 | --lamb_beta_2"
33
+ echo "-ep | --epsilon"
34
+ echo "-lwd | --lamb_weight_decay_rate"
35
+ echo "-ldp | --lamb_lr_decay_poly_power"
36
+ echo "-sbe | --samples_btw_eval"
37
+ echo "-sse | --samples_start_eval"
38
+ echo "-mes | --max_eval_steps"
39
+ echo "-w | --num_workers_total"
40
+ echo "-p | --packed_data Packed (0) or unpacked (1)"
41
+ echo "-sch | --save_checkpoints_steps"
42
+ echo "-cpu | --cpu_bind_type [ none | cpu | numa ]"
43
+ echo "-inputf | --input_files_dir"
44
+ echo "-evalf | --eval_files_dir"
45
+ echo "-od | --output_dir"
46
+ echo "-ckpt | --initial_checkpoint"
47
+ echo "-config | --config_dir"
48
+ echo "-hls | --hls_type"
49
+ echo "-tcp | --mpi_tcp_include"
50
+ echo "-dram | --use_dram_output"
51
+ echo "-lw | --light_weight"
52
+ echo "-lwi | --light_weight_impl [ basic (default) | sharded ]"
53
+ echo "-ac | --async_checkpointing"
54
+ echo "-ld | --log_dir"
55
+ echo "--do_train"
56
+ echo "--do_eval"
57
+ echo "--experimental_slack"
58
+ echo "-ndew | --num_dist_eval_workers Number of workers participating in distributed evaluation"
59
+ echo "-opt | --optimizer Type of optimizer, available options: 'lamb', 'sharded_lamb', 'adam'"
60
+ echo "-sfg | --signaling_from_graph Enable (1) or disable (0) SFG optimization."
61
+ }
62
+ #echo "-sws | --start_warmup_steps"
63
+
64
+ # Parse command line options
65
+ unset __config
66
+ unset __hostfile
67
+ unset __use_horovod
68
+ unset __warmup_steps
69
+ unset __learning_rate
70
+ unset __stop_threshold
71
+ unset __num_accumul_steps
72
+ unset __train_batchsize
73
+ unset __eval_batchsize
74
+ unset __train_steps
75
+ #unset __start_warmup_steps
76
+ unset __lamb_beta_1
77
+ unset __lamb_beta_2
78
+ unset __epsilon
79
+ unset __lamb_weight_decay_rate
80
+ unset __lamb_lr_decay_poly_power
81
+ unset __samples_btw_eval
82
+ unset __samples_start_eval
83
+ unset __max_eval_steps
84
+ unset __num_workers_total
85
+ unset __packed_data
86
+ unset __save_checkpoints_steps
87
+ unset __cpu_bind_type
88
+ unset __input_files_dir
89
+ unset __eval_files_dir
90
+ unset __output_dir
91
+ unset __initial_checkpoint
92
+ unset __config_dir
93
+ unset __hls_type
94
+ unset __mpi_tcp_include
95
+ unset __use_dram_output
96
+ unset __light_weight
97
+ unset __light_weight_impl
98
+ unset __async_checkpointing
99
+ unset __log_dir
100
+ unset __do_train
101
+ unset __do_eval
102
+ unset __experimental_slack
103
+ unset __num_dist_eval_workers
104
+ unset __optimizer
105
+ unset __aux_scirpt_params
106
+ unset __ssh_port
107
+ unset __signaling_from_graph
108
+
109
+ while [ -n "$1" ]; do
110
+ case $1 in
111
+ -c | --config )
112
+ shift
113
+ __config=$1
114
+ ;;
115
+ -hf | --hostfile)
116
+ shift
117
+ __hostfile=$1
118
+ ;;
119
+ -u | --use_horovod )
120
+ shift
121
+ __use_horovod=$1
122
+ ;;
123
+ -ws | --warmup_steps )
124
+ shift
125
+ __warmup_steps=$1
126
+ ;;
127
+ -lr | --learning_rate )
128
+ shift
129
+ __learning_rate=$1
130
+ ;;
131
+ -st | --stop_threshold )
132
+ shift
133
+ __stop_threshold=$1
134
+ ;;
135
+ -acs | --num_accumul_steps )
136
+ shift
137
+ __num_accumul_steps=$1
138
+ ;;
139
+ -tbs | --train_batchsize )
140
+ shift
141
+ __train_batchsize=$1
142
+ ;;
143
+ -ebs | --eval_batchsize)
144
+ shift
145
+ __eval_batchsize=$1
146
+ ;;
147
+ -ts | --train_steps)
148
+ shift
149
+ __train_steps=$1
150
+ ;;
151
+ -lb1 | --lamb_beta_1)
152
+ shift
153
+ __lamb_beta_1=$1
154
+ ;;
155
+ -lb2 | --lamb_beta_2)
156
+ shift
157
+ __lamb_beta_2=$1
158
+ ;;
159
+ -ep | --epsilon)
160
+ shift
161
+ __epsilon=$1
162
+ ;;
163
+ -lwd | --lamb_weight_decay_rate)
164
+ shift
165
+ __lamb_weight_decay_rate=$1
166
+ ;;
167
+ -ldp | --lamb_lr_decay_poly_power)
168
+ shift
169
+ __lamb_lr_decay_poly_power=$1
170
+ ;;
171
+ -sbe | --samples_btw_eval)
172
+ shift
173
+ __samples_btw_eval=$1
174
+ ;;
175
+ -sse | --samples_start_eval)
176
+ shift
177
+ __samples_start_eval=$1
178
+ ;;
179
+ -mes | --max_eval_steps)
180
+ shift
181
+ __max_eval_steps=$1
182
+ ;;
183
+ -w | --num_workers_total)
184
+ shift
185
+ __num_workers_total=$1
186
+ ;;
187
+ -p | --packed_data)
188
+ shift
189
+ __packed_data=$1
190
+ ;;
191
+ -sch | --save_checkpoints_steps)
192
+ shift
193
+ __save_checkpoints_steps=$1
194
+ ;;
195
+ -cpu | --cpu_bind_type)
196
+ shift
197
+ __cpu_bind_type=$1
198
+ case ${__cpu_bind_type} in
199
+ numa | cpu | none )
200
+ ;;
201
+ *)
202
+ echo "--cpu-pin must be one of the following numa | cpu | none "
203
+ exit 1
204
+ esac
205
+ ;;
206
+ -inputf | --input_files_dir)
207
+ shift
208
+ __input_files_dir=$1
209
+ ;;
210
+ -sfg | --signaling_from_graph)
211
+ shift
212
+ __signaling_from_graph=$1
213
+ ;;
214
+ -evalf | --eval_files_dir)
215
+ shift
216
+ __eval_files_dir=$1
217
+ ;;
218
+ -od | --output_dir)
219
+ shift
220
+ __output_dir=$1
221
+ ;;
222
+ -ckpt | --initial_checkpoint)
223
+ shift
224
+ __initial_checkpoint=$1
225
+ ;;
226
+ -config | --config_dir)
227
+ shift
228
+ __config_dir=$1
229
+ ;;
230
+ -hls | --hls_type)
231
+ shift
232
+ __hls_type=$1
233
+ ;;
234
+ -tcp | --mpi_tcp_include)
235
+ shift
236
+ __mpi_tcp_include=$1
237
+ ;;
238
+ -dram | --use_dram_output)
239
+ shift
240
+ __use_dram_output=$1
241
+ ;;
242
+ -lw | --light_weight)
243
+ shift
244
+ __light_weight=$1
245
+ ;;
246
+ -lwi | --light_weight_impl)
247
+ shift
248
+ __light_weight_impl=$1
249
+ ;;
250
+ -ac | --async_checkpointing)
251
+ shift
252
+ __async_checkpointing=$1
253
+ ;;
254
+ -ld | --log_dir)
255
+ shift
256
+ __log_dir=$1
257
+ ;;
258
+ --do_train)
259
+ shift
260
+ __do_train=$1
261
+ ;;
262
+ --do_eval)
263
+ shift
264
+ __do_eval=$1
265
+ ;;
266
+ --experimental_slack)
267
+ shift
268
+ __experimental_slack=$1
269
+ ;;
270
+ -ndew | --num_dist_eval_workers)
271
+ shift
272
+ __num_dist_eval_workers=$1
273
+ ;;
274
+ -opt | --optimizer)
275
+ shift
276
+ __optimizer=$1
277
+ ;;
278
+ -port | --ssh_port)
279
+ shift
280
+ __ssh_port=$1
281
+ ;;
282
+ -h | --help)
283
+ help
284
+ exit 1
285
+ ;;
286
+ * )
287
+ __aux_param=$1
288
+ shift
289
+ echo "The parameter $1 will be passed directly to python script"
290
+ __aux_scirpt_params="${__aux_scirpt_params}:${__aux_param}=${1}"
291
+ ;;
292
+ esac
293
+ shift
294
+ done
295
+
296
+ export CFG_FILE=${__config:-"${BASE_PATH}/defaults.cfg"}
297
+ if [[ -f ${CFG_FILE} ]]; then
298
+ source ${CFG_FILE}
299
+ else
300
+ echo "Could not find ${CFG_FILE}"
301
+ exit 1
302
+ fi
303
+
304
+ # Set default values for environmental variable
305
+ export HOST_FILE=${__hostfile:-"${OMPI_MCA_orte_default_hostfile}"}
306
+ export SSH_PORT=${__ssh_port:-"3022"}
307
+
308
+ if [[ -z "${HABANA_LOGS}" ]]; then
309
+ export HABANA_LOGS="/var/logs/habana_logs"
310
+ echo "Creating default directory for habana_logs."
311
+ mkdir -p $HABANA_LOGS
312
+ fi
313
+ export EVAL_FILES_DIR=${EVAL_FILES_DIR}
314
+ export OUTPUT_DIR=${OUTPUT_DIR}
315
+ export PHASE1_CKPT=${INITIAL_CHECKPOINT}
316
+ export INITIAL_CHECKPOINT=${INITIAL_CHECKPOINT}
317
+ export BERT_CONFIG_DIR=${BERT_CONFIG_DIR}
318
+ export NUM_WORKERS_PER_HLS=${NUM_WORKERS_PER_HLS}
319
+ export OPTIMIZE_DMA_ENGINES_ALLOCATION=${OPTIMIZE_DMA_ENGINES_ALLOCATION}
320
+ export TF_CPU_RUNTIME_FALLBACK=${TF_CPU_RUNTIME_FALLBACK}
321
+ export TF_HCCL_MEMORY_ALLOWANCE_MB=${TF_HCCL_MEMORY_ALLOWANCE_MB}
322
+ export HABANA_INITIAL_WORKSPACE_SIZE_MB=${HABANA_INITIAL_WORKSPACE_SIZE_MB}
323
+
324
+ # Override defaults with command line options if needed
325
+ export MPI_TCP_INCLUDE=${__mpi_tcp_include:-$MPI_TCP_INCLUDE}
326
+ export USE_HOROVOD=${__use_horovod:-$USE_HOROVOD}
327
+ export WARMUP_STEPS=${__warmup_steps:-$WARMUP_STEPS}
328
+ export LEARNING_RATE=${__learning_rate:-$LEARNING_RATE}
329
+ export STOP_THRESHOLD=${__stop_threshold:-$STOP_THRESHOLD}
330
+ export NUM_ACCUMULATION_STEPS=${__num_accumul_steps:-$NUM_ACCUMULATION_STEPS}
331
+ export TRAIN_BATCH_SIZE=${__train_batchsize:-$TRAIN_BATCH_SIZE}
332
+ export EVAL_BATCH_SIZE=${__eval_batchsize:-$EVAL_BATCH_SIZE}
333
+ export TRAIN_STEPS=${__train_steps:-$TRAIN_STEPS}
334
+ export LAMB_BETA_1=${__lamb_beta_1:-$LAMB_BETA_1}
335
+ export LAMB_BETA_2=${__lamb_beta_2:-$LAMB_BETA_2}
336
+ export EPSILON=${__epsilon:-$EPSILON}
337
+ export LAMB_WEIGHT_DECAY_RATE=${__lamb_weight_decay_rate:-$LAMB_WEIGHT_DECAY_RATE}
338
+ export LAMB_LEARNING_RATE_DECAY_POLY_POWER=${__lamb_lr_decay_poly_power:-$LAMB_LEARNING_RATE_DECAY_POLY_POWER}
339
+ export SAMPLES_START_EVAL=${__samples_start_eval:-$SAMPLES_START_EVAL}
340
+ export MAX_EVAL_STEPS=${__max_eval_steps:-$MAX_EVAL_STEPS}
341
+ export NUM_WORKERS_TOTAL=${__num_workers_total:-$NUM_WORKERS_TOTAL}
342
+ export PACKED_DATA=${__packed_data:-$PACKED_DATA}
343
+ export SAVE_CHECKPOINTS_STEPS=${__save_checkpoints_steps:-$SAVE_CHECKPOINTS_STEPS}
344
+ SAMPLES_BETWEEN_EVAL=$(($TRAIN_BATCH_SIZE*$NUM_WORKERS_TOTAL*$NUM_ACCUMULATION_STEPS*$SAVE_CHECKPOINTS_STEPS))
345
+ export SAMPLES_BETWEEN_EVAL=${__samples_btw_eval:-$SAMPLES_BETWEEN_EVAL}
346
+ export CPU_BIND_TYPE=${__cpu_bind_type:-$CPU_BIND_TYPE}
347
+ export EVAL_FILES_DIR=${__eval_files_dir:-$EVAL_FILES_DIR}
348
+ export SIGNALING_FROM_GRAPH=${__signaling_from_graph:-$SIGNALING_FROM_GRAPH}
349
+ export OUTPUT_DIR=${__output_dir:-$OUTPUT_DIR}
350
+ export PHASE1_CKPT=${__initial_checkpoint:-$INITIAL_CHECKPOINT}
351
+ export BERT_CONFIG_DIR=${__config_dir:-$BERT_CONFIG_DIR}
352
+ export HLS_TYPE=${__hls_type:-$HLS_TYPE}
353
+ export USE_DRAM_OUTPUT=${__use_dram_output:-"True"}
354
+ export USE_LIGHTWEIGHT_CHECKPOINT=${__light_weight:-$USE_LIGHTWEIGHT_CHECKPOINT}
355
+ export LIGHTWEIGHT_CHECKPOINT_IMPL=${__light_weight_impl:-"basic"}
356
+ export USE_ASYNC_CHECKPOINTING=${__async_checkpointing:-$USE_ASYNC_CHECKPOINTING}
357
+ export LOG_DIR=${__log_dir:-$LOG_DIR}
358
+ export DO_TRAIN=${__do_train:-$DO_TRAIN}
359
+ export DO_EVAL=${__do_eval:-$DO_EVAL}
360
+ export EXPERIMENTAL_SLACK=${__experimental_slack:-$EXPERIMENTAL_SLACK}
361
+ export NUM_DIST_EVAL_WORKERS=${__num_dist_eval_workers:-$NUM_DIST_EVAL_WORKERS}
362
+ export AUX_PARAMS=${__aux_scirpt_params:-$AUX_PARAMS}
363
+ export OPTIMIZER=${__optimizer:-$OPTIMIZER}
364
+
365
+ if [[ "$HLS_TYPE" == "HLS2" ]]; then
366
+ export NUM_WORKERS_PER_HLS=8
367
+ else
368
+ "============== WRONG HLS TYPE!! ==============="
369
+ exit -1
370
+ fi
371
+
372
+ if [ "$PACKED_DATA" == "False" ]; then
373
+ export INPUT_FILES_DIR=${__input_files_dir:-$INPUT_FILES_DIR_UNPACKED}
374
+ else
375
+ export INPUT_FILES_DIR=${__input_files_dir:-$INPUT_FILES_DIR_PACKED}
376
+ fi
377
+
378
+ if [ "$USE_HOROVOD" == "True" ]; then
379
+ export HOROVOD_STALL_CHECK_DISABLE=1
380
+ echo HOROVOD_STALL_CHECK_DISABLE=$HOROVOD_STALL_CHECK_DISABLE
381
+
382
+ # SAO:ON by default
383
+ export TF_DISABLE_SCOPED_ALLOCATOR=${TF_DISABLE_SCOPED_ALLOCATOR:-False}
384
+ echo TF_DISABLE_SCOPED_ALLOCATOR=$TF_DISABLE_SCOPED_ALLOCATOR
385
+ fi
386
+
387
+ function getmulti_hls_ips()
388
+ {
389
+ multi_hcl_ip="MULTI_HLS_IPS="
390
+ hostsFile=$1
391
+ firstHost=1
392
+ hostCount=0
393
+
394
+ # iterate over non-empty and non-commented lines
395
+ for h in $(cat $hostsFile | sed '/^$/d' | grep -v '^#'); do
396
+ if [[ $firstHost -eq 1 ]]; then
397
+ firstHost=0
398
+ else
399
+ multi_hcl_ip+=","
400
+ fi
401
+ multi_hcl_ip+=$h
402
+ hostCount=$((hostCount + 1))
403
+ done
404
+
405
+ echo "[getmulti_hls_ips] Host Count : $hostCount"
406
+ echo "[getmulti_hls_ips] Exporting : $multi_hcl_ip"
407
+ export $multi_hcl_ip
408
+ }
409
+
410
+
411
+ function run_per_ip()
412
+ {
413
+ if [ -n "$OMPI_COMM_WORLD_SIZE" ]; then
414
+ print_error "Function run_per_ip is not meant to be ran from within an OpenMPI context. It is intended to invoke mpirun by itelf."
415
+ exit 1
416
+ fi
417
+ _cmd="$@"
418
+ # Due to technical difficulties with the following solution, the _cmd stderr shall be redirected to stdout.
419
+ if [[ -z ${MULTI_HLS_IPS} ]]; then
420
+ echo "[launch_bert_hvd] MULTI_HLS_IPS undefined - maybe a missing /root/shared/hosts file?"
421
+ exit -1
422
+ else
423
+ if [ -n "$MPI_TCP_INCLUDE" ]; then
424
+ _option_btl_tcp_if_include="--mca btl_tcp_if_include ${MPI_TCP_INCLUDE}"
425
+ else
426
+ _option_btl_tcp_if_include=""
427
+ fi
428
+ mpirun --allow-run-as-root \
429
+ --mca plm_rsh_args -p${SSH_PORT} \
430
+ ${_option_btl_tcp_if_include} \
431
+ --tag-output \
432
+ --merge-stderr-to-stdout \
433
+ --prefix ${OMPI_PREFIX} \
434
+ -H ${MULTI_HLS_IPS} \
435
+ bash -c "`declare`; `declare -x`; ($_cmd 2>&1)" 2>/dev/null
436
+ fi
437
+ }
438
+
439
+ export MULTI_HLS_IPS=localhost
440
+ if [[ -f ${HOST_FILE} ]]; then
441
+ getmulti_hls_ips ${HOST_FILE}
442
+ fi
443
+
444
+ # Create recipes directory if it does not exist and adjust directory name
445
+ # if we are collecting traces - which require debug information
446
+ run_per_ip mkdir -p ${OUTPUT_DIR} # 2>/dev/null
447
+ run_per_ip rm -rf ${OUTPUT_DIR}/* # 2>/dev/null
448
+ run_per_ip mkdir -p ${LOG_DIR}
449
+ mkdir -p ${LOG_DIR}
450
+
451
+ run_per_ip pip install -r $BASE_PATH/../TensorFlow/nlp/bert/requirements.txt
452
+
453
+ #run_per_ip rm -rf /tmp/checkpoint /tmp/eval /tmp/events.out.tfevents.* /tmp/graph.pbtxt /tmp/model.ckpt-*
454
+ #run_per_ip rm -rf /tmp/rank_*/checkpoint /tmp/rank_*/eval /tmp/rank_*/events.out.tfevents.* /tmp/rank_*/graph.pbtxt /tmp/rank_*/model.ckpt-*
455
+
456
+ function setup_libjemalloc()
457
+ {
458
+ local libjemalloc_1_lib="libjemalloc.so.1"
459
+ local libjemalloc_2_lib="libjemalloc.so.2"
460
+ local is_v2_not_present=`LD_PRELOAD=${libjemalloc_2_lib} head -0 2>&1 > /dev/null`
461
+
462
+ if [ -z "${is_v2_not_present}" ]; then
463
+ export LD_PRELOAD=${libjemalloc_2_lib}:$LD_PRELOAD
464
+ else
465
+ export LD_PRELOAD=${libjemalloc_1_lib}:$LD_PRELOAD
466
+ fi
467
+ }
468
+ run_per_ip setup_libjemalloc
469
+
470
+ if [[ -z ${MULTI_HLS_IPS} ]]; then
471
+ echo "[launch_bert_hvd] MULTI_HLS_IPS undefined - maybe a missing /root/shared/hosts file?"
472
+ exit -1
473
+ else
474
+ IFS=',' read -ra IPS <<< "$MULTI_HLS_IPS"
475
+ let MPI_NP=${#IPS[@]}*${NUM_WORKERS_PER_HLS}
476
+ export NUM_WORKERS_TOTAL=${NUM_WORKERS_TOTAL:-$MPI_NP}
477
+
478
+ if [[ $NUM_WORKERS_TOTAL != $MPI_NP ]]; then
479
+ echo $NUM_WORKERS_TOTAL $MPI_NP
480
+ echo "=============== WRONG NUMBER_WORKERS_TOTAL!! ==============="
481
+ exit -1
482
+ fi
483
+
484
+ echo NUM_WORKERS_TOTAL=$NUM_WORKERS_TOTAL
485
+
486
+ function generate_mpi_hostfile()
487
+ {
488
+ echo "Generating MPI hostfile..."
489
+ local num_nodes=${2:-8}
490
+ local file_name="hostfile"
491
+ export MPI_HOSTFILE_PATH=$1/${file_name}
492
+
493
+ rm -rf ${MPI_HOSTFILE_PATH}
494
+ echo "PATH: ${MPI_HOSTFILE_PATH}"
495
+ touch ${MPI_HOSTFILE_PATH}
496
+
497
+ IFS=',' read -ra IPS <<< "$MULTI_HLS_IPS"
498
+ for i in "${IPS[@]}"; do
499
+ echo "$i slots=${num_nodes}" >> ${MPI_HOSTFILE_PATH}
500
+ done
501
+ echo "Config: "
502
+ cat ${MPI_HOSTFILE_PATH}
503
+ }
504
+
505
+ generate_mpi_hostfile ${OUTPUT_DIR} ${NUM_WORKERS_PER_HLS}
506
+
507
+ export testdate=`date +%Y-%m-%d`
508
+ export testtime=`date +%H%M%S`
509
+ export OUTPUT_DIR=${__output_dir:-/root/scratch/bert/bert_gaudi${NUM_WORKERS_TOTAL}_${testdate}_${testtime}}
510
+
511
+ run_per_ip mkdir -p ${OUTPUT_DIR}
512
+
513
+ run_per_ip rm -f $LOG_DIR/result_*
514
+ run_per_ip rm -f ${LOG_DIR}/tf_bert_pretraining_lamb.log
515
+
516
+ LOGFILE=$LOG_DIR/tf_bert_pretraining_lamb.log
517
+ export TF_RECIPE_CACHE_PATH=/tmp/bert_pretrain/phase_2
518
+ run_per_ip mkdir -p $TF_RECIPE_CACHE_PATH
519
+
520
+ MPI_MAP_BY=socket
521
+ MPI_MAP_BY_PE=`lscpu | grep "^CPU(s):"| awk -v NUM=${NUM_WORKERS_PER_HLS} '{print int($2/NUM/2)}'`
522
+ if [[ "$CPU_BIND_TYPE" == "numa" || "$CPU_BIND_TYPE" == "none" ]]; then
523
+ MPIRUN_ARGS_MAP_BY_PE="-bind-to none"
524
+ else
525
+ MPIRUN_ARGS_MAP_BY_PE="--bind-to core --map-by $MPI_MAP_BY:PE=$MPI_MAP_BY_PE"
526
+ fi
527
+
528
+ if [ -n "$MPI_TCP_INCLUDE" ]; then
529
+ _option_btl_tcp_if_include="--mca btl_tcp_if_include ${MPI_TCP_INCLUDE}"
530
+ else
531
+ _option_btl_tcp_if_include=""
532
+ fi
533
+
534
+ TRAINING_COMMAND="mpirun --allow-run-as-root \
535
+ --display-map \
536
+ --report-bindings \
537
+ --bind-to none \
538
+ -np ${NUM_WORKERS_TOTAL}\
539
+ --hostfile ${MPI_HOSTFILE_PATH} \
540
+ --prefix ${OMPI_PREFIX} \
541
+ --mca plm_rsh_args -p${SSH_PORT} \
542
+ ${_option_btl_tcp_if_include} \
543
+ --merge-stderr-to-stdout \
544
+ --tag-output \
545
+ --output-filename ${LOG_DIR}/bert_log \
546
+ -x USE_HOROVOD=${USE_HOROVOD} \
547
+ -x TF_MODULES_RELEASE_BUILD=/usr/lib/habanalabs/ \
548
+ -x HABANA_LOGS=${HABANA_LOGS} \
549
+ -x LEARNING_RATE=${LEARNING_RATE} \
550
+ -x STOP_THRESHOLD=${STOP_THRESHOLD} \
551
+ -x NUM_ACCUMULATION_STEPS=${NUM_ACCUMULATION_STEPS} \
552
+ -x TRAIN_BATCH_SIZE=${TRAIN_BATCH_SIZE} \
553
+ -x EVAL_BATCH_SIZE=${EVAL_BATCH_SIZE} \
554
+ -x TRAIN_STEPS=${TRAIN_STEPS} \
555
+ -x NUM_WORKERS_TOTAL=${NUM_WORKERS_TOTAL} \
556
+ -x WARMUP_STEPS=${WARMUP_STEPS} \
557
+ -x LAMB_BETA_1=${LAMB_BETA_1} \
558
+ -x LAMB_BETA_2=${LAMB_BETA_2} \
559
+ -x EPSILON=${EPSILON} \
560
+ -x LAMB_WEIGHT_DECAY_RATE=${LAMB_WEIGHT_DECAY_RATE} \
561
+ -x LAMB_LEARNING_RATE_DECAY_POLY_POWER=${LAMB_LEARNING_RATE_DECAY_POLY_POWER} \
562
+ -x SAMPLES_BETWEEN_EVAL=${SAMPLES_BETWEEN_EVAL} \
563
+ -x SAMPLES_START_EVAL=${SAMPLES_START_EVAL} \
564
+ -x MAX_EVAL_STEPS=${MAX_EVAL_STEPS} \
565
+ -x INPUT_FILES_DIR=${INPUT_FILES_DIR} \
566
+ -x EVAL_FILES_DIR=${EVAL_FILES_DIR} \
567
+ -x OUTPUT_DIR=${OUTPUT_DIR} \
568
+ -x PHASE1_CKPT=${PHASE1_CKPT} \
569
+ -x BERT_CONFIG_DIR=${BERT_CONFIG_DIR} \
570
+ -x OPTIMIZE_DMA_ENGINES_ALLOCATION=${OPTIMIZE_DMA_ENGINES_ALLOCATION} \
571
+ -x TF_CPU_RUNTIME_FALLBACK=${TF_CPU_RUNTIME_FALLBACK} \
572
+ -x TF_HCCL_MEMORY_ALLOWANCE_MB=${TF_HCCL_MEMORY_ALLOWANCE_MB} \
573
+ -x HABANA_INITIAL_WORKSPACE_SIZE_MB=${HABANA_INITIAL_WORKSPACE_SIZE_MB} \
574
+ -x HLS_TYPE=${HLS_TYPE} \
575
+ -x MPI_TCP_INCLUDE=${MPI_TCP_INCLUDE} \
576
+ -x SAVE_CHECKPOINTS_STEPS=${SAVE_CHECKPOINTS_STEPS} \
577
+ -x PACKED_DATA=${PACKED_DATA} \
578
+ -x TESTDATE=${testdate} \
579
+ -x TESTTIME=${testtime} \
580
+ -x CPU_BIND_TYPE=${CPU_BIND_TYPE} \
581
+ ${MPIRUN_ARGS_MAP_BY_PE} \
582
+ -x NUM_WORKERS_PER_HLS=${NUM_WORKERS_PER_HLS} \
583
+ -x USE_DRAM_OUTPUT=${USE_DRAM_OUTPUT} \
584
+ -x USE_LIGHTWEIGHT_CHECKPOINT=${USE_LIGHTWEIGHT_CHECKPOINT} \
585
+ -x LIGHTWEIGHT_CHECKPOINT_IMPL=${LIGHTWEIGHT_CHECKPOINT_IMPL} \
586
+ -x USE_ASYNC_CHECKPOINTING=${USE_ASYNC_CHECKPOINTING} \
587
+ -x LOG_DIR=${LOG_DIR} \
588
+ -x TF_RECIPE_CACHE_PATH \
589
+ -x DO_TRAIN=${DO_TRAIN} \
590
+ -x DO_EVAL=${DO_EVAL} \
591
+ -x EXPERIMENTAL_SLACK=${EXPERIMENTAL_SLACK} \
592
+ -x NUM_DIST_EVAL_WORKERS=${NUM_DIST_EVAL_WORKERS} \
593
+ -x WARMUP_STEPS=${WARMUP_STEPS} \
594
+ -x AUX_PARAMS=${AUX_PARAMS} \
595
+ -x TF_ENABLE_DYNAMIC_SHAPES=${TF_ENABLE_DYNAMIC_SHAPES} \
596
+ -x OPTIMIZER=${OPTIMIZER} \
597
+ -x SIGNALING_FROM_GRAPH=${SIGNALING_FROM_GRAPH} \
598
+ ${BASE_PATH}/run.sh"
599
+
600
+ echo "TRAINING COMMAND = ${TRAINING_COMMAND}"
601
+ printf "[launch_bert_hvd] Starting training...\n\n"
602
+ time $TRAINING_COMMAND |& tee -a $LOGFILE
603
+ fi
604
+ run_per_ip rm -rf $OUTPUT_DIR/*/model.ckpt-*
605
+ rm -rf $BASE_PATH/log
606
+ cp /root/build_log.csv ${OUTPUT_DIR}/
607
+ cp ${MPI_HOSTFILE_PATH} ${OUTPUT_DIR}/
608
+ cp -r $LOG_DIR/bert_log $BASE_PATH/log
609
+ cp $TF_RECIPE_CACHE_PATH/tf_bert_pretraining* ${OUTPUT_DIR}/
610
+ chmod -R 777 ${OUTPUT_DIR}
611
+ exit $exit_code
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/HLS-Gaudi2-TF/run.sh ADDED
@@ -0,0 +1,164 @@
1
+ #! /bin/bash
2
+
3
+ #set -x
4
+ ###############################################################################
5
+ # Copyright (C) 2020-2023 Habana Labs, Ltd. an Intel Company
6
+ #
7
+ ###############################################################################
8
+
9
+ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
10
+ export BASE_PATH="$( cd "$(dirname "$(readlink -f ${SCRIPT_DIR}/defaults.cfg)" )" && pwd)"
11
+ export PYTHONPATH=${BASE_PATH}:${BASE_PATH}/../TensorFlow/common
12
+
13
+ PT_VERSION=`python3 -c 'import sys; print(f"{sys.version_info[0]}.{sys.version_info[1]}")'`
14
+ TF_VERSION=`python3 -c "import tensorflow as tf; print(tf.__version__.replace('.', '_'))"`
15
+ PATCH_PATH=/usr/local/lib/python${PT_VERSION}/dist-packages/habana_frameworks/tensorflow/tf${TF_VERSION}/lib/habanalabs
16
+ export PYTHONPATH=${PATCH_PATH}:${PYTHONPATH}
17
+
18
+ TRAIN_BATCH_SIZE=${TRAIN_BATCH_SIZE:-7}
19
+ EVAL_BATCH_SIZE=${EVAL_BATCH_SIZE:-125}
20
+ LEARNING_RATE=${LEARNING_RATE:-5e-5}
21
+ PRECISION=${PRECISION:-fp32}
22
+ WARMUP_STEPS=${WARMUP_STEPS:-0}
23
+ TRAIN_STEPS=${TRAIN_STEPS:-8103}
24
+ SAVE_CHECKPOINTS_STEPS=${SAVE_CHECKPOINTS_STEPS:-335}
25
+ NUM_ACCUMULATION_STEPS=${NUM_ACCUMULATION_STEPS:-4}
26
+ SAMPLES_BETWEEN_EVAL=${SAMPLES_BETWEEN_EVAL:-150080}
27
+ STOP_THRESHOLD=${STOP_THRESHOLD:-0.720}
28
+ SAMPLES_START_EVAL=${SAMPLES_START_EVAL:-3000000}
29
+ MAX_EVAL_STEPS=${MAX_EVAL_STEPS:-0}
30
+ IS_DIST_EVAL_ENABLED=${IS_DIST_EVAL_ENABLED:-false}
31
+ MAX_SEQ_LENGTH=${MAX_SEQ_LENGTH:-512}
32
+ MAX_PRED_PER_SEQ=${MAX_PRED_PER_SEQ:-76}
33
+ FAST_PERF_ONLY=${FAST_PERF_ONLY:-0}
34
+ PACKED_DATA=${PACKED_DATA:-False}
35
+ TESTDATE=${TESTDATE}
36
+ TESTTIME=${TESTTIME}
37
+ LAMB_BETA_1=${LAMB_BETA_1:-0.9}
38
+ LAMB_BETA_2=${LAMB_BETA_2:-0.999}
39
+ EPSILON=${EPSILON:-1e-6}
40
+ LAMB_WEIGHT_DECAY_RATE=${LAMB_WEIGHT_DECAY_RATE:-0.01}
41
+ LAMB_LEARNING_RATE_DECAY_POLY_POWER=${LAMB_LEARNING_RATE_DECAY_POLY_POWER:-1.0}
42
+ NUM_WORKERS_PER_HLS=${NUM_WORKERS_PER_HLS:-4}
43
+ DO_TRAIN=${DO_TRAIN:-True}
44
+ DO_EVAL=${DO_EVAL:-True}
45
+ EXPERIMENTAL_SLACK=${EXPERIMENTAL_SLACK:-True}
46
+ NUM_DIST_EVAL_WORKERS=${NUM_DIST_EVAL_WORKERS:-0}
47
+ OPTIMIZER=${OPTIMIZER:-'lamb'}
48
+
49
+ export TF_BF16_CONVERSION=${BASE_PATH}/../TensorFlow/common/bf16_config/bert.json
50
+ export USE_LIGHTWEIGHT_CHECKPOINT=${USE_LIGHTWEIGHT_CHECKPOINT:-True}
51
+ export LIGHTWEIGHT_CHECKPOINT_IMPL=${LIGHTWEIGHT_CHECKPOINT_IMPL:-"basic"}
52
+ export USE_ASYNC_CHECKPOINTING=${USE_ASYNC_CHECKPOINTING:-False}
53
+ export BERT_CONFIG_FILE=${BERT_CONFIG_FILE:-${BERT_CONFIG_DIR}/bert_config.json}
54
+
55
+ if [[ $SIGNALING_FROM_GRAPH -eq 1 ]]; then
56
+ export TF_DISABLE_SCOPED_ALLOCATOR=True
57
+ export HOROVOD_FUSION_THRESHOLD=0
58
+ export TF_USE_SIGNALING_FROM_ENCAP_OP=1
59
+ else
60
+ export TF_USE_SIGNALING_FROM_ENCAP_OP=0
61
+ fi
62
+
63
+ # Currently sharded LAMB works only when ScopedAllocator is disabled and loop unrolling is False
64
+ if [ $OPTIMIZER == "sharded_lamb" ]; then
65
+ export TF_DISABLE_SCOPED_ALLOCATOR=True
66
+ AUX_PARAMS="${AUX_PARAMS} --loop_unrolling_for_train_op=False"
67
+ fi
68
+
69
+ # Under the hood, AMP (Automatic Mixed Precision) training is applied via TF_BF16_CONVERSION
70
+ # default precision is fp32.
71
+ precision="--noamp"
72
+
73
+ USE_HOROVOD=${USE_HOROVOD:-"False"}
74
+ if [ $USE_HOROVOD == "True" ]; then
75
+ horovod="--horovod --allreduce_post_accumulation=True"
76
+ IS_DIST_EVAL_ENABLED="True"
77
+ else
78
+ horovod=""
79
+ fi
80
+
81
+ #PHASE 1 Config
82
+ export PHASE1_CKPT=${PHASE1_CKPT:-/root/datasets/bert_pretraining/MLPerf_BERT_checkpoint/model.ckpt-28252}
83
+ export INPUT_FILES_DIR=${INPUT_FILES_DIR:-/root/datasets/bert_pretraining/training}
84
+ export EVAL_FILES_DIR=${EVAL_FILES_DIR:-/root/datasets/bert_pretraining/evaluation}
85
+
86
+ #Generate Host Folder
87
+ if [ "$USE_DRAM_OUTPUT" == "True" ]; then
88
+ host=$(hostname)
89
+ if [ "$OMPI_COMM_WORLD_LOCAL_RANK" == "0" ]; then
90
+ mkdir -p /mnt/dramfs
91
+ mount -t tmpfs -o size=200g tmpfs /mnt/dramfs
92
+ fi
93
+ export OUTPUT_DIR=/mnt/dramfs/bert_gaudi${NUM_WORKERS_TOTAL}_${TESTDATE}_${TESTTIME}/${host}
94
+ mkdir -p $OUTPUT_DIR
95
+ fi
96
+
97
+ # clear cache
98
+ if [[ $OMPI_COMM_WORLD_LOCAL_RANK -eq 0 ]]; then
99
+ PROC_FS=${PROC_FS:-"/proc"}
100
+ sync && echo 3 > $PROC_FS/sys/vm/drop_caches
101
+ fi
102
+
103
+ if [ $PACKED_DATA == "False" ]; then
104
+ packing_arg=""
105
+ else
106
+ packing_arg="--enable_packed_data_mode --avg_seq_per_pack=2"
107
+ fi
108
+
109
+ AUX_PARAMS=$(echo ${AUX_PARAMS} | sed s/:/\ /g)
110
+
111
+ enable_device_warmup=True
112
+
113
+ TRAIN_COMMAND="python3 ${BASE_PATH}/../TensorFlow/nlp/bert/run_pretraining.py \
114
+ --input_files_dir=$INPUT_FILES_DIR \
115
+ --init_checkpoint=$PHASE1_CKPT \
116
+ --eval_files_dir=$EVAL_FILES_DIR \
117
+ --output_dir=$OUTPUT_DIR \
118
+ --bert_config_file=$BERT_CONFIG_FILE \
119
+ --do_train=$DO_TRAIN \
120
+ --do_eval=$DO_EVAL \
121
+ --experimental_slack=$EXPERIMENTAL_SLACK \
122
+ --is_dist_eval_enabled=$IS_DIST_EVAL_ENABLED \
123
+ --train_batch_size=$TRAIN_BATCH_SIZE \
124
+ --eval_batch_size=$EVAL_BATCH_SIZE \
125
+ --max_eval_steps=$MAX_EVAL_STEPS \
126
+ --max_seq_length=$MAX_SEQ_LENGTH \
127
+ --max_predictions_per_seq=$MAX_PRED_PER_SEQ \
128
+ --num_train_steps=$TRAIN_STEPS \
129
+ --num_accumulation_steps=$NUM_ACCUMULATION_STEPS \
130
+ --num_warmup_steps=$WARMUP_STEPS \
131
+ --save_checkpoints_steps=$SAVE_CHECKPOINTS_STEPS \
132
+ --learning_rate=$LEARNING_RATE \
133
+ $horovod \
134
+ $precision \
135
+ $packing_arg \
136
+ --enable_device_warmup=$enable_device_warmup \
137
+ --samples_between_eval=$SAMPLES_BETWEEN_EVAL \
138
+ --stop_threshold=$STOP_THRESHOLD \
139
+ --samples_start_eval=$SAMPLES_START_EVAL \
140
+ --beta_1=$LAMB_BETA_1 \
141
+ --beta_2=$LAMB_BETA_2 \
142
+ --epsilon=$EPSILON \
143
+ --weight_decay_rate=$LAMB_WEIGHT_DECAY_RATE \
144
+ --power=$LAMB_LEARNING_RATE_DECAY_POLY_POWER \
145
+ --enable_habana_backend \
146
+ --dllog_path=$LOG_DIR/bert_dllog.json \
147
+ --use_lightweight_checkpoint=$USE_LIGHTWEIGHT_CHECKPOINT \
148
+ --lightweight_checkpoint_impl=$LIGHTWEIGHT_CHECKPOINT_IMPL \
149
+ --use_async_checkpointing=$USE_ASYNC_CHECKPOINTING \
150
+ --num_dist_eval_workers=$NUM_DIST_EVAL_WORKERS \
151
+ --optimizer_type=$OPTIMIZER \
152
+ ${AUX_PARAMS}
153
+ "
154
+
155
+ LD_PRELOAD=${PRELOAD_PATH} ${TRAIN_COMMAND}
156
+
157
+ if [[ $OMPI_COMM_WORLD_LOCAL_RANK == "0" ]]; then
158
+ rm -rf $OUTPUT_DIR/*/model.ckpt-*
159
+ rm -rf $OUTPUT_DIR/*/checkpoint
160
+ if [[ $USE_DRAM_OUTPUT == "True" ]]; then
161
+ cp -r $LOG_DIR/result_* /root/scratch/bert/bert_gaudi${NUM_WORKERS_TOTAL}_${TESTDATE}_${TESTTIME}
162
+ rm -rf /mnt/dramfs/bert_gaudi${NUM_WORKERS_TOTAL}_${TESTDATE}_${TESTTIME}
163
+ fi
164
+ fi
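Note on run.sh above: AUX_PARAMS is passed in as a colon-separated list of extra run_pretraining.py flags, and the sed substitution (s/:/ /g) turns it into a space-separated string before it is appended to TRAIN_COMMAND. A minimal sketch of that expansion; --loop_unrolling_for_train_op is the flag run.sh itself appends for sharded_lamb, while --some_extra_flag is a hypothetical placeholder used only to show the separator:

    # Illustration of the AUX_PARAMS expansion; --some_extra_flag is hypothetical.
    AUX_PARAMS="--loop_unrolling_for_train_op=False:--some_extra_flag=1"
    AUX_PARAMS=$(echo ${AUX_PARAMS} | sed s/:/\ /g)
    echo "${AUX_PARAMS}"   # prints: --loop_unrolling_for_train_op=False --some_extra_flag=1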
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/chop_hdf5_files.py ADDED
@@ -0,0 +1,150 @@
1
+ # Copyright (c) 2019-2022 NVIDIA CORPORATION. All rights reserved.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+
14
+ import glob
15
+ import h5py
16
+ import multiprocessing
17
+ import numpy as np
18
+ from os import path, makedirs
19
+ from tqdm import tqdm
20
+ import argparse
21
+ import logging
22
+
23
+ parser = argparse.ArgumentParser(
24
+ description="Training data sharding for BERT.")
25
+ parser.add_argument(
26
+ '--input_hdf5_dir',
27
+ type=str,
28
+ default='hdf5',
29
+ help='Input hdf5_file path')
30
+ parser.add_argument(
31
+ '--output_hdf5_dir',
32
+ type=str,
33
+ default='',
34
+ help='Output hdf5_file path')
35
+ parser.add_argument(
36
+ '--num_shards',
37
+ type=int,
38
+ default=2048,
39
+ help='Number of output shards (default 2048)')
40
+ parser.add_argument(
41
+ '--max_seq_length',
42
+ type=int,
43
+ default=512,
44
+ help='The maximum number of tokens within a sequence. (default 512)')
45
+ parser.add_argument(
46
+ '--max_predictions_per_seq',
47
+ type=int,
48
+ default=76,
49
+ help='The maximum number of predictions within a sequence. (default 76)')
50
+ args = parser.parse_args()
51
+
52
+ max_seq_length = args.max_seq_length
53
+ max_predictions_per_seq = args.max_predictions_per_seq
54
+ n_output_shards = args.num_shards
55
+ input_path = args.input_hdf5_dir
56
+ logging.basicConfig(level=logging.INFO)
57
+
58
+ hdf5_compression_method = None
59
+
60
+ input_files = sorted(glob.glob(input_path + '/part-00???-of-00500.hdf5', recursive=False))
61
+ logging.info('n_input_shards = {}'.format(len(input_files)))
62
+ logging.info('n_output_shards = {}'.format(n_output_shards))
63
+
64
+ output_shards_dir = path.join(args.output_hdf5_dir,'hdf5_{}_shards_uncompressed'.format(n_output_shards))
65
+ try:
66
+ makedirs(output_shards_dir)
67
+ except OSError as error:
68
+ logging.info('Output directory {} already exists. Overwriting ...'.format(output_shards_dir))
69
+
70
+ ofile_prefix = path.join(output_shards_dir, 'part_')
71
+ ofile_suffix = '_of_{:05d}.hdf5'.format(n_output_shards)
72
+
73
+
74
+ # First pass over data to get sample count (read only the smallest array to get count)
75
+ n_samples = 0
76
+ for ifile in tqdm(input_files, total=len(input_files)):
77
+ h5_ifile = h5py.File(ifile, 'r')
78
+ n_samples += h5_ifile['next_sentence_labels'].shape[0]
79
+ h5_ifile.close()
80
+
81
+ # Find a "nominal" number of samples per shard (calculated to always go over by one shard size)
82
+ # Find excess samples in last shard and distribute removal of excess over first "N" shards (could be done over last, but it doesn't matter and math is easier this way)
83
+ # (since 0 <= excess < nominal_shard_size, the max imbalance will be 1 sample to minimize the straggler effect)
84
+ n_sample_per_ofile_nominal = (n_samples + n_output_shards - 1) // n_output_shards
85
+ n_excess = n_output_shards * n_sample_per_ofile_nominal - n_samples # Always a positive number
86
+ logging.info('Total number of samples: {}. Sample per shard {}/{}'.format(n_samples, n_sample_per_ofile_nominal-1, n_sample_per_ofile_nominal))
87
+
88
+ logging.info('creating {} output file handles. This could take a while.'.format(n_output_shards))
89
+ ofile_handles = [h5py.File('{}{:05d}{}'.format(ofile_prefix, shard, ofile_suffix), 'w') for shard in range(n_output_shards)]
90
+
91
+ ofile_idx = 0 # which output file
92
+ ofile_entry_idx = 0 # index into an individual data element of an output file
93
+ ifile_entry_idx = 0
94
+
95
+ n_samples_in_this_shard = n_sample_per_ofile_nominal - 1
96
+ o_input_ids = np.ndarray((n_samples_in_this_shard, max_seq_length))
97
+ o_input_masks = np.ndarray((n_samples_in_this_shard, max_seq_length))
98
+ o_segment_ids = np.ndarray((n_samples_in_this_shard, max_seq_length))
99
+ o_masked_lm_positions = np.ndarray((n_samples_in_this_shard, max_predictions_per_seq))
100
+ o_masked_lm_ids = np.ndarray((n_samples_in_this_shard, max_predictions_per_seq))
101
+ o_next_sentence_labels = np.ndarray((n_samples_in_this_shard))
102
+
103
+ for ifile in tqdm(input_files, total=len(input_files)):
104
+ h5_ifile = h5py.File(ifile, 'r')
105
+
106
+ ifile_entry_idx = 0
107
+ f_input_ids = h5_ifile['input_ids'][:]
108
+ f_input_masks = h5_ifile['input_mask'][:]
109
+ f_segment_ids = h5_ifile['segment_ids'][:]
110
+ f_masked_lm_positions = h5_ifile['masked_lm_positions'][:]
111
+ f_masked_lm_ids = h5_ifile['masked_lm_ids'][:]
112
+ f_next_sentence_labels = h5_ifile['next_sentence_labels'][:]
113
+
114
+ h5_ifile.close()
115
+
116
+ # This could be vectorized but keeping it simple due to lack of time
117
+ while ifile_entry_idx < f_input_ids.shape[0]:
118
+ if ofile_entry_idx == n_samples_in_this_shard:
119
+ ofile_handles[ofile_idx].create_dataset("input_ids", data=o_input_ids, dtype='i2', compression=hdf5_compression_method)
120
+ ofile_handles[ofile_idx].create_dataset("input_mask", data=o_input_masks, dtype='i1', compression=hdf5_compression_method)
121
+ ofile_handles[ofile_idx].create_dataset("segment_ids", data=o_segment_ids, dtype='i1', compression=hdf5_compression_method)
122
+ ofile_handles[ofile_idx].create_dataset("masked_lm_positions", data=o_masked_lm_positions, dtype='i2', compression=hdf5_compression_method)
123
+ ofile_handles[ofile_idx].create_dataset("masked_lm_ids", data=o_masked_lm_ids, dtype='i2', compression=hdf5_compression_method)
124
+ ofile_handles[ofile_idx].create_dataset("next_sentence_labels", data=o_next_sentence_labels, dtype='i1', compression=hdf5_compression_method)
125
+ ofile_handles[ofile_idx].flush()
126
+ ofile_handles[ofile_idx].close()
127
+
128
+ ofile_entry_idx = 0
129
+ ofile_idx += 1
130
+
131
+ n_samples_in_this_shard = n_sample_per_ofile_nominal
132
+ if ofile_entry_idx < n_excess:
133
+ n_samples_in_this_shard -= 1
134
+
135
+ o_input_ids = np.ndarray((n_samples_in_this_shard, max_seq_length))
136
+ o_input_masks = np.ndarray((n_samples_in_this_shard, max_seq_length))
137
+ o_segment_ids = np.ndarray((n_samples_in_this_shard, max_seq_length))
138
+ o_masked_lm_positions = np.ndarray((n_samples_in_this_shard, max_predictions_per_seq))
139
+ o_masked_lm_ids = np.ndarray((n_samples_in_this_shard, max_predictions_per_seq))
140
+ o_next_sentence_labels = np.ndarray((n_samples_in_this_shard))
141
+
142
+ o_input_ids[ofile_entry_idx] = f_input_ids[ifile_entry_idx]
143
+ o_input_masks[ofile_entry_idx] = f_input_masks[ifile_entry_idx]
144
+ o_segment_ids[ofile_entry_idx] = f_segment_ids[ifile_entry_idx]
145
+ o_masked_lm_positions[ofile_entry_idx] = f_masked_lm_positions[ifile_entry_idx]
146
+ o_masked_lm_ids[ofile_entry_idx] = f_masked_lm_ids[ifile_entry_idx]
147
+ o_next_sentence_labels[ofile_entry_idx] = f_next_sentence_labels[ifile_entry_idx]
148
+ ofile_entry_idx += 1
149
+
150
+ ifile_entry_idx += 1
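A worked instance of the shard-sizing arithmetic in chop_hdf5_files.py above (the "nominal" / "excess" comments before the output file handles are created); the sample count here is purely illustrative:

    # Hypothetical counts: 1,000,000 samples chopped into 2,048 shards.
    n_samples=1000000; n_output_shards=2048
    nominal=$(( (n_samples + n_output_shards - 1) / n_output_shards ))   # 489 (ceiling division)
    excess=$(( n_output_shards * nominal - n_samples ))                  # 1472
    # Per the comments, 1472 shards then hold 488 samples and the remaining 576 hold 489,
    # so no two shards differ by more than one sample:
    echo $(( excess * (nominal - 1) + (n_output_shards - excess) * nominal ))   # prints 1000000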
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/create_pretraining_data.py ADDED
@@ -0,0 +1,455 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2019-2022 NVIDIA CORPORATION. All rights reserved.
3
+ # Copyright 2020 MLBenchmark Group. All rights reserved.
4
+
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Create masked LM/next sentence masked_lm TF examples for BERT."""
18
+
19
+ from __future__ import absolute_import
20
+ from __future__ import division
21
+ from __future__ import print_function
22
+
23
+ import collections
24
+ import random
25
+ import tokenization
26
+ import tensorflow as tf
27
+
28
+ import h5py
29
+ import numpy as np
30
+
31
+ hdf5_compression_method = None
32
+
33
+ #flags = tf.flags
34
+ flags = tf.compat.v1.flags
35
+
36
+ FLAGS = flags.FLAGS
37
+
38
+ flags.DEFINE_string("input_file", None,
39
+ "Input raw text file (or comma-separated list of files).")
40
+
41
+ flags.DEFINE_string(
42
+ "output_file", None,
43
+ "Output TF example file (or comma-separated list of files).")
44
+
45
+ flags.DEFINE_string("vocab_file", None,
46
+ "The vocabulary file that the BERT model was trained on.")
47
+
48
+ flags.DEFINE_bool(
49
+ "do_lower_case", True,
50
+ "Whether to lower case the input text. Should be True for uncased "
51
+ "models and False for cased models.")
52
+
53
+ flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.")
54
+
55
+ flags.DEFINE_integer("max_predictions_per_seq", 20,
56
+ "Maximum number of masked LM predictions per sequence.")
57
+
58
+ flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
59
+
60
+ flags.DEFINE_integer(
61
+ "dupe_factor", 10,
62
+ "Number of times to duplicate the input data (with different masks).")
63
+
64
+ flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
65
+
66
+ flags.DEFINE_float(
67
+ "short_seq_prob", 0.1,
68
+ "Probability of creating sequences which are shorter than the "
69
+ "maximum length.")
70
+
71
+
72
+ class TrainingInstance(object):
73
+ """A single training instance (sentence pair)."""
74
+
75
+ def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
76
+ is_random_next):
77
+ self.tokens = tokens
78
+ self.segment_ids = segment_ids
79
+ self.is_random_next = is_random_next
80
+ self.masked_lm_positions = masked_lm_positions
81
+ self.masked_lm_labels = masked_lm_labels
82
+
83
+ def __str__(self):
84
+ s = ""
85
+ s += "tokens: %s\n" % (" ".join(
86
+ [tokenization.printable_text(x) for x in self.tokens]))
87
+ s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
88
+ s += "is_random_next: %s\n" % self.is_random_next
89
+ s += "masked_lm_positions: %s\n" % (" ".join(
90
+ [str(x) for x in self.masked_lm_positions]))
91
+ s += "masked_lm_labels: %s\n" % (" ".join(
92
+ [tokenization.printable_text(x) for x in self.masked_lm_labels]))
93
+ s += "\n"
94
+ return s
95
+
96
+ def __repr__(self):
97
+ return self.__str__()
98
+
99
+
100
+ def write_instance_to_example_files(instances, tokenizer, max_seq_length,
101
+ max_predictions_per_seq, output_files):
102
+ """Create TF example files from `TrainingInstance`s."""
103
+ writers = []
104
+ h5_writers = []
105
+
106
+ expected_instances_per_file = len(instances) // len(output_files) + 500 # Over-allocation to avoid resizing
107
+ for output_file in output_files:
108
+ h5_writers.append({
109
+ 'handle' : h5py.File(output_file + ".hdf5", 'w'),
110
+ 'input_ids' : np.zeros([expected_instances_per_file, max_seq_length], dtype="int32"),
111
+ 'input_mask' : np.zeros([expected_instances_per_file, max_seq_length], dtype="int32"),
112
+ 'segment_ids' : np.zeros([expected_instances_per_file, max_seq_length], dtype="int32"),
113
+ 'masked_lm_positions' : np.zeros([expected_instances_per_file, max_predictions_per_seq], dtype="int32"),
114
+ 'masked_lm_ids' : np.zeros([expected_instances_per_file, max_predictions_per_seq], dtype="int32"),
115
+ 'next_sentence_labels' : np.zeros(expected_instances_per_file, dtype="int32"),
116
+ 'len' : 0 })
117
+
118
+ writer_index = 0
119
+
120
+ total_written = 0
121
+
122
+ features_h5 = collections.OrderedDict()
123
+
124
+ for (inst_index, instance) in enumerate(instances):
125
+ input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
126
+ input_mask = [1] * len(input_ids)
127
+ segment_ids = list(instance.segment_ids)
128
+ assert len(input_ids) <= max_seq_length
129
+
130
+ while len(input_ids) < max_seq_length:
131
+ input_ids.append(0)
132
+ input_mask.append(0)
133
+ segment_ids.append(0)
134
+
135
+ assert len(input_ids) == max_seq_length
136
+ assert len(input_mask) == max_seq_length
137
+ assert len(segment_ids) == max_seq_length
138
+
139
+ masked_lm_positions = list(instance.masked_lm_positions)
140
+ masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
141
+ masked_lm_weights = [1.0] * len(masked_lm_ids)
142
+
143
+ while len(masked_lm_positions) < max_predictions_per_seq:
144
+ masked_lm_positions.append(0)
145
+ masked_lm_ids.append(0)
146
+ masked_lm_weights.append(0.0)
147
+
148
+ next_sentence_label = 1 if instance.is_random_next else 0
149
+
150
+ h5_writers[writer_index]['input_ids'][inst_index] = input_ids
151
+ h5_writers[writer_index]['input_mask'][inst_index] = input_mask
152
+ h5_writers[writer_index]['segment_ids'][inst_index] = segment_ids
153
+ h5_writers[writer_index]['masked_lm_positions'][inst_index] = masked_lm_positions
154
+ h5_writers[writer_index]['masked_lm_ids'][inst_index] = masked_lm_ids
155
+ h5_writers[writer_index]['next_sentence_labels'][inst_index] = next_sentence_label
156
+ h5_writers[writer_index]['len'] += 1
157
+
158
+ writer_index = (writer_index + 1) % len(h5_writers)
159
+
160
+ total_written += 1
161
+
162
+ if inst_index < 20:
163
+ tf.compat.v1.logging.info("*** Example ***")
164
+ tf.compat.v1.logging.info("tokens: %s" % " ".join(
165
+ [tokenization.printable_text(x) for x in instance.tokens]))
166
+
167
+ print("saving data")
168
+ for h5_writer in h5_writers:
169
+ my_size = h5_writer['len']
170
+ h5_writer['handle'].create_dataset('input_ids', data=h5_writer['input_ids'][:my_size], dtype='i2', compression=hdf5_compression_method)
171
+ h5_writer['handle'].create_dataset('input_mask', data=h5_writer['input_mask'][:my_size], dtype='i1', compression=hdf5_compression_method)
172
+ h5_writer['handle'].create_dataset('segment_ids', data=h5_writer['segment_ids'][:my_size], dtype='i1', compression=hdf5_compression_method)
173
+ h5_writer['handle'].create_dataset('masked_lm_positions', data=h5_writer['masked_lm_positions'][:my_size], dtype='i2', compression=hdf5_compression_method)
174
+ h5_writer['handle'].create_dataset('masked_lm_ids', data=h5_writer['masked_lm_ids'][:my_size], dtype='i2', compression=hdf5_compression_method)
175
+ h5_writer['handle'].create_dataset('next_sentence_labels', data=h5_writer['next_sentence_labels'][:my_size], dtype='i1', compression=hdf5_compression_method)
176
+ h5_writer['handle'].flush()
177
+ h5_writer['handle'].close()
178
+
179
+ tf.compat.v1.logging.info("Wrote %d total instances", total_written)
180
+
181
+
182
+ def create_int_feature(values):
183
+ feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
184
+ return feature
185
+
186
+ def create_float_feature(values):
187
+ feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
188
+ return feature
189
+
190
+ def create_training_instances(input_files, tokenizer, max_seq_length,
191
+ dupe_factor, short_seq_prob, masked_lm_prob,
192
+ max_predictions_per_seq, rng):
193
+ """Create `TrainingInstance`s from raw text."""
194
+ all_documents = [[]]
195
+
196
+ # Input file format:
197
+ # (1) One sentence per line. These should ideally be actual sentences, not
198
+ # entire paragraphs or arbitrary spans of text. (Because we use the
199
+ # sentence boundaries for the "next sentence prediction" task).
200
+ # (2) Blank lines between documents. Document boundaries are needed so
201
+ # that the "next sentence prediction" task doesn't span between documents.
202
+ for input_file in input_files:
203
+ with tf.compat.v1.gfile.GFile(input_file, "r") as reader:
204
+ while True:
205
+ line = tokenization.convert_to_unicode(reader.readline())
206
+ if not line:
207
+ break
208
+ line = line.strip()
209
+
210
+ # Empty lines are used as document delimiters
211
+ if not line:
212
+ all_documents.append([])
213
+ tokens = tokenizer.tokenize(line)
214
+ if tokens:
215
+ all_documents[-1].append(tokens)
216
+
217
+ # Remove empty documents
218
+ all_documents = [x for x in all_documents if x]
219
+ rng.shuffle(all_documents)
220
+
221
+ vocab_words = list(tokenizer.vocab.keys())
222
+ instances = []
223
+ for _ in range(dupe_factor):
224
+ for document_index in range(len(all_documents)):
225
+ instances.extend(
226
+ create_instances_from_document(
227
+ all_documents, document_index, max_seq_length, short_seq_prob,
228
+ masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
229
+
230
+ rng.shuffle(instances)
231
+ return instances
232
+
233
+
234
+ def create_instances_from_document(
235
+ all_documents, document_index, max_seq_length, short_seq_prob,
236
+ masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
237
+ """Creates `TrainingInstance`s for a single document."""
238
+ document = all_documents[document_index]
239
+
240
+ # Account for [CLS], [SEP], [SEP]
241
+ max_num_tokens = max_seq_length - 3
242
+
243
+ # We *usually* want to fill up the entire sequence since we are padding
244
+ # to `max_seq_length` anyways, so short sequences are generally wasted
245
+ # computation. However, we *sometimes*
246
+ # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
247
+ # sequences to minimize the mismatch between pre-training and fine-tuning.
248
+ # The `target_seq_length` is just a rough target however, whereas
249
+ # `max_seq_length` is a hard limit.
250
+ target_seq_length = max_num_tokens
251
+ if rng.random() < short_seq_prob:
252
+ target_seq_length = rng.randint(2, max_num_tokens)
253
+
254
+ # We DON'T just concatenate all of the tokens from a document into a long
255
+ # sequence and choose an arbitrary split point because this would make the
256
+ # next sentence prediction task too easy. Instead, we split the input into
257
+ # segments "A" and "B" based on the actual "sentences" provided by the user
258
+ # input.
259
+ instances = []
260
+ current_chunk = []
261
+ current_length = 0
262
+ i = 0
263
+ while i < len(document):
264
+ segment = document[i]
265
+ current_chunk.append(segment)
266
+ current_length += len(segment)
267
+ if i == len(document) - 1 or current_length >= target_seq_length:
268
+ if current_chunk:
269
+ # `a_end` is how many segments from `current_chunk` go into the `A`
270
+ # (first) sentence.
271
+ a_end = 1
272
+ if len(current_chunk) >= 2:
273
+ a_end = rng.randint(1, len(current_chunk) - 1)
274
+
275
+ tokens_a = []
276
+ for j in range(a_end):
277
+ tokens_a.extend(current_chunk[j])
278
+
279
+ tokens_b = []
280
+ # Random next
281
+ is_random_next = False
282
+ if len(current_chunk) == 1 or rng.random() < 0.5:
283
+ is_random_next = True
284
+ target_b_length = target_seq_length - len(tokens_a)
285
+
286
+ # This should rarely go for more than one iteration for large
287
+ # corpora. However, just to be careful, we try to make sure that
288
+ # the random document is not the same as the document
289
+ # we're processing.
290
+ for _ in range(10):
291
+ random_document_index = rng.randint(0, len(all_documents) - 1)
292
+ if random_document_index != document_index:
293
+ break
294
+
295
+ random_document = all_documents[random_document_index]
296
+ random_start = rng.randint(0, len(random_document) - 1)
297
+ for j in range(random_start, len(random_document)):
298
+ tokens_b.extend(random_document[j])
299
+ if len(tokens_b) >= target_b_length:
300
+ break
301
+ # We didn't actually use these segments so we "put them back" so
302
+ # they don't go to waste.
303
+ num_unused_segments = len(current_chunk) - a_end
304
+ i -= num_unused_segments
305
+ # Actual next
306
+ else:
307
+ is_random_next = False
308
+ for j in range(a_end, len(current_chunk)):
309
+ tokens_b.extend(current_chunk[j])
310
+ truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
311
+
312
+ assert len(tokens_a) >= 1
313
+ assert len(tokens_b) >= 1
314
+
315
+ tokens = []
316
+ segment_ids = []
317
+ tokens.append("[CLS]")
318
+ segment_ids.append(0)
319
+ for token in tokens_a:
320
+ tokens.append(token)
321
+ segment_ids.append(0)
322
+
323
+ tokens.append("[SEP]")
324
+ segment_ids.append(0)
325
+
326
+ for token in tokens_b:
327
+ tokens.append(token)
328
+ segment_ids.append(1)
329
+ tokens.append("[SEP]")
330
+ segment_ids.append(1)
331
+
332
+ (tokens, masked_lm_positions,
333
+ masked_lm_labels) = create_masked_lm_predictions(
334
+ tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
335
+ instance = TrainingInstance(
336
+ tokens=tokens,
337
+ segment_ids=segment_ids,
338
+ is_random_next=is_random_next,
339
+ masked_lm_positions=masked_lm_positions,
340
+ masked_lm_labels=masked_lm_labels)
341
+ instances.append(instance)
342
+ current_chunk = []
343
+ current_length = 0
344
+ i += 1
345
+
346
+ return instances
347
+
348
+ MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
349
+ ["index", "label"])
350
+
351
+ def create_masked_lm_predictions(tokens, masked_lm_prob,
352
+ max_predictions_per_seq, vocab_words, rng):
353
+ """Creates the predictions for the masked LM objective."""
354
+
355
+ cand_indexes = []
356
+ for (i, token) in enumerate(tokens):
357
+ if token == "[CLS]" or token == "[SEP]":
358
+ continue
359
+ cand_indexes.append(i)
360
+
361
+ rng.shuffle(cand_indexes)
362
+
363
+ output_tokens = list(tokens)
364
+
365
+ num_to_predict = min(max_predictions_per_seq,
366
+ max(1, int(round(len(tokens) * masked_lm_prob))))
367
+
368
+ masked_lms = []
369
+ covered_indexes = set()
370
+ for index in cand_indexes:
371
+ if len(masked_lms) >= num_to_predict:
372
+ break
373
+ if index in covered_indexes:
374
+ continue
375
+ covered_indexes.add(index)
376
+
377
+ masked_token = None
378
+ # 80% of the time, replace with [MASK]
379
+ if rng.random() < 0.8:
380
+ masked_token = "[MASK]"
381
+ else:
382
+ # 10% of the time, keep original
383
+ if rng.random() < 0.5:
384
+ masked_token = tokens[index]
385
+ # 10% of the time, replace with random word
386
+ else:
387
+ masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
388
+
389
+ output_tokens[index] = masked_token
390
+
391
+ masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
392
+
393
+ masked_lms = sorted(masked_lms, key=lambda x: x.index)
394
+
395
+ masked_lm_positions = []
396
+ masked_lm_labels = []
397
+ for p in masked_lms:
398
+ masked_lm_positions.append(p.index)
399
+ masked_lm_labels.append(p.label)
400
+
401
+ return (output_tokens, masked_lm_positions, masked_lm_labels)
402
+
403
+
404
+ def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
405
+ """Truncates a pair of sequences to a maximum sequence length."""
406
+ while True:
407
+ total_length = len(tokens_a) + len(tokens_b)
408
+ if total_length <= max_num_tokens:
409
+ break
410
+
411
+ trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
412
+ assert len(trunc_tokens) >= 1
413
+
414
+ # We want to sometimes truncate from the front and sometimes from the
415
+ # back to add more randomness and avoid biases.
416
+ if rng.random() < 0.5:
417
+ del trunc_tokens[0]
418
+ else:
419
+ trunc_tokens.pop()
420
+
421
+
422
+ def main(_):
423
+ tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
424
+
425
+ tokenizer = tokenization.FullTokenizer(
426
+ vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
427
+
428
+ input_files = []
429
+ for input_pattern in FLAGS.input_file.split(","):
430
+ input_files.extend(tf.compat.v1.gfile.Glob(input_pattern))
431
+
432
+ tf.compat.v1.logging.info("*** Reading from input files ***")
433
+ for input_file in input_files:
434
+ tf.compat.v1.logging.info(" %s", input_file)
435
+
436
+ rng = random.Random(FLAGS.random_seed)
437
+ instances = create_training_instances(
438
+ input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,
439
+ FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,
440
+ rng)
441
+
442
+ output_files = FLAGS.output_file.split(",")
443
+ tf.compat.v1.logging.info("*** Writing to output files ***")
444
+ for output_file in output_files:
445
+ tf.compat.v1.logging.info(" %s", output_file)
446
+
447
+ write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,
448
+ FLAGS.max_predictions_per_seq, output_files)
449
+
450
+
451
+ if __name__ == "__main__":
452
+ flags.mark_flag_as_required("input_file")
453
+ flags.mark_flag_as_required("output_file")
454
+ flags.mark_flag_as_required("vocab_file")
455
+ tf.compat.v1.app.run()
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/create_pretraining_data_wrapper.sh ADDED
@@ -0,0 +1,30 @@
1
+ #!/bin/bash
2
+ # Copyright (c) 2019-2022 NVIDIA CORPORATION. All rights reserved.
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
16
+
17
+ INPUT=${1}
18
+ OUTPUT=${2}/$(basename $INPUT)
19
+ VOCAB=${3}
20
+
21
+ python3 ${SCRIPT_DIR}/create_pretraining_data.py \
22
+ --input_file=${INPUT} \
23
+ --output_file=${OUTPUT} \
24
+ --vocab_file=${VOCAB} \
25
+ --do_lower_case=True \
26
+ --max_seq_length=512 \
27
+ --max_predictions_per_seq=76 \
28
+ --masked_lm_prob=0.15 \
29
+ --random_seed=12345 \
30
+ --dupe_factor=10
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/pick_eval_samples.py ADDED
@@ -0,0 +1,83 @@
1
+ """Script for picking certain number of samples.
2
+ """
3
+
4
+ import argparse
5
+ import time
6
+ import logging
7
+ import collections
8
+ import h5py
9
+ import numpy as np
10
+
11
+ parser = argparse.ArgumentParser(
12
+ description="Eval sample picker for BERT.")
13
+ parser.add_argument(
14
+ '--input_hdf5_file',
15
+ type=str,
16
+ default='',
17
+ help='Input hdf5_file path')
18
+ parser.add_argument(
19
+ '--output_hdf5_file',
20
+ type=str,
21
+ default='',
22
+ help='Output hdf5_file path')
23
+ parser.add_argument(
24
+ '--num_examples_to_pick',
25
+ type=int,
26
+ default=10000,
27
+ help='Number of examples to pick')
28
+ parser.add_argument(
29
+ '--max_seq_length',
30
+ type=int,
31
+ default=512,
32
+ help='The maximum number of tokens within a sequence.')
33
+ parser.add_argument(
34
+ '--max_predictions_per_seq',
35
+ type=int,
36
+ default=76,
37
+ help='The maximum number of predictions within a sequence.')
38
+ args = parser.parse_args()
39
+
40
+ max_seq_length = args.max_seq_length
41
+ max_predictions_per_seq = args.max_predictions_per_seq
42
+ logging.basicConfig(level=logging.INFO)
43
+
44
+ if __name__ == '__main__':
45
+ tic = time.time()
46
+ h5_ifile = h5py.File(args.input_hdf5_file, 'r')
47
+ num_examples = h5_ifile.get('next_sentence_labels').shape[0]
48
+
49
+ input_ids = np.zeros([args.num_examples_to_pick, max_seq_length], dtype="int16")
50
+ input_mask = np.zeros([args.num_examples_to_pick, max_seq_length], dtype="int8")
51
+ segment_ids = np.zeros([args.num_examples_to_pick, max_seq_length], dtype="int8")
52
+ masked_lm_positions = np.zeros([args.num_examples_to_pick, max_predictions_per_seq], dtype="int16")
53
+ masked_lm_ids = np.zeros([args.num_examples_to_pick, max_predictions_per_seq], dtype="int16")
54
+ next_sentence_labels = np.zeros(args.num_examples_to_pick, dtype="int8")
55
+
56
+ # hdf5_compression_method = "gzip"
57
+ hdf5_compression_method = None
58
+ i = 0
59
+ pick_ratio = num_examples / args.num_examples_to_pick
60
+ num_examples_picked = 0
61
+ for i in range(args.num_examples_to_pick):
62
+ idx = int(i * pick_ratio)
63
+ input_ids[i,:] = h5_ifile['input_ids'][idx,:]
64
+ input_mask[i,:] = h5_ifile['input_mask'][idx,:]
65
+ segment_ids[i,:] = h5_ifile['segment_ids'][idx,:]
66
+ masked_lm_positions[i,:] = h5_ifile['masked_lm_positions'][idx,:]
67
+ masked_lm_ids[i,:] = h5_ifile['masked_lm_ids'][idx,:]
68
+ next_sentence_labels[i] = h5_ifile['next_sentence_labels'][idx]
69
+ num_examples_picked += 1
70
+
71
+ h5_writer = h5py.File(args.output_hdf5_file+".hdf5", 'w')
72
+ h5_writer.create_dataset('input_ids', data=input_ids, dtype='i2', compression=hdf5_compression_method)
73
+ h5_writer.create_dataset('input_mask', data=input_mask, dtype='i1', compression=hdf5_compression_method)
74
+ h5_writer.create_dataset('segment_ids', data=segment_ids, dtype='i1', compression=hdf5_compression_method)
75
+ h5_writer.create_dataset('masked_lm_positions', data=masked_lm_positions, dtype='i2', compression=hdf5_compression_method)
76
+ h5_writer.create_dataset('masked_lm_ids', data=masked_lm_ids, dtype='i2', compression=hdf5_compression_method)
77
+ h5_writer.create_dataset('next_sentence_labels', data=next_sentence_labels, dtype='i1', compression=hdf5_compression_method)
78
+ h5_writer.flush()
79
+ h5_writer.close()
80
+
81
+ toc = time.time()
82
+ logging.info("Picked %d examples out of %d samples in %.2f sec",
83
+ args.num_examples_to_pick, num_examples, toc - tic)
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/pick_eval_samples_varlength.py ADDED
@@ -0,0 +1,76 @@
1
+ """Script for picking certain number of samples.
2
+ """
3
+
4
+ import argparse
5
+ import time
6
+ import logging
7
+ import collections
8
+ import h5py
9
+ import numpy as np
10
+
11
+ parser = argparse.ArgumentParser(
12
+ description="Eval sample picker for BERT.")
13
+ parser.add_argument(
14
+ '--input_hdf5_file',
15
+ type=str,
16
+ default='',
17
+ help='Input hdf5_file path')
18
+ parser.add_argument(
19
+ '--output_hdf5_file',
20
+ type=str,
21
+ default='',
22
+ help='Output hdf5_file path')
23
+ parser.add_argument(
24
+ '--num_examples_to_pick',
25
+ type=int,
26
+ default=10000,
27
+ help='Number of examples to pick')
28
+ parser.add_argument(
29
+ '--max_seq_length',
30
+ type=int,
31
+ default=512,
32
+ help='The maximum number of tokens within a sequence.')
33
+ parser.add_argument(
34
+ '--max_predictions_per_seq',
35
+ type=int,
36
+ default=76,
37
+ help='The maximum number of predictions within a sequence.')
38
+ args = parser.parse_args()
39
+
40
+ max_seq_length = args.max_seq_length
41
+ max_predictions_per_seq = args.max_predictions_per_seq
42
+ logging.basicConfig(level=logging.INFO)
43
+
44
+ if __name__ == '__main__':
45
+ tic = time.time()
46
+ h5_ifile = h5py.File(args.input_hdf5_file, 'r')
47
+ num_examples = h5_ifile.get('next_sentence_labels').shape[0]
48
+
49
+ # hdf5_compression_method = "gzip"
50
+ hdf5_compression_method = None
51
+
52
+ h5_writer = h5py.File(args.output_hdf5_file+".hdf5", 'w')
53
+ input_ids = h5_writer.create_dataset('input_ids', (args.num_examples_to_pick,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method)
54
+ segment_ids = h5_writer.create_dataset('segment_ids', (args.num_examples_to_pick,), dtype=h5py.vlen_dtype(np.dtype('int8')), compression=hdf5_compression_method)
55
+ masked_lm_positions = h5_writer.create_dataset('masked_lm_positions', (args.num_examples_to_pick,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method)
56
+ masked_lm_ids = h5_writer.create_dataset('masked_lm_ids', (args.num_examples_to_pick,), dtype=h5py.vlen_dtype(np.dtype('int16')), compression=hdf5_compression_method)
57
+ next_sentence_labels = h5_writer.create_dataset('next_sentence_labels', data=np.zeros(args.num_examples_to_pick, dtype="int8"), dtype='i1', compression=hdf5_compression_method)
58
+
59
+ i = 0
60
+ pick_ratio = num_examples / args.num_examples_to_pick
61
+ num_examples_picked = 0
62
+ for i in range(args.num_examples_to_pick):
63
+ idx = int(i * pick_ratio)
64
+ input_ids[i] = h5_ifile['input_ids'][idx, :sum(h5_ifile['input_mask'][idx])]
65
+ segment_ids[i] = h5_ifile['segment_ids'][idx, :sum(h5_ifile['input_mask'][idx])]
66
+ masked_lm_positions[i] = h5_ifile['masked_lm_positions'][idx, :sum(h5_ifile['masked_lm_positions'][idx]!=0)]
67
+ masked_lm_ids[i] = h5_ifile['masked_lm_ids'][idx, :sum(h5_ifile['masked_lm_positions'][idx]!=0)]
68
+ next_sentence_labels[i] = h5_ifile['next_sentence_labels'][idx]
69
+ num_examples_picked += 1
70
+
71
+ h5_writer.flush()
72
+ h5_writer.close()
73
+
74
+ toc = time.time()
75
+ logging.info("Picked %d examples out of %d samples in %.2f sec",
76
+ args.num_examples_to_pick, num_examples, toc - tic)
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/prepare_data.sh ADDED
@@ -0,0 +1,154 @@
1
+ #!/bin/bash
2
+ # Copyright (c) 2019-2022 NVIDIA CORPORATION. All rights reserved.
3
+
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ ###############################################################################
16
+ # Copyright (c) 2022, Habana Labs Ltd. All rights reserved.
17
+ ###############################################################################
18
+
19
+ function usage()
20
+ {
21
+ cat << HEREDOC
22
+
23
+ Usage: $progname [-o|--outputdir PATH] [-p|--shards N] [-s|--skip-download] [-h|--help]
24
+
25
+ optional arguments:
26
+ -h, --help show this help message and exit
27
+ -o, --outputdir PATH pass in the location of the resulting dataset
28
+ -s, --skip-download skip downloading raw files from GDrive (assuming it already has been done)
29
+ -p, --shards number of resulting shards. For small scales (less than 256 nodes) use 2048. For scales >256, 4320 is recommended (default 4320)
30
+
31
+ HEREDOC
32
+ }
33
+
34
+ SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
35
+
36
+ #if no arguments passed
37
+ DATADIR=/workspace/bert_data
38
+ SKIP=0
39
+ SHARDS=4320
40
+
41
+ #parse passed arguments
42
+ while [[ $# -gt 0 ]]; do
43
+ key="$1"
44
+
45
+ case $key in
46
+ -h|--help)
47
+ usage
48
+ exit 0
49
+ ;;
50
+ -o|--outputdir)
51
+ DATADIR="$2"
52
+ shift # past argument
53
+ shift # past value
54
+ ;;
55
+ -p|--shards)
56
+ SHARDS="$2"
57
+ shift # past argument
58
+ shift # past value
59
+ ;;
60
+ -s|--skip-download)
61
+ SKIP=1
62
+ shift
63
+ ;;
64
+ *) # unknown option
65
+ usage
66
+ exit 1
67
+ ;;
68
+ esac
69
+ done
70
+
71
+
72
+ echo "Preparing Mlperf BERT dataset in ${DATADIR}"
73
+ mkdir -p ${DATADIR}
74
+
75
+ if (( SKIP==0 )) ; then
76
+
77
+ mkdir -p ${DATADIR}/phase1 && cd ${DATADIR}/phase1
78
+ ### Download
79
+ # bert_config.json
80
+ gdown https://drive.google.com/uc?id=1fbGClQMi2CoMv7fwrwTC5YYPooQBdcFW
81
+ # vocab.txt
82
+ gdown https://drive.google.com/uc?id=1USK108J6hMM_d27xCHi738qBL8_BT1u1
83
+
84
+ ### Download dataset
85
+ mkdir -p ${DATADIR}/download && cd ${DATADIR}/download
86
+ # md5 sums
87
+ gdown https://drive.google.com/uc?id=1tmMgLwoBvbEJEHXh77sqrXYw5RpqT8R_
88
+ # processed chunks
89
+ gdown https://drive.google.com/uc?id=14xV2OUGSQDG_yDBrmbSdcDC-QGeqpfs_
90
+ # unpack results and verify md5sums
91
+ tar -xzf results_text.tar.gz && (cd results4 && md5sum --check ../bert_reference_results_text_md5.txt)
92
+
93
+
94
+ ### Download TF1 checkpoint
95
+ mkdir -p ${DATADIR}/phase1 && cd ${DATADIR}/phase1
96
+ # model.ckpt-28252.data-00000-of-00001
97
+ gdown https://drive.google.com/uc?id=1chiTBljF0Eh1U5pKs6ureVHgSbtU8OG_
98
+ # model.ckpt-28252.index
99
+ gdown https://drive.google.com/uc?id=1Q47V3K3jFRkbJ2zGCrKkKk-n0fvMZsa0
100
+ # model.ckpt-28252.meta
101
+ gdown https://drive.google.com/uc?id=1vAcVmXSLsLeQ1q7gvHnQUSth5W_f_pwv
102
+
103
+ cd ${DATADIR}
104
+
105
+ fi
106
+ ### Create HDF5 files for training
107
+ mkdir -p ${DATADIR}/hdf5/training
108
+ bash ${SCRIPT_DIR}/parallel_create_hdf5.sh -i ${DATADIR}/download/results4 -o ${DATADIR}/hdf5/training -v ${DATADIR}/phase1/vocab.txt
109
+
110
+ ### Chop HDF5 files into chunks
111
+ ulimit -n 10000 # handles potential OSError Too many open files
112
+ python3 ${SCRIPT_DIR}/chop_hdf5_files.py \
113
+ --num_shards ${SHARDS} \
114
+ --input_hdf5_dir ${DATADIR}/hdf5/training \
115
+ --output_hdf5_dir ${DATADIR}/hdf5/training-${SHARDS}
116
+
117
+ ### Convert fixed length to variable length format
118
+ mkdir -p ${DATADIR}/hdf5/training-${SHARDS}/hdf5_${SHARDS}_shards_varlength
119
+ CPUS=$( ls -d /sys/devices/system/cpu/cpu[[:digit:]]* | wc -w )
120
+ CPUS=$((CPUS / 2))
121
+ ls -1 ${DATADIR}/hdf5/training-${SHARDS}/hdf5_${SHARDS}_shards_uncompressed | \
122
+ xargs --max-args=1 --max-procs=${CPUS} -I{} python3 ${SCRIPT_DIR}/convert_fixed2variable.py \
123
+ --input_hdf5_file ${DATADIR}/hdf5/training-${SHARDS}/hdf5_${SHARDS}_shards_uncompressed/{} \
124
+ --output_hdf5_file ${DATADIR}/hdf5/training-${SHARDS}/hdf5_${SHARDS}_shards_varlength/{}
125
+
126
+ #### Create full HDF5 files for evaluation
127
+ mkdir -p ${DATADIR}/hdf5/eval
128
+ python3 ${SCRIPT_DIR}/create_pretraining_data.py \
129
+ --input_file=${DATADIR}/download/results4/eval.txt \
130
+ --output_file=${DATADIR}/hdf5/eval/eval_all \
131
+ --vocab_file=${DATADIR}/phase1/vocab.txt \
132
+ --do_lower_case=True \
133
+ --max_seq_length=512 \
134
+ --max_predictions_per_seq=76 \
135
+ --masked_lm_prob=0.15 \
136
+ --random_seed=12345 \
137
+ --dupe_factor=10
138
+
139
+ #### pick 10k samples for evaluation
140
+ python3 ${SCRIPT_DIR}/pick_eval_samples.py \
141
+ --input_hdf5_file=${DATADIR}/hdf5/eval/eval_all.hdf5 \
142
+ --output_hdf5_file=${DATADIR}/hdf5/eval/part_eval_10k \
143
+ --num_examples_to_pick=10000
144
+
145
+ #### Convert fixed length to variable length format
146
+ mkdir -p ${DATADIR}/hdf5/eval_varlength
147
+ python3 ${SCRIPT_DIR}/convert_fixed2variable.py --input_hdf5_file ${DATADIR}/hdf5/eval/part_eval_10k.hdf5 \
148
+ --output_hdf5_file ${DATADIR}/hdf5/eval_varlength/part_eval_10k.hdf5
149
+
150
+ #### Convert Tensorflow checkpoint to Pytorch one
151
+ python3 ${SCRIPT_DIR}/../convert_tf_checkpoint.py \
152
+ --tf_checkpoint ${DATADIR}/phase1/model.ckpt-28252 \
153
+ --bert_config_path ${DATADIR}/phase1/bert_config.json \
154
+ --output_checkpoint ${DATADIR}/phase1/model.ckpt-28252.pt
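For reference, a typical invocation of prepare_data.sh above. The output directory shown is the script's own default, and 2048 is the shard count the help text suggests for smaller scales:

    # Download, shard and convert the MLPerf BERT data.
    bash prepare_data.sh --outputdir /workspace/bert_data --shards 2048
    # Re-run the processing steps later without re-downloading the raw files:
    bash prepare_data.sh --outputdir /workspace/bert_data --shards 2048 --skip-download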
docker/bloom13b/Model-References/MLPERF3.1/Training/benchmarks/bert/implementations/PyTorch/input_preprocessing/tokenization.py ADDED
@@ -0,0 +1,413 @@
1
+ """Tokenization classes."""
2
+
3
+ from __future__ import absolute_import
4
+ from __future__ import division
5
+ from __future__ import print_function
6
+
7
+ import collections
8
+ import re
9
+ import unicodedata
10
+
11
+ from absl import flags
12
+ import six
13
+ import tensorflow.compat.v1 as tf
14
+
15
+ FLAGS = flags.FLAGS
16
+
17
+ flags.DEFINE_bool(
18
+ "preserve_unused_tokens", False,
19
+ "If True, Wordpiece tokenization will not be applied to words in the vocab."
20
+ )
21
+
22
+ _UNUSED_TOKEN_RE = re.compile("^\\[unused\\d+\\]$")
23
+
24
+
25
+ def preserve_token(token, vocab):
26
+ """Returns True if the token should forgo tokenization and be preserved."""
27
+ if not FLAGS.preserve_unused_tokens:
28
+ return False
29
+ if token not in vocab:
30
+ return False
31
+ return bool(_UNUSED_TOKEN_RE.search(token))
32
+
33
+
34
+ def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
35
+ """Checks whether the casing config is consistent with the checkpoint name."""
36
+
37
+ # The casing has to be passed in by the user and there is no explicit check
38
+ # as to whether it matches the checkpoint. The casing information probably
39
+ # should have been stored in the bert_config.json file, but it's not, so
40
+ # we have to heuristically detect it to validate.
41
+
42
+ if not init_checkpoint:
43
+ return
44
+
45
+ m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
46
+ if m is None:
47
+ return
48
+
49
+ model_name = m.group(1)
50
+
51
+ lower_models = [
52
+ "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
53
+ "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
54
+ ]
55
+
56
+ cased_models = [
57
+ "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
58
+ "multi_cased_L-12_H-768_A-12"
59
+ ]
60
+
61
+ is_bad_config = False
62
+ if model_name in lower_models and not do_lower_case:
63
+ is_bad_config = True
64
+ actual_flag = "False"
65
+ case_name = "lowercased"
66
+ opposite_flag = "True"
67
+
68
+ if model_name in cased_models and do_lower_case:
69
+ is_bad_config = True
70
+ actual_flag = "True"
71
+ case_name = "cased"
72
+ opposite_flag = "False"
73
+
74
+ if is_bad_config:
75
+ raise ValueError(
76
+ "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
77
+ "However, `%s` seems to be a %s model, so you "
78
+ "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
79
+ "how the model was pre-training. If this error is wrong, please "
80
+ "just comment out this check." % (actual_flag, init_checkpoint,
81
+ model_name, case_name, opposite_flag))
82
+
83
+
84
+ def convert_to_unicode(text):
85
+ """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
86
+ if six.PY3:
87
+ if isinstance(text, str):
88
+ return text
89
+ elif isinstance(text, bytes):
90
+ return text.decode("utf-8", "ignore")
91
+ else:
92
+ raise ValueError("Unsupported string type: %s" % (type(text)))
93
+ elif six.PY2:
94
+ if isinstance(text, str):
95
+ return text.decode("utf-8", "ignore")
96
+ elif isinstance(text, unicode):
97
+ return text
98
+ else:
99
+ raise ValueError("Unsupported string type: %s" % (type(text)))
100
+ else:
101
+ raise ValueError("Not running on Python2 or Python 3?")
102
+
103
+
104
+ def printable_text(text):
105
+ """Returns text encoded in a way suitable for print or `tf.logging`."""
106
+
107
+ # These functions want `str` for both Python2 and Python3, but in one case
108
+ # it's a Unicode string and in the other it's a byte string.
109
+ if six.PY3:
110
+ if isinstance(text, str):
111
+ return text
112
+ elif isinstance(text, bytes):
113
+ return text.decode("utf-8", "ignore")
114
+ else:
115
+ raise ValueError("Unsupported string type: %s" % (type(text)))
116
+ elif six.PY2:
117
+ if isinstance(text, str):
118
+ return text
119
+ elif isinstance(text, unicode):
120
+ return text.encode("utf-8")
121
+ else:
122
+ raise ValueError("Unsupported string type: %s" % (type(text)))
123
+ else:
124
+ raise ValueError("Not running on Python2 or Python 3?")
125
+
126
+
127
+ def load_vocab(vocab_file):
128
+ """Loads a vocabulary file into a dictionary."""
129
+ vocab = collections.OrderedDict()
130
+ with tf.gfile.GFile(vocab_file, "r") as reader:
131
+ while True:
132
+ token = convert_to_unicode(reader.readline())
133
+ if not token:
134
+ break
135
+ token = token.strip()
136
+ if token not in vocab:
137
+ vocab[token] = len(vocab)
138
+ return vocab
139
+
140
+
141
+ def convert_by_vocab(vocab, items):
142
+ """Converts a sequence of [tokens|ids] using the vocab."""
143
+ output = []
144
+ for item in items:
145
+ output.append(vocab[item])
146
+ return output
147
+
148
+
149
+ def convert_tokens_to_ids(vocab, tokens):
150
+ return convert_by_vocab(vocab, tokens)
151
+
152
+
153
+ def convert_ids_to_tokens(inv_vocab, ids):
154
+ return convert_by_vocab(inv_vocab, ids)
155
+
156
+
157
+ def whitespace_tokenize(text):
158
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
159
+ text = text.strip()
160
+ if not text:
161
+ return []
162
+ tokens = text.split()
163
+ return tokens
164
+
165
+
166
+ class FullTokenizer(object):
167
+ """Runs end-to-end tokenziation."""
168
+
169
+ def __init__(self, vocab_file, do_lower_case=True):
170
+ self.vocab = load_vocab(vocab_file)
171
+ self.inv_vocab = {v: k for k, v in self.vocab.items()}
172
+ self.basic_tokenizer = BasicTokenizer(
173
+ do_lower_case=do_lower_case, vocab=self.vocab)
174
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
175
+
176
+ def tokenize(self, text):
177
+ split_tokens = []
178
+ for token in self.basic_tokenizer.tokenize(text):
179
+ if preserve_token(token, self.vocab):
180
+ split_tokens.append(token)
181
+ continue
182
+ for sub_token in self.wordpiece_tokenizer.tokenize(token):
183
+ split_tokens.append(sub_token)
184
+
185
+ return split_tokens
186
+
187
+ def convert_tokens_to_ids(self, tokens):
188
+ return convert_by_vocab(self.vocab, tokens)
189
+
190
+ def convert_ids_to_tokens(self, ids):
191
+ return convert_by_vocab(self.inv_vocab, ids)
192
+
193
+
194
+ class BasicTokenizer(object):
195
+ """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
196
+
197
+ def __init__(self, do_lower_case=True, vocab=tuple()):
198
+ """Constructs a BasicTokenizer.
199
+
200
+ Args:
201
+ do_lower_case: Whether to lower case the input.
202
+ vocab: A container of tokens to not mutate during tokenization.
203
+ """
204
+ self.do_lower_case = do_lower_case
205
+ self.vocab = vocab
206
+
207
+ def tokenize(self, text):
208
+ """Tokenizes a piece of text."""
209
+ text = convert_to_unicode(text)
210
+ text = self._clean_text(text)
211
+
212
+ # This was added on November 1st, 2018 for the multilingual and Chinese
213
+ # models. This is also applied to the English models now, but it doesn't
214
+ # matter since the English models were not trained on any Chinese data
215
+ # and generally don't have any Chinese data in them (there are Chinese
216
+ # characters in the vocabulary because Wikipedia does have some Chinese
217
+ # words in the English Wikipedia.).
218
+ text = self._tokenize_chinese_chars(text)
219
+
220
+ orig_tokens = whitespace_tokenize(text)
221
+ split_tokens = []
222
+ for token in orig_tokens:
223
+ if preserve_token(token, self.vocab):
224
+ split_tokens.append(token)
225
+ continue
226
+ if self.do_lower_case:
227
+ token = token.lower()
228
+ token = self._run_strip_accents(token)
229
+ split_tokens.extend(self._run_split_on_punc(token))
230
+
231
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
232
+ return output_tokens
233
+
234
+ def _run_strip_accents(self, text):
235
+ """Strips accents from a piece of text."""
236
+ text = unicodedata.normalize("NFD", text)
237
+ output = []
238
+ for char in text:
239
+ cat = unicodedata.category(char)
240
+ if cat == "Mn":
241
+ continue
242
+ output.append(char)
243
+ return "".join(output)
244
+
245
+ def _run_split_on_punc(self, text):
246
+ """Splits punctuation on a piece of text."""
247
+ chars = list(text)
248
+ i = 0
249
+ start_new_word = True
250
+ output = []
251
+ while i < len(chars):
252
+ char = chars[i]
253
+ if _is_punctuation(char):
254
+ output.append([char])
255
+ start_new_word = True
256
+ else:
257
+ if start_new_word:
258
+ output.append([])
259
+ start_new_word = False
260
+ output[-1].append(char)
261
+ i += 1
262
+
263
+ return ["".join(x) for x in output]
264
+
265
+ def _tokenize_chinese_chars(self, text):
266
+ """Adds whitespace around any CJK character."""
267
+ output = []
268
+ for char in text:
269
+ cp = ord(char)
270
+ if self._is_chinese_char(cp):
271
+ output.append(" ")
272
+ output.append(char)
273
+ output.append(" ")
274
+ else:
275
+ output.append(char)
276
+ return "".join(output)
277
+
278
+ def _is_chinese_char(self, cp):
279
+ """Checks whether CP is the codepoint of a CJK character."""
280
+ # This defines a "chinese character" as anything in the CJK Unicode block:
281
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
282
+ #
283
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
284
+ # despite its name. The modern Korean Hangul alphabet is a different block,
285
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
286
+ # space-separated words, so they are not treated specially and handled
287
+ # like all of the other languages.
288
+ if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
289
+ (cp >= 0x3400 and cp <= 0x4DBF) or #
290
+ (cp >= 0x20000 and cp <= 0x2A6DF) or #
291
+ (cp >= 0x2A700 and cp <= 0x2B73F) or #
292
+ (cp >= 0x2B740 and cp <= 0x2B81F) or #
293
+ (cp >= 0x2B820 and cp <= 0x2CEAF) or
294
+ (cp >= 0xF900 and cp <= 0xFAFF) or #
295
+ (cp >= 0x2F800 and cp <= 0x2FA1F)): #
296
+ return True
297
+
298
+ return False
299
+
300
+ def _clean_text(self, text):
301
+ """Performs invalid character removal and whitespace cleanup on text."""
302
+ output = []
303
+ for char in text:
304
+ cp = ord(char)
305
+ if cp == 0 or cp == 0xfffd or _is_control(char):
306
+ continue
307
+ if _is_whitespace(char):
308
+ output.append(" ")
309
+ else:
310
+ output.append(char)
311
+ return "".join(output)
312
+
313
+
314
+ class WordpieceTokenizer(object):
315
+ """Runs WordPiece tokenization."""
316
+
317
+ def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
318
+ self.vocab = vocab
319
+ self.unk_token = unk_token
320
+ self.max_input_chars_per_word = max_input_chars_per_word
321
+
322
+ def tokenize(self, text):
323
+ """Tokenizes a piece of text into its word pieces.
324
+
325
+ This uses a greedy longest-match-first algorithm to perform tokenization
326
+ using the given vocabulary.
327
+
328
+ For example:
329
+ input = "unaffable"
330
+ output = ["un", "##aff", "##able"]
331
+
332
+ Args:
333
+ text: A single token or whitespace separated tokens. This should have
334
+ already been passed through `BasicTokenizer`.
335
+
336
+ Returns:
337
+ A list of wordpiece tokens.
338
+ """
339
+
340
+ text = convert_to_unicode(text)
341
+
342
+ output_tokens = []
343
+ for token in whitespace_tokenize(text):
344
+ chars = list(token)
345
+ if len(chars) > self.max_input_chars_per_word:
346
+ output_tokens.append(self.unk_token)
347
+ continue
348
+
349
+ is_bad = False
350
+ start = 0
351
+ sub_tokens = []
352
+ while start < len(chars):
353
+ end = len(chars)
354
+ cur_substr = None
355
+ while start < end:
356
+ substr = "".join(chars[start:end])
357
+ if start > 0:
358
+ substr = "##" + substr
359
+ if substr in self.vocab:
360
+ cur_substr = substr
361
+ break
362
+ end -= 1
363
+ if cur_substr is None:
364
+ is_bad = True
365
+ break
366
+ sub_tokens.append(cur_substr)
367
+ start = end
368
+
369
+ if is_bad:
370
+ output_tokens.append(self.unk_token)
371
+ else:
372
+ output_tokens.extend(sub_tokens)
373
+ return output_tokens
374
+
375
+
376
+ def _is_whitespace(char):
377
+ """Checks whether `chars` is a whitespace character."""
378
+ # \t, \n, and \r are technically control characters but we treat them
379
+ # as whitespace since they are generally considered as such.
380
+ if char == " " or char == "\t" or char == "\n" or char == "\r":
381
+ return True
382
+ cat = unicodedata.category(char)
383
+ if cat == "Zs":
384
+ return True
385
+ return False
386
+
387
+
388
+ def _is_control(char):
389
+ """Checks whether `chars` is a control character."""
390
+ # These are technically control characters but we count them as whitespace
391
+ # characters.
392
+ if char == "\t" or char == "\n" or char == "\r":
393
+ return False
394
+ cat = unicodedata.category(char)
395
+ if cat in ("Cc", "Cf"):
396
+ return True
397
+ return False
398
+
399
+
400
+ def _is_punctuation(char):
401
+ """Checks whether `chars` is a punctuation character."""
402
+ cp = ord(char)
403
+ # We treat all non-letter/number ASCII as punctuation.
404
+ # Characters such as "^", "$", and "`" are not in the Unicode
405
+ # Punctuation class, but we treat them as punctuation anyway, for
406
+ # consistency.
407
+ if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
408
+ (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
409
+ return True
410
+ cat = unicodedata.category(char)
411
+ if cat.startswith("P"):
412
+ return True
413
+ return False
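
A minimal, self-contained sketch of the greedy longest-match-first lookup that `WordpieceTokenizer.tokenize` above describes; the toy vocabulary is purely illustrative, not the real BERT vocab.

# Greedy longest-match-first WordPiece lookup (illustrative toy vocab).
def wordpiece_sketch(token, vocab, unk="[UNK]"):
    chars = list(token)
    pieces, start = [], 0
    while start < len(chars):
        end = len(chars)
        cur = None
        while start < end:
            sub = "".join(chars[start:end])
            if start > 0:
                sub = "##" + sub
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk]          # whole token becomes [UNK] if any piece is unknown
        pieces.append(cur)
        start = end
    return pieces

print(wordpiece_sketch("unaffable", {"un", "##aff", "##able"}))  # ['un', '##aff', '##able']
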
docker/intel_code/.gitignore ADDED
@@ -0,0 +1,3 @@
1
+ llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/bgpt-env/
2
+ llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/wandb/
3
+ llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/lm-evaluation-harness/
docker/intel_code/README.md ADDED
@@ -0,0 +1,2 @@
1
+ # intel_code
2
+ Intel code for Llama
docker/intel_code/llama13b/Megatron-DeepSpeed/CODEOWNERS ADDED
@@ -0,0 +1 @@
1
+ * @jeffra @samyam @tjruwase @ShadenSmith @conglongli @awan-10 @cli99 @eltonzheng @minjiaz @RezaYazdaniAminabadi @duli2012 @mrwyattii @yaozhewei @arashb @xiaoxiawu-microsoft @guanhuawang
docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_gpt.py ADDED
@@ -0,0 +1,385 @@
1
+ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
2
+ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
3
+
4
+ """Pretrain GPT"""
5
+
6
+ import torch
7
+ import math
8
+ from functools import partial
9
+ from megatron import get_args
10
+ from megatron import print_rank_0
11
+ from megatron import get_timers
12
+ from megatron import get_tokenizer
13
+ from megatron.core import mpu, tensor_parallel
14
+ from megatron.core.enums import ModelType
15
+ from megatron.data.gpt_dataset import build_train_valid_test_datasets
16
+ from megatron.model import GPTModel, GPTModelPipe
17
+ from megatron.training import pretrain
18
+ from megatron.utils import get_ltor_masks_and_position_ids
19
+ from megatron.utils import average_losses_across_data_parallel_group, update_rotary_pos_emb
20
+ from megatron.arguments import core_transformer_config_from_args
21
+
22
+ import deepspeed
23
+ from deepspeed.runtime.utils import see_memory_usage
24
+ from deepspeed.accelerator.real_accelerator import get_accelerator
25
+ import os
26
+ import subprocess
27
+
28
+ from torch import nn
29
+ import torch.nn.functional as F
30
+
31
+
32
+ def model_provider(pre_process=True, post_process=True):
33
+ """Build the model."""
34
+
35
+ print_rank_0('building GPT model ...')
36
+ see_memory_usage(f"Before Building Model", force=True)
37
+
38
+ if get_accelerator().device_name() == "hpu":
39
+ os.environ['DEEPSPEED_HPU_SYNC_INSIDE_INIT'] = "1"
40
+ os.environ['DEEPSPEED_SYNC_MICRO_BATCH_STEP'] = "1"
41
+
42
+ args = get_args()
43
+ config = core_transformer_config_from_args(args)
44
+ with deepspeed.zero.Init(sequence_data_parallel_group=mpu.get_sequence_data_parallel_group(),
45
+ remote_device=None if args.remote_device == 'none' else args.remote_device,
46
+ config_dict_or_path=args.deepspeed_config,
47
+ enabled=args.zero_stage == 3,
48
+ mpu=mpu):
49
+ if args.deepspeed and not args.no_pipeline_parallel:
50
+ model = GPTModelPipe(
51
+ config=config,
52
+ num_tokentypes=0,
53
+ parallel_output=True
54
+ )
55
+ # This is a hack to give us a reference to get_batch_pipe from within training.py
56
+ # We need to call model.set_batch_fn after deepspeed.initialize
57
+ model._megatron_batch_fn = get_batch_pipe
58
+
59
+ # Predompute the attention mask and store it in args. This avoids having to
60
+ # pipeline it as an activation during training. The mask is constant, and thus
61
+ # we can reuse it.
62
+ attention_mask = torch.tril(torch.ones(
63
+ (1, args.seq_length, args.seq_length), device=get_accelerator().current_device_name())).view(
64
+ 1, 1, args.seq_length, args.seq_length)
65
+
66
+ # Convert attention mask to binary:
67
+ attention_mask = (attention_mask < 0.5)
68
+ if args.fp16:
69
+ attention_mask = attention_mask.half()
70
+ elif args.bf16:
71
+ attention_mask = attention_mask.bfloat16()
72
+
73
+ if args.mask_tensor_adding:
74
+ args.attn_mask = attention_mask * -10000.0
75
+ else:
76
+ # Attention mask must be bool.
77
+ args.attn_mask = attention_mask.to(torch.bool)
78
+
79
+ # For pretraining, since the sequence length is fixed, cache the rotary embedding in args to avoid communicating it around.
80
+ if args.use_rotary_position_embeddings:
81
+ update_rotary_pos_emb(args.seq_length)
82
+
83
+ else:
84
+ model = GPTModel(
85
+ config=config,
86
+ num_tokentypes=0,
87
+ parallel_output=True,
88
+ pre_process=pre_process,
89
+ post_process=post_process
90
+ )
91
+ see_memory_usage(f"After Building Model", force=True)
92
+ return model
93
+
94
+
95
+ def get_batch(data_iterator):
96
+ """Generate a batch"""
97
+ args = get_args()
98
+ tokenizer = get_tokenizer()
99
+
100
+ # Items and their type.
101
+ keys = ['text']
102
+ datatype = torch.int64
103
+
104
+ # Broadcast data.
105
+ if data_iterator is not None:
106
+ data = next(data_iterator)
107
+ else:
108
+ data = None
109
+ data_b = tensor_parallel.broadcast_data(keys, data, datatype)
110
+
111
+ # Unpack.
112
+ tokens_ = data_b['text'].long()
113
+ if not args.use_seq_len_plus_one_tokens:
114
+ labels = torch.roll(tokens_, shifts=-1, dims=1)
115
+ labels[:, -1] = -1
116
+ tokens = tokens_
117
+ else:
118
+ labels = tokens_[:, 1:].contiguous()
119
+ tokens = tokens_[:, :-1].contiguous()
120
+
121
+ # Get the masks and position ids.
122
+ skip_mask = args.use_flash_attn or args.use_flash_attn_triton
123
+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
124
+ tokens,
125
+ tokenizer.eod,
126
+ args.reset_position_ids,
127
+ args.reset_attention_mask,
128
+ args.eod_mask_loss,
129
+ skip_mask,
130
+ labels = labels,
131
+ dummy_sample= None,)
132
+
133
+ # For DS's sequence parallel
134
+ seq_parallel_world_size = mpu.get_sequence_parallel_world_size()
135
+ seq_parallel_world_rank = mpu.get_sequence_parallel_rank()
136
+
137
+ # For Megatron's sequence parallel
138
+ if args.sequence_parallel:
139
+ seq_parallel_world_size = mpu.get_tensor_model_parallel_world_size()
140
+ seq_parallel_world_rank = mpu.get_tensor_model_parallel_rank()
141
+ seq_length = tokens.size(1)
142
+
143
+ assert seq_length % seq_parallel_world_size == 0
144
+ sub_seq_length = seq_length // seq_parallel_world_size
145
+ sub_seq_start = seq_parallel_world_rank * sub_seq_length
146
+ sub_seq_end = (seq_parallel_world_rank + 1) * sub_seq_length
147
+
148
+ tokens[tokens == -1] = 0
149
+ labels[labels == -1] = 0
150
+
151
+ tokens = tokens[:, sub_seq_start:sub_seq_end]
152
+ position_ids = position_ids[:, sub_seq_start:sub_seq_end]
153
+ # For DS's sequence parallel
154
+ if mpu.get_sequence_parallel_world_size() > 1:
155
+ labels = labels[:, sub_seq_start:sub_seq_end]
156
+
157
+ return tokens, labels, loss_mask, attention_mask, position_ids
158
+
159
+ def data_post_process(data, data_sampler_state_dict):
160
+ args = get_args()
161
+ if args.data_efficiency_curriculum_learning:
162
+ if 'seqlen_truncate' in data_sampler_state_dict['current_difficulties']:
163
+ args.data_efficiency_curriculum_learning_seqlen_type = 'seqlen_truncate'
164
+ current_seqlen = data_sampler_state_dict['current_difficulties']['seqlen_truncate']
165
+ if current_seqlen < args.seq_length:
166
+ data['text'] = data['text'][:, :(current_seqlen+1)].contiguous()
167
+ elif 'seqlen_reshape' in data_sampler_state_dict['current_difficulties']:
168
+ args.data_efficiency_curriculum_learning_seqlen_type = 'seqlen_reshape'
169
+ current_seqlen = data_sampler_state_dict['current_difficulties']['seqlen_reshape']
170
+ if current_seqlen < args.seq_length:
171
+ orig_num_token = torch.numel(data['text'])
172
+ reshape_len = (data['text'].size()[1] // (current_seqlen+1)) * (current_seqlen+1)
173
+ data['text'] = torch.cat((data['text'][:, :reshape_len].contiguous().view(-1, current_seqlen+1),
174
+ data['text'][:, -(current_seqlen+1):]), 0).contiguous()
175
+ num_row = math.ceil(orig_num_token / (current_seqlen+1))
176
+ num_row = min(num_row, data['text'].size()[0])
177
+ if num_row > 1 and num_row % 2 != 0:
178
+ num_row -= 1
179
+ data['text'] = data['text'][:num_row, :].contiguous()
180
+ else:
181
+ args.data_efficiency_curriculum_learning_seqlen_type = None
182
+ return data
183
+
184
+ def get_batch_pipe(data):
185
+ """Modification of `get_batch` to work on `next(data_iterator)` instead of `data_iterator`"""
186
+ args = get_args()
187
+ tokenizer = get_tokenizer()
188
+
189
+ # Items and their type.
190
+ keys = ['text']
191
+ datatype = torch.int64
192
+
193
+ # Broadcast data.
194
+ data_b = tensor_parallel.broadcast_data(keys, data, datatype)
195
+
196
+ # Unpack.
197
+ tokens_ = data_b['text'].long()
198
+ if not args.use_seq_len_plus_one_tokens:
199
+ labels = torch.roll(tokens_, shifts=-1, dims=1)
200
+ labels[:, -1] = -1
201
+ tokens = tokens_
202
+ else:
203
+ labels = tokens_[:, 1:].contiguous()
204
+ tokens = tokens_[:, :-1].contiguous()
205
+
206
+ # Get the masks and position ids.
207
+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
208
+ tokens,
209
+ tokenizer.eod,
210
+ args.reset_position_ids,
211
+ args.reset_attention_mask,
212
+ args.eod_mask_loss,
213
+ labels = labels,
214
+ dummy_sample = None,)
215
+
216
+ tokens[tokens == -1] = 0
217
+ labels[labels == -1] = 0
218
+
219
+ if args.curriculum_learning_legacy and args.curriculum_seqlen < tokens.size()[1]:
220
+ # seqlen-based curriculum learning
221
+ # tokens, position_ids, labels, loss_mask have size [batch size, seqlen]
222
+ tokens = tokens[:, :args.curriculum_seqlen].contiguous()
223
+ position_ids = position_ids[:, :args.curriculum_seqlen].contiguous()
224
+ if labels is not None:
225
+ labels = labels[:, :args.curriculum_seqlen].contiguous()
226
+ loss_mask = loss_mask[:, :args.curriculum_seqlen].contiguous()
227
+
228
+ return (tokens, position_ids, attention_mask), (labels, loss_mask)
229
+
230
+
231
+ def loss_func(loss_mask, moe_loss, mos_loss, output_tensor):
232
+ args = get_args()
233
+ losses = output_tensor.float()
234
+ loss_mask = loss_mask.view(-1).float()
235
+ loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
236
+
237
+ # Reduce loss for logging.
238
+ averaged_loss = average_losses_across_data_parallel_group([loss])
239
+ if args.mos or args.kd:
240
+ # assert max(args.num_experts) >= 1
241
+ loss = loss + moe_loss + mos_loss
242
+ if args.mos:
243
+ return loss, {'total loss': loss, 'lm loss': averaged_loss[0], 'moe loss': moe_loss, 'mos loss': mos_loss}
244
+ elif args.kd:
245
+ return loss, {'total loss': loss, 'lm loss': averaged_loss[0], 'moe loss': moe_loss, 'kd loss': mos_loss}
246
+ print_rank_0('>>> total loss: {}, lm loss {}, kd loss {}'.format(loss, averaged_loss[0], mos_loss))
247
+ else:
248
+ if max(args.num_experts) <= 1:
249
+ return loss, {'lm loss': averaged_loss[0]}
250
+ else:
251
+ loss = loss + moe_loss
252
+ return loss, {'lm loss': averaged_loss[0], 'moe loss': moe_loss}
253
+
254
+ def calculate_mos_loss(args, stu_output, teacher_model, tokens, position_ids, attention_mask):
255
+ mos_loss = 0
256
+ alpha = args.kd_alpha_ce
257
+ beta = args.kd_beta_ce
258
+ kd_temp = args.kd_temp
259
+
260
+ if teacher_model:
261
+ with torch.no_grad():
262
+ if args.curriculum_learning_legacy and args.curriculum_seqlen < args.seq_length:
263
+ assert args.curriculum_seqlen is not None
264
+ curriculum_seqlen = args.curriculum_seqlen
265
+ tokens = tokens[:, :curriculum_seqlen].contiguous()
266
+ position_ids = position_ids[:, :curriculum_seqlen].contiguous()
267
+ attention_mask = attention_mask[:, :, :curriculum_seqlen, :curriculum_seqlen].contiguous()
268
+ # No need to truncate labels as we do not need it for the teacher logits
269
+ tea_output, tea_other_losses = teacher_model(tokens, position_ids, attention_mask)
270
+ assert stu_output.size() == tea_output.size(), 'teacher and student output should match in size. Student: {}, Teacher: {}, CL seq length {}'.format(stu_output.size(), tea_output.size(), args.curriculum_seqlen)
271
+
272
+ student_logits = F.log_softmax(stu_output / kd_temp, dim=2)
273
+ tea_logits = F.softmax(tea_output / kd_temp, dim=2)  # The target logits are expected to be probabilities. If we used log_softmax instead, we would need to set log_target=True when initializing the KLDivLoss.
274
+
275
+ mos_loss = kd_temp * kd_temp * nn.KLDivLoss(reduction='batchmean')(student_logits, tea_logits)
276
+
277
+ mos_loss = mos_loss.div(args.seq_length) * beta
278
+ return mos_loss
279
+
280
+ def forward_step(data_iterator, model):
281
+ """Forward step."""
282
+ args = get_args()
283
+ timers = get_timers()
284
+
285
+ # Get the batch.
286
+ timers('batch-generator', log_level=2).start()
287
+ tokens, labels, loss_mask, attention_mask, position_ids = get_batch(
288
+ data_iterator)
289
+ timers('batch-generator').stop()
290
+
291
+ if args.data_efficiency_curriculum_learning:
292
+ args.curriculum_seqlen = tokens.size()[1]
293
+ if hasattr(args, 'data_efficiency_curriculum_learning_seqlen_type') and \
294
+ args.data_efficiency_curriculum_learning_seqlen_type == 'seqlen_reshape':
295
+ args.data_efficiency_curriculum_learning_numel = torch.numel(tokens)
296
+
297
+ if args.mos or args.kd:
298
+ # The forward func can return either the loss or the logits, depending on whether or not labels are passed in.
299
+ stu_output, other_losses = model(tokens, position_ids, attention_mask)
300
+ if args.curriculum_learning_legacy and args.curriculum_seqlen < args.seq_length:
301
+ assert args.curriculum_seqlen is not None
302
+ labels = labels[:, :args.curriculum_seqlen].contiguous()
303
+ output_tensor = tensor_parallel.vocab_parallel_cross_entropy(stu_output.contiguous().float(), labels)
304
+ else:
305
+ output_tensor, other_losses = model(tokens, position_ids, attention_mask,
306
+ labels=labels)
307
+ if args.curriculum_learning_legacy and args.curriculum_seqlen < args.seq_length:
308
+ loss_mask = loss_mask[:, :args.curriculum_seqlen].contiguous()
309
+
310
+ moe_losses = []
311
+ for moe_loss in other_losses:
312
+ if moe_loss is not None:
313
+ moe_losses.append(moe_loss)
314
+ moe_loss = sum(moe_losses) * args.moe_loss_coeff
315
+
316
+ mos_loss = 0
317
+ if args.mos or args.kd:
318
+ assert model.training
319
+ if args.teacher_forward and args.teacher_model is not None:
320
+ mos_loss = calculate_mos_loss(args, stu_output,
321
+ args.teacher_model[0], tokens, position_ids, attention_mask)
322
+
323
+ # Output_tensor stores the standard loss; loss_func calculates the total loss.
324
+ return output_tensor, partial(loss_func, loss_mask, moe_loss, mos_loss)
325
+
326
+
327
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
328
+ """Build train, valid, and test datasets."""
329
+ args = get_args()
330
+
331
+ print_rank_0('> building train, validation, and test datasets '
332
+ 'for GPT ...')
333
+ train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
334
+ data_prefix=args.data_path,
335
+ data_impl=args.data_impl,
336
+ splits_string=args.split,
337
+ train_valid_test_num_samples=train_val_test_num_samples,
338
+ seq_length=args.seq_length,
339
+ seed=args.seed,
340
+ skip_warmup=(not args.mmap_warmup),
341
+ train_data_prefix=args.train_data_path,
342
+ valid_data_prefix=args.valid_data_path,
343
+ test_data_prefix=args.test_data_path,
344
+ data_cache_path=args.data_cache_path,
345
+ use_seq_len_plus_one_tokens=args.use_seq_len_plus_one_tokens)
346
+ print_rank_0("> finished creating GPT datasets ...")
347
+
348
+ return train_ds, valid_ds, test_ds
349
+
350
+
351
+ def command_exists(cmd):
352
+ result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
353
+ return result.wait() == 0
354
+
355
+
356
+ def git_ds_info():
357
+ from deepspeed.env_report import main as ds_report
358
+ ds_report()
359
+
360
+ # Write out version/git info
361
+ git_hash_cmd = "git rev-parse --short HEAD"
362
+ git_branch_cmd = "git rev-parse --abbrev-ref HEAD"
363
+ if command_exists('git'):
364
+ try:
365
+ result = subprocess.check_output(git_hash_cmd, shell=True)
366
+ git_hash = result.decode('utf-8').strip()
367
+ result = subprocess.check_output(git_branch_cmd, shell=True)
368
+ git_branch = result.decode('utf-8').strip()
369
+ except subprocess.CalledProcessError:
370
+ git_hash = "unknown"
371
+ git_branch = "unknown"
372
+ else:
373
+ git_hash = "unknown"
374
+ git_branch = "unknown"
375
+ print(f'**** Git info for Megatron: git_hash={git_hash} git_branch={git_branch} ****')
376
+
377
+
378
+ if __name__ == "__main__":
379
+ git_ds_info()
380
+ pretrain(train_valid_test_datasets_provider,
381
+ model_provider,
382
+ ModelType.encoder_or_decoder,
383
+ forward_step,
384
+ args_defaults={'tokenizer_type': 'GPT2BPETokenizer'},
385
+ data_post_process=data_post_process)
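
A standalone sketch of two details relied on above: the next-token label shift used in get_batch / get_batch_pipe, and the precomputed lower-triangular attention mask built in model_provider. The batch values and sequence length are made up for illustration.

import torch

# Next-token labels: shift left by one; the last position has no target.
tokens = torch.tensor([[5, 6, 7, 8]])              # [batch, seq]
labels = torch.roll(tokens, shifts=-1, dims=1)
labels[:, -1] = -1

# Causal mask precomputed once: True marks positions attention must NOT see.
seq_len = tokens.size(1)
mask = torch.tril(torch.ones(1, seq_len, seq_len)).view(1, 1, seq_len, seq_len)
mask = (mask < 0.5)
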
docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_gpt_core.py ADDED
@@ -0,0 +1,127 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ """Pretrain GPT"""
4
+
5
+ import torch
6
+ from functools import partial
7
+ from megatron import get_args
8
+ from megatron.arguments import core_transformer_config_from_args
9
+ from megatron import print_rank_0
10
+ from megatron import get_timers
11
+ from megatron import get_tokenizer
12
+ from megatron.core import tensor_parallel
13
+ from megatron.core.enums import ModelType
14
+ from megatron.data.gpt_dataset import build_train_valid_test_datasets
15
+ from megatron.core.models.gpt import GPTModel
16
+ from megatron.training import pretrain
17
+ from megatron.utils import get_ltor_masks_and_position_ids
18
+ from megatron.utils import average_losses_across_data_parallel_group
19
+
20
+ def model_provider(pre_process=True, post_process=True):
21
+ """Build the model."""
22
+
23
+ args = get_args()
24
+ config = core_transformer_config_from_args(args)
25
+
26
+ print_rank_0('building GPT model ...')
27
+ model = GPTModel(
28
+ config=config,
29
+ vocab_size=args.padded_vocab_size,
30
+ max_sequence_length=args.max_position_embeddings,
31
+ pre_process=pre_process,
32
+ post_process=post_process,
33
+ fp16_lm_cross_entropy=args.fp16_lm_cross_entropy,
34
+ parallel_output=True,
35
+ share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights
36
+ )
37
+ return model
38
+
39
+
40
+ def get_batch(data_iterator):
41
+ """Generate a batch"""
42
+ args = get_args()
43
+ tokenizer = get_tokenizer()
44
+
45
+ # Items and their type.
46
+ keys = ['text']
47
+ datatype = torch.int64
48
+
49
+ # Broadcast data.
50
+ if data_iterator is not None:
51
+ data = next(data_iterator)
52
+ else:
53
+ data = None
54
+ data_b = tensor_parallel.broadcast_data(keys, data, datatype)
55
+
56
+ # Unpack.
57
+ tokens_ = data_b['text'].long()
58
+ labels = tokens_[:, 1:].contiguous()
59
+ tokens = tokens_[:, :-1].contiguous()
60
+
61
+ # Get the masks and position ids.
62
+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
63
+ tokens,
64
+ tokenizer.eod,
65
+ args.reset_position_ids,
66
+ args.reset_attention_mask,
67
+ args.eod_mask_loss)
68
+
69
+ return tokens, labels, loss_mask, attention_mask, position_ids
70
+
71
+ def loss_func(loss_mask, output_tensor):
72
+ losses = output_tensor.float()
73
+ loss_mask = loss_mask.view(-1).float()
74
+ loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
75
+
76
+ # Reduce loss for logging.
77
+ averaged_loss = average_losses_across_data_parallel_group([loss])
78
+
79
+ return loss, {'lm loss': averaged_loss[0]}
80
+
81
+
82
+ def forward_step(data_iterator, model):
83
+ """Forward step."""
84
+ args = get_args()
85
+ timers = get_timers()
86
+
87
+ # Get the batch.
88
+ timers('batch-generator', log_level=2).start()
89
+ tokens, labels, loss_mask, attention_mask, position_ids = get_batch(
90
+ data_iterator)
91
+ timers('batch-generator').stop()
92
+
93
+ output_tensor = model(tokens, position_ids, attention_mask,
94
+ labels=labels)
95
+
96
+ return output_tensor, partial(loss_func, loss_mask)
97
+
98
+
99
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
100
+ """Build train, valid, and test datasets."""
101
+ args = get_args()
102
+
103
+ print_rank_0('> building train, validation, and test datasets '
104
+ 'for GPT ...')
105
+ train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
106
+ data_prefix=args.data_path,
107
+ data_impl=args.data_impl,
108
+ splits_string=args.split,
109
+ train_valid_test_num_samples=train_val_test_num_samples,
110
+ seq_length=args.seq_length,
111
+ seed=args.seed,
112
+ skip_warmup=(not args.mmap_warmup),
113
+ train_data_prefix=args.train_data_path,
114
+ valid_data_prefix=args.valid_data_path,
115
+ test_data_prefix=args.test_data_path)
116
+ print_rank_0("> finished creating GPT datasets ...")
117
+
118
+ return train_ds, valid_ds, test_ds
119
+
120
+
121
+ if __name__ == "__main__":
122
+
123
+ pretrain(train_valid_test_datasets_provider, model_provider,
124
+ ModelType.encoder_or_decoder,
125
+ forward_step,
126
+ args_defaults={'tokenizer_type': 'GPT2BPETokenizer'}
127
+ )
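
A tiny sketch of the masked token-mean that loss_func above computes; the per-token losses and the mask are made-up numbers.

import torch

per_token_loss = torch.tensor([[2.0, 1.0, 4.0]])
loss_mask = torch.tensor([[1.0, 1.0, 0.0]])        # zero out padded / EOD positions
loss = torch.sum(per_token_loss.view(-1) * loss_mask.view(-1)) / loss_mask.sum()
print(loss)                                         # tensor(1.5000)
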
docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_retro.py ADDED
@@ -0,0 +1,123 @@
1
+ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ """Pretrain Retro."""
4
+
5
+ from functools import partial
6
+ import torch
7
+
8
+ from megatron import get_args, get_retro_args
9
+ from megatron import get_timers
10
+ from megatron import get_tokenizer
11
+ from megatron import print_rank_0
12
+ from megatron.core import mpu, tensor_parallel
13
+ from megatron.core.enums import ModelType
14
+ from megatron.model import GPTModel
15
+ from megatron.training import pretrain
16
+ from megatron.utils import get_ltor_masks_and_position_ids
17
+ from tools.retro.query.retro_dataset import get_retro_datasets
18
+
19
+ from pretrain_gpt import (
20
+ loss_func,
21
+ model_provider,
22
+ train_valid_test_datasets_provider as standard_datasets_provider,
23
+ )
24
+
25
+
26
+ def get_batch(data_iterator):
27
+ """Generate a batch"""
28
+ args = get_args()
29
+ retro_args = get_retro_args()
30
+ tokenizer = get_tokenizer()
31
+
32
+ # Items and their type.
33
+ keys = ['text']
34
+ datatype = torch.int64
35
+
36
+ if args.retro_add_retriever:
37
+ keys += 'neighbor_tokens',
38
+
39
+ # Broadcast data.
40
+ if data_iterator is not None:
41
+ data = next(data_iterator)
42
+ else:
43
+ data = None
44
+
45
+ data_b = tensor_parallel.broadcast_data(keys, data, datatype)
46
+
47
+ # Unpack.
48
+ tokens_ = data_b['text'].long()
49
+ labels = tokens_[:, 1:].contiguous()
50
+ tokens = tokens_[:, :-1].contiguous()
51
+
52
+ if args.retro_add_retriever:
53
+ # note: [bs * l * k, r]
54
+ # note: 2x == neighbor, continuation
55
+ neighbor_tokens = data_b['neighbor_tokens'] \
56
+ .view(-1, retro_args.retro_gpt_retrieved_length).long()
57
+
58
+ # Get the masks and position ids.
59
+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
60
+ tokens,
61
+ tokenizer.eod,
62
+ args.reset_position_ids,
63
+ args.reset_attention_mask,
64
+ args.eod_mask_loss)
65
+
66
+ if args.retro_add_retriever:
67
+ _, _, neighbor_position_ids = get_ltor_masks_and_position_ids(
68
+ neighbor_tokens,
69
+ tokenizer.eod,
70
+ args.reset_position_ids,
71
+ args.reset_attention_mask,
72
+ args.eod_mask_loss)
73
+ neighbor_attention_mask = None
74
+ return tokens, labels, loss_mask, attention_mask, position_ids, \
75
+ neighbor_tokens, neighbor_attention_mask, neighbor_position_ids
76
+ else:
77
+ return tokens, labels, loss_mask, attention_mask, position_ids
78
+
79
+
80
+ def forward_step(data_iterator, model):
81
+ """Forward step."""
82
+ args = get_args()
83
+ timers = get_timers()
84
+
85
+ # Get the batch.
86
+ timers('batch-generator').start()
87
+ if args.retro_add_retriever:
88
+ tokens, labels, loss_mask, attention_mask, position_ids, \
89
+ neighbor_tokens, neighbor_attention_mask, neighbor_position_ids = \
90
+ get_batch(data_iterator)
91
+ else:
92
+ tokens, labels, loss_mask, attention_mask, position_ids = get_batch(
93
+ data_iterator)
94
+ neighbor_tokens, neighbor_attention_mask, neighbor_position_ids = \
95
+ None, None, None
96
+ timers('batch-generator').stop()
97
+
98
+ output_tensor = model(tokens, position_ids, attention_mask,
99
+ retriever_input_ids=neighbor_tokens,
100
+ retriever_position_ids=neighbor_position_ids,
101
+ retriever_attn_mask=neighbor_attention_mask,
102
+ labels=labels)
103
+
104
+ return output_tensor, partial(loss_func, loss_mask)
105
+
106
+
107
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
108
+ """Build train, valid, and test datasets."""
109
+ args = get_args()
110
+ if args.retro_add_retriever:
111
+ return get_retro_datasets()
112
+ else:
113
+ return standard_datasets_provider(train_val_test_num_samples)
114
+
115
+
116
+ if __name__ == "__main__":
117
+
118
+ pretrain(train_valid_test_datasets_provider,
119
+ model_provider,
120
+ ModelType.retro_decoder,
121
+ forward_step,
122
+ args_defaults={'tokenizer_type': 'GPT2BPETokenizer',
123
+ 'retro_add_retriever': True})
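
A small sketch of the neighbor-token flattening noted in get_batch above ("[bs * l * k, r]"): the retrieved neighbors are viewed as one row per neighbor chunk. All sizes here are assumptions for illustration.

import torch

bs, l, k, r = 2, 4, 2, 8                   # batch, chunks, neighbors, retrieved length
neighbor_tokens = torch.zeros(bs, l, k, r, dtype=torch.long)
flat = neighbor_tokens.view(-1, r)         # [bs * l * k, r] -> torch.Size([16, 8])
print(flat.shape)
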
docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_t5.py ADDED
@@ -0,0 +1,163 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ """Pretrain T5"""
4
+
5
+ from functools import partial
6
+
7
+ import torch
8
+
9
+ from megatron import (
10
+ get_args,
11
+ get_timers,
12
+ print_rank_0
13
+ )
14
+ from megatron.core import tensor_parallel
15
+ from megatron.core.enums import ModelType
16
+ from megatron.data.dataset_utils import build_train_valid_test_datasets
17
+ from megatron.model import T5Model
18
+ from megatron.training import pretrain
19
+ from megatron.utils import average_losses_across_data_parallel_group
20
+ from megatron.arguments import core_transformer_config_from_args
21
+
22
+
23
+ """
24
+ Pipeline parallelism for T5
25
+ ===========================
26
+
27
+ T5 is a model architecture with both encoder and decoder blocks.
28
+ Consequently, pipeline parallelism is implemented slightly differently
29
+ compared to architectures like GPT and BERT.
30
+
31
+ In particular, when pipeline_model_parallel_world_size > 1, each stage
32
+ either executes an encoder block or a decoder block. The
33
+ --pipeline-model-parallel-split-rank argument controls the rank at which
34
+ the split happens: all ranks lower than this argument execute the
35
+ encoder block, and all ranks equal to or higher than this argument value
36
+ execute the decoder block.
37
+
38
+ In the encoder section of the model, only one tensor is sent downstream:
39
+ the intermediate encoder_hidden_state. In the decoder section of the
40
+ model, two tensors are sent downstream in the forward pass: the fully
41
+ computed encoder_hidden_state, and the intermediate decoder_hidden_state.
42
+
43
+ In particular, these are the shapes of the tensors sent between
44
+ different workers:
45
+ If rank is in decoder section:
46
+ intermediate decoder_hidden_state (pre-transpose),
47
+ complete encoder_hidden_state (post-transpose).
48
+ If rank is at boundary between encoder and decoder sections:
49
+ complete encoder_hidden_state (post-transpose).
50
+ If rank is in encoder section:
51
+ intermediate encoder_hidden_state (pre-transpose).
52
+
53
+ Additionally, we have code in the backward_step function in schedules.py
54
+ to accumulate the encoder_hidden_state gradient across skip connections
55
+ (encoder_hidden_state fed in as input to each layer in the decoder).
56
+ """
57
+
58
+
59
+ def model_provider(pre_process=True, post_process=True,
60
+ add_encoder=True, add_decoder=True):
61
+ """Build the model."""
62
+
63
+ print_rank_0('building T5 model ...')
64
+ config = core_transformer_config_from_args(get_args())
65
+ model = T5Model(config=config,
66
+ num_tokentypes=0,
67
+ parallel_output=True,
68
+ pre_process=pre_process,
69
+ post_process=post_process,
70
+ add_encoder=add_encoder,
71
+ add_decoder=add_decoder)
72
+ return model
73
+
74
+
75
+ def get_batch(data_iterator):
76
+ """Build the batch."""
77
+
78
+ keys = ['text_enc', 'text_dec', 'labels', 'loss_mask',
79
+ 'enc_mask', 'dec_mask', 'enc_dec_mask']
80
+ datatype = torch.int64
81
+
82
+ # Broadcast data.
83
+ if data_iterator is not None:
84
+ data = next(data_iterator)
85
+ else:
86
+ data = None
87
+ data_b = tensor_parallel.broadcast_data(keys, data, datatype)
88
+
89
+ # Unpack.
90
+ tokens_enc = data_b['text_enc'].long()
91
+ tokens_dec = data_b['text_dec'].long()
92
+ labels = data_b['labels'].long()
93
+ loss_mask = data_b['loss_mask'].float()
94
+
95
+ enc_mask = (data_b['enc_mask'] < 0.5)
96
+ dec_mask = (data_b['dec_mask'] < 0.5)
97
+ enc_dec_mask = (data_b['enc_dec_mask'] < 0.5)
98
+
99
+ return tokens_enc, tokens_dec, loss_mask, labels, \
100
+ enc_mask, dec_mask, enc_dec_mask
101
+
102
+
103
+ def loss_func(loss_mask, output_tensor):
104
+ lm_loss_ = output_tensor.float()
105
+ lm_loss = torch.sum(
106
+ lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
107
+
108
+ loss = lm_loss
109
+ averaged_losses = average_losses_across_data_parallel_group([lm_loss])
110
+
111
+ return loss, {'lm loss': averaged_losses[0]}
112
+
113
+
114
+ def forward_step(data_iterator, model):
115
+ """Forward step."""
116
+ args = get_args()
117
+ timers = get_timers()
118
+
119
+ # Get the batch.
120
+ timers('batch generator', log_level=2).start()
121
+ tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask \
122
+ = get_batch(data_iterator)
123
+ timers('batch generator').stop()
124
+
125
+ # Forward the model with lm_labels.
126
+ output_tensor = model(tokens_enc,
127
+ tokens_dec,
128
+ enc_mask,
129
+ dec_mask,
130
+ enc_dec_mask,
131
+ tokentype_ids=None,
132
+ lm_labels=lm_labels)
133
+
134
+ return output_tensor, partial(loss_func, loss_mask)
135
+
136
+
137
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
138
+ """Build train, valid, and test datasets."""
139
+ args = get_args()
140
+
141
+ print_rank_0('> building train, validation, and test datasets '
142
+ 'for T5 ...')
143
+ train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
144
+ data_prefix=args.data_path,
145
+ data_impl=args.data_impl,
146
+ splits_string=args.split,
147
+ train_valid_test_num_samples=train_val_test_num_samples,
148
+ max_seq_length=args.encoder_seq_length,
149
+ max_seq_length_dec=args.decoder_seq_length,
150
+ masked_lm_prob=args.mask_prob,
151
+ short_seq_prob=args.short_seq_prob,
152
+ seed=args.seed,
153
+ skip_warmup=(not args.mmap_warmup),
154
+ dataset_type='t5')
155
+ print_rank_0("> finished creating T5 datasets ...")
156
+
157
+ return train_ds, valid_ds, test_ds
158
+
159
+
160
+ if __name__ == "__main__":
161
+
162
+ pretrain(train_valid_test_datasets_provider, model_provider, ModelType.encoder_and_decoder,
163
+ forward_step, args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
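
A minimal sketch of the encoder/decoder split described in the module docstring above: pipeline ranks below --pipeline-model-parallel-split-rank hold encoder blocks, and the remaining ranks hold decoder blocks. The rank values are made up for illustration.

def stage_role(pipeline_rank, split_rank):
    return "encoder" if pipeline_rank < split_rank else "decoder"

print([stage_role(rank, split_rank=2) for rank in range(4)])
# ['encoder', 'encoder', 'decoder', 'decoder']
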
docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_vision_classify.py ADDED
@@ -0,0 +1,105 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ """Pretrain VIT"""
4
+
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from functools import partial
8
+ from megatron import get_args, get_timers, print_rank_0
9
+ from megatron.core.enums import ModelType
10
+ from megatron.data.vit_dataset import build_train_valid_datasets
11
+ from megatron.model.vision.classification import VitClassificationModel
12
+ from megatron.model.vision.classification import MitClassificationModel
13
+ from megatron.training import pretrain
14
+ from megatron.utils import average_losses_across_data_parallel_group
15
+ from megatron.arguments import core_transformer_config_from_args
16
+
17
+
18
+ def model_provider(pre_process=True, post_process=True):
19
+ """Build the model."""
20
+
21
+ args = get_args()
22
+ config = core_transformer_config_from_args(args)
23
+ if args.vision_backbone_type == 'vit':
24
+ print_rank_0("building VIT model ...")
25
+ model = VitClassificationModel(config=config,
26
+ num_classes=args.num_classes,
27
+ pre_process=pre_process,
28
+ post_process=post_process)
29
+ elif args.vision_backbone_type == 'mit':
30
+ print_rank_0("building MIT model ...")
31
+ model = MitClassificationModel(num_classes=args.num_classes,
32
+ pre_process=pre_process,
33
+ post_process=post_process)
34
+ else:
35
+ raise Exception('{} vision backbone is not supported.'.format(
36
+ args.vision_backbone_type))
37
+ return model
38
+
39
+
40
+ def get_batch(data_iterator):
41
+ """Build the batch."""
42
+ data = next(data_iterator)
43
+
44
+ # only data parallelism; no need for broadcast
45
+ images = data[0].cuda()
46
+ labels = data[1].cuda()
47
+
48
+ return images, labels
49
+
50
+
51
+ def loss_func(labels, output_tensor):
52
+ logits = output_tensor.contiguous().float()
53
+ loss = F.cross_entropy(logits, labels)
54
+
55
+ outputs = torch.argmax(logits, -1)
56
+ correct = (outputs == labels).float()
57
+ accuracy = torch.mean(correct)
58
+
59
+ averaged_loss = average_losses_across_data_parallel_group([loss, accuracy])
60
+
61
+ return loss, {"loss": averaged_loss[0], "accuracy": averaged_loss[1]}
62
+
63
+
64
+ def forward_step(data_iterator, model):
65
+ """Forward step."""
66
+ timers = get_timers()
67
+
68
+ # Get the batch.
69
+ timers("batch-generator", log_level=2).start()
70
+ (
71
+ images,
72
+ labels,
73
+ ) = get_batch(data_iterator)
74
+ timers("batch-generator").stop()
75
+
76
+ # Forward the model.
77
+ output_tensor = model(images)
78
+
79
+ return output_tensor, partial(loss_func, labels)
80
+
81
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
82
+ """Build train, valid, and test datasets."""
83
+ args = get_args()
84
+
85
+ print_rank_0(
86
+ "> building train, validation, and test datasets " "for VIT ..."
87
+ )
88
+ train_ds, valid_ds = build_train_valid_datasets(
89
+ data_path=args.data_path,
90
+ image_size=(args.img_h, args.img_w)
91
+ )
92
+ print_rank_0("> finished creating VIT datasets ...")
93
+
94
+ return train_ds, valid_ds, None
95
+
96
+
97
+ if __name__ == "__main__":
98
+
99
+ pretrain(
100
+ train_valid_test_datasets_provider,
101
+ model_provider,
102
+ ModelType.encoder_or_decoder,
103
+ forward_step,
104
+ args_defaults={'dataloader_type': 'cyclic', 'vision_pretraining': True}
105
+ )
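
A tiny sketch of the accuracy term computed in loss_func above; the logits and labels are made up.

import torch

logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
labels = torch.tensor([1, 1])
accuracy = (torch.argmax(logits, -1) == labels).float().mean()
print(accuracy)                                     # tensor(0.5000)
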
docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_vision_dino.py ADDED
@@ -0,0 +1,110 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ import torch
4
+ import torch.nn.functional as F
5
+ import torch.nn as nn
6
+ import numpy as np
7
+ import torch.distributed as dist
8
+ from functools import partial
9
+ from megatron import get_args, get_timers, print_rank_0
10
+ from megatron.core.enums import ModelType
11
+ from megatron.data.vit_dataset import build_train_valid_datasets
12
+ from megatron.model.vision.dino import DINOPretrainModel
13
+ from megatron.model.vision.knn_monitor import knn_predict, get_feature_bank
14
+ from megatron.training import pretrain
15
+ from megatron.utils import average_losses_across_data_parallel_group, unwrap_model
16
+ from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
17
+ from megatron.model import DistributedDataParallel as LocalDDP
18
+ from megatron.model import Float16Module
19
+ from megatron.arguments import core_transformer_config_from_args
20
+
21
+ def model_provider(pre_process=True, post_process=True):
22
+ """Build the model."""
23
+ config = core_transformer_config_from_args(get_args())
24
+ return DINOPretrainModel(config, pre_process=pre_process, post_process=post_process)
25
+
26
+ def get_batch(data_iterator):
27
+ """Build the batch."""
28
+ data = next(data_iterator)
29
+
30
+ # only data parallelism; no need for broadcast
31
+ if isinstance(data[0], list):
32
+ images = [aug.cuda() for aug in data[0]]
33
+ else:
34
+ images = data[0].cuda()
35
+ labels = data[1].cuda()
36
+
37
+ return images, labels
38
+
39
+
40
+ def loss_func(model, labels, output_tensor, collect_data=False):
41
+ args = get_args()
42
+
43
+ model = unwrap_model(
44
+ model,
45
+ (torchDDP, LocalDDP, Float16Module)
46
+ )
47
+ if model.training:
48
+ student_output, teacher_output = output_tensor
49
+ loss = model.dino_loss(student_output, teacher_output, args.curr_iteration)
50
+ averaged_loss = average_losses_across_data_parallel_group([loss])
51
+ return loss, {"loss": averaged_loss[0]}
52
+ else:
53
+ _, teacher_feature = output_tensor
54
+ feature_bank, feature_labels, classes = get_feature_bank()
55
+ feature = F.normalize(teacher_feature.float(), dim=1)
56
+
57
+ knn_accs = []
58
+ for k in [10, 20, 100, 200]:
59
+ pred_labels = knn_predict(feature, feature_bank,
60
+ feature_labels, classes, k, 0.07)
61
+ knn_acc = (pred_labels[:, 0] == labels).float().mean()
62
+ knn_accs.append(knn_acc)
63
+
64
+ averaged_loss = average_losses_across_data_parallel_group(knn_accs)
65
+ return 0, {"knn_acc_10": averaged_loss[0],
66
+ "knn_acc_20": averaged_loss[1],
67
+ "knn_acc_100": averaged_loss[2],
68
+ "knn_acc_200": averaged_loss[3]}
69
+
70
+
71
+ def forward_step(data_iterator, model):
72
+ """Forward step."""
73
+ timers = get_timers()
74
+
75
+ # Get the batch.
76
+ timers("batch-generator", log_level=2).start()
77
+ (
78
+ images,
79
+ labels,
80
+ ) = get_batch(data_iterator)
81
+ timers("batch-generator").stop()
82
+
83
+ return model(images), partial(loss_func, model, labels)
84
+
85
+
86
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
87
+ """Build train, valid, and test datasets."""
88
+ args = get_args()
89
+
90
+ print_rank_0(
91
+ "> building train, validation, and test datasets " "for VIT ..."
92
+ )
93
+ train_ds, valid_ds = build_train_valid_datasets(
94
+ data_path=args.data_path,
95
+ image_size=(args.img_h, args.img_w)
96
+ )
97
+ print_rank_0("> finished creating VIT datasets ...")
98
+
99
+ return train_ds, valid_ds, None
100
+
101
+
102
+ if __name__ == "__main__":
103
+ pretrain(
104
+ train_valid_test_datasets_provider,
105
+ model_provider,
106
+ ModelType.encoder_or_decoder,
107
+ forward_step,
108
+ args_defaults={'dataloader_type': 'cyclic', 'vision_pretraining': True}
109
+ )
110
+
docker/intel_code/llama13b/Megatron-DeepSpeed/pretrain_vision_inpaint.py ADDED
@@ -0,0 +1,140 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ """Pretrain VIT"""
4
+
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from functools import partial
8
+ from megatron import get_args, get_timers, print_rank_0, print_rank_last
9
+ from megatron.core.enums import ModelType
10
+ from megatron.data.vit_dataset import build_train_valid_datasets
11
+ from megatron.model.vision.inpainting import VitInpaintingModel
12
+ from megatron.model.vision.inpainting import MitInpaintingModel
13
+ from megatron.training import pretrain
14
+ from megatron.utils import average_losses_across_data_parallel_group
15
+ from tasks.vision.metrics import SSIM, PSNR
16
+ from megatron.arguments import core_transformer_config_from_args
17
+
18
+ def model_provider(pre_process=True, post_process=True):
19
+ """Build the model."""
20
+ args = get_args()
21
+ config = core_transformer_config_from_args(args)
22
+ if args.vision_backbone_type == 'vit':
23
+ model = VitInpaintingModel(config,
24
+ pre_process=pre_process,
25
+ post_process=post_process)
26
+ elif args.vision_backbone_type == 'mit':
27
+ model = MitInpaintingModel(pre_process=pre_process,
28
+ post_process=post_process)
29
+ else:
30
+ raise Exception('{} vision backbone is not supported.'.format(
31
+ args.vision_backbone_type))
32
+ return model
33
+
34
+
35
+ def get_batch(data_iterator):
36
+ """Build the batch."""
37
+ data = next(data_iterator)
38
+
39
+ # only data parallelism; no need for broadcast
40
+ images = data[0][0].cuda()
41
+ masks = data[0][1].cuda()
42
+ return images, masks
43
+
44
+
45
+ def loss_func(images, masks, masked_images, outputs, collect_data=False):
46
+ outputs = outputs.contiguous().float()
47
+ masks_flip = 1-masks
48
+ flip_masked_outputs = outputs.masked_fill(masks_flip.bool(), 0)
49
+ flip_masked_images = images.masked_fill(masks_flip.bool(), 0)
50
+
51
+ ssim_fun = SSIM()
52
+ psnr_fun = PSNR()
53
+
54
+ if not collect_data:
55
+ mask_count = torch.count_nonzero(masks)
56
+ loss = F.mse_loss(
57
+ flip_masked_outputs,
58
+ flip_masked_images.float(),
59
+ reduction="sum"
60
+ )
61
+ loss = loss/mask_count
62
+ ssim = ssim_fun(flip_masked_outputs, flip_masked_images.float())
63
+ psnr = psnr_fun(flip_masked_outputs, flip_masked_images.float())
64
+
65
+ averaged_loss = average_losses_across_data_parallel_group(
66
+ [loss, psnr, ssim]
67
+ )
68
+
69
+ return loss, {"loss": averaged_loss[0],
70
+ "psnr": averaged_loss[1],
71
+ 'ssim': averaged_loss[2]}
72
+ else:
73
+ synth_images = masked_images.float() + flip_masked_outputs
74
+ ssim = ssim_fun(synth_images, images.float())
75
+ psnr = psnr_fun(synth_images, images.float())
76
+ return torch.cat((images, masked_images, synth_images), dim=2), ssim, psnr
77
+
78
+
79
+ def forward_step(data_iterator, model):
80
+ """Forward step."""
81
+ timers = get_timers()
82
+
83
+ # Get the batch.
84
+ timers("batch-generator", log_level=2).start()
85
+ (
86
+ images,
87
+ masks,
88
+ ) = get_batch(data_iterator)
89
+ timers("batch-generator").stop()
90
+
91
+ masked_images = images.masked_fill(masks.bool(), 0)
92
+ outputs = model(masked_images)
93
+
94
+ # Forward mode
95
+ return outputs, partial(loss_func, images, masks, masked_images)
96
+
97
+
98
+ def process_non_loss_data(data, iteration, writer):
99
+ psnr_sum = 0
100
+ ssim_sum = 0
101
+ for (output_tb, ssim, psnr) in data:
102
+ output_tb[output_tb < 0] = 0
103
+ output_tb[output_tb > 1] = 1
104
+ writer.add_images("gt-input-output-vald", output_tb,
105
+ global_step=iteration, walltime=None,
106
+ dataformats='NCHW')
107
+ psnr_sum = psnr_sum + psnr.item()
108
+ ssim_sum = ssim_sum + ssim.item()
109
+ psnr = psnr_sum/len(data)
110
+ ssim = ssim_sum/len(data)
111
+ writer.add_scalar('PSNR generate value-validation', psnr, iteration)
112
+ writer.add_scalar('SSIM generate value-validation', ssim, iteration)
113
+
114
+
115
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
116
+ """Build train, valid, and test datasets."""
117
+ args = get_args()
118
+
119
+ print_rank_0(
120
+ "> building train, validation, and test datasets " "for VIT ..."
121
+ )
122
+ train_ds, valid_ds = build_train_valid_datasets(
123
+ data_path=args.data_path,
124
+ image_size=(args.img_h, args.img_w)
125
+ )
126
+ print_rank_0("> finished creating VIT datasets ...")
127
+
128
+ return train_ds, valid_ds, None
129
+
130
+
131
+ if __name__ == "__main__":
132
+
133
+ pretrain(
134
+ train_valid_test_datasets_provider,
135
+ model_provider,
136
+ ModelType.encoder_or_decoder,
137
+ forward_step,
138
+ process_non_loss_data,
139
+ args_defaults={'dataloader_type': 'cyclic', 'vision_pretraining': True}
140
+ )
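
A small sketch of the mask-normalized reconstruction loss in loss_func above: the squared error is summed over masked pixels only and divided by the number of masked entries. Shapes and values are assumptions for illustration.

import torch
import torch.nn.functional as F

images  = torch.rand(1, 3, 4, 4)
outputs = torch.rand(1, 3, 4, 4)
masks   = torch.zeros(1, 1, 4, 4)
masks[..., :2, :] = 1.0                              # top half of the image is masked out

keep = masks.bool().expand_as(images)                # restrict the loss to masked pixels
loss = F.mse_loss(outputs[keep], images[keep], reduction="sum") / torch.count_nonzero(masks)
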
docker/intel_code/llama13b/Megatron-DeepSpeed/setup.py ADDED
@@ -0,0 +1,111 @@
1
+ from setuptools import setup, find_packages
2
+
3
+ """Setup for pip package."""
4
+
5
+ import importlib.util
6
+ import os
7
+ import setuptools
8
+
9
+ spec = importlib.util.spec_from_file_location('package_info', 'megatron/core/package_info.py')
10
+ package_info = importlib.util.module_from_spec(spec)
11
+ spec.loader.exec_module(package_info)
12
+
13
+
14
+ __contact_emails__ = package_info.__contact_emails__
15
+ __contact_names__ = package_info.__contact_names__
16
+ __description__ = package_info.__description__
17
+ __download_url__ = package_info.__download_url__
18
+ __homepage__ = package_info.__homepage__
19
+ __keywords__ = package_info.__keywords__
20
+ __license__ = package_info.__license__
21
+ __package_name__ = package_info.__package_name__
22
+ __repository_url__ = package_info.__repository_url__
23
+ __version__ = package_info.__version__
24
+
25
+
26
+ if os.path.exists('megatron/core/README.md'):
27
+ with open("megatron/core/README.md", "r", encoding='utf-8') as fh:
28
+ long_description = fh.read()
29
+ long_description_content_type = "text/markdown"
30
+
31
+ else:
32
+ long_description = 'See ' + __homepage__
33
+ long_description_content_type = "text/plain"
34
+
35
+
36
+ ###############################################################################
37
+ # Dependency Loading #
38
+ # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
39
+
40
+ def req_file(filename, folder="megatron/core"):
41
+ with open(os.path.join(folder, filename), encoding='utf-8') as f:
42
+ content = f.readlines()
43
+ # you may also want to remove whitespace characters
44
+ # Example: `\n` at the end of each line
45
+ return [x.strip() for x in content]
46
+
47
+ install_requires = req_file("requirements.txt")
48
+
49
+ ###############################################################################
50
+
51
+ setuptools.setup(
52
+ name=__package_name__,
53
+ # Versions should comply with PEP440. For a discussion on single-sourcing
54
+ # the version across setup.py and the project code, see
55
+ # https://packaging.python.org/en/latest/single_source_version.html
56
+ version=__version__,
57
+ description=__description__,
58
+ long_description=long_description,
59
+ long_description_content_type=long_description_content_type,
60
+ # The project's main homepage.
61
+ url=__repository_url__,
62
+ download_url=__download_url__,
63
+ # Author details
64
+ author=__contact_names__,
65
+ author_email=__contact_emails__,
66
+ # maintainer Details
67
+ maintainer=__contact_names__,
68
+ maintainer_email=__contact_emails__,
69
+ # The licence under which the project is released
70
+ license=__license__,
71
+ classifiers=[
72
+ # How mature is this project? Common values are
73
+ # 1 - Planning
74
+ # 2 - Pre-Alpha
75
+ # 3 - Alpha
76
+ # 4 - Beta
77
+ # 5 - Production/Stable
78
+ # 6 - Mature
79
+ # 7 - Inactive
80
+ 'Development Status :: 5 - Production/Stable',
81
+ # Indicate who your project is intended for
82
+ 'Intended Audience :: Developers',
83
+ 'Intended Audience :: Science/Research',
84
+ 'Intended Audience :: Information Technology',
85
+ # Indicate what your project relates to
86
+ 'Topic :: Scientific/Engineering',
87
+ 'Topic :: Scientific/Engineering :: Mathematics',
88
+ 'Topic :: Scientific/Engineering :: Image Recognition',
89
+ 'Topic :: Scientific/Engineering :: Artificial Intelligence',
90
+ 'Topic :: Software Development :: Libraries',
91
+ 'Topic :: Software Development :: Libraries :: Python Modules',
92
+ 'Topic :: Utilities',
93
+ # Pick your license as you wish (should match "license" above)
94
+ 'License :: OSI Approved :: BSD License',
95
+ # Supported python versions
96
+ 'Programming Language :: Python :: 3',
97
+ 'Programming Language :: Python :: 3.8',
98
+ 'Programming Language :: Python :: 3.9',
99
+ # Additional Setting
100
+ 'Environment :: Console',
101
+ 'Natural Language :: English',
102
+ 'Operating System :: OS Independent',
103
+ ],
104
+ packages=setuptools.find_packages(),
105
+ install_requires=install_requires,
106
+
107
+ # Add in any packaged data.
108
+ include_package_data=True,
109
+ # PyPI package information.
110
+ keywords=__keywords__,
111
+ )