Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/.gitignore +1 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/5-cross-validation.sh +14 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/LICENSE +203 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/README.md +316 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/bf16_config/unet.json +85 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/dataset/data_loader.py +234 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/dataset/preprocess_data.py +160 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/dataset/transforms.py +218 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/main.py +114 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/model/layers.py +134 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/model/losses.py +89 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/model/model_fn.py +89 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/model/unet3d.py +82 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/requirements.txt +4 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/runtime/arguments.py +166 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/runtime/hooks.py +138 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/runtime/parse_results.py +66 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/runtime/setup.py +126 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/.gitignore +2 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/LICENSE +31 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/NOTICE +17 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/README.md +300 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/bf16_config/unet.json +85 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/data_loading/data_loader.py +249 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/download_dataset.py +41 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/model/layers.py +206 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/model/unet.py +61 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/requirements.txt +5 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/runtime/arguments.py +214 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/runtime/losses.py +49 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/runtime/parse_results.py +82 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/runtime/run.py +241 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/runtime/setup.py +94 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/train_and_evaluate.sh +38 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/unet2d.py +93 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/LICENSE +192 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/README.md +184 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/config.py +30 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/models/__init__.py +0 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/models/models.py +381 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/requirements.txt +3 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/train.py +297 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/utils/__init__.py +0 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/utils/dataset.py +134 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/utils/distribution_utils.py +61 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/utils/image_processing.py +468 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/vit_keras/__init__.py +1 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/vit_keras/layers.py +184 -0
- docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/vit_keras/utils.py +207 -0
.gitattributes
CHANGED
@@ -126,3 +126,4 @@ docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatr
|
|
126 |
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed_old/wandb/run-20240424_103821-kdru3pwo/logs/debug-internal.log filter=lfs diff=lfs merge=lfs -text
|
127 |
docker/intel_code/llama13b/Model-References/PyTorch/examples/computer_vision/data/MNIST/raw/t10k-images-idx3-ubyte filter=lfs diff=lfs merge=lfs -text
|
128 |
docker/intel_code/llama13b/Model-References/PyTorch/examples/computer_vision/data/MNIST/raw/train-images-idx3-ubyte filter=lfs diff=lfs merge=lfs -text
|
|
|
|
126 |
docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed_old/wandb/run-20240424_103821-kdru3pwo/logs/debug-internal.log filter=lfs diff=lfs merge=lfs -text
|
127 |
docker/intel_code/llama13b/Model-References/PyTorch/examples/computer_vision/data/MNIST/raw/t10k-images-idx3-ubyte filter=lfs diff=lfs merge=lfs -text
|
128 |
docker/intel_code/llama13b/Model-References/PyTorch/examples/computer_vision/data/MNIST/raw/train-images-idx3-ubyte filter=lfs diff=lfs merge=lfs -text
|
129 |
+
utilities/l3-routes filter=lfs diff=lfs merge=lfs -text
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
logs/*
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/5-cross-validation.sh
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
###############################################################################
|
2 |
+
# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
|
3 |
+
###############################################################################
|
4 |
+
#
|
5 |
+
# This script runs 5-fold cross-validation of UNet3D topology for 16000 iterations on single HLS
|
6 |
+
# Usage:
|
7 |
+
# bash 5-cross-validation.sh <path/to/dataset> <path/for/results> <batch size> <data type>
|
8 |
+
mpirun --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 $PYTHON main.py --use_horovod --data_dir $1 --model_dir $2/fold_0 --log_dir $2/fold_0 --batch_size $3 --dtype $4 --fold 0 --tensorboard_logging --log_all_workers > $2/result_$4_fold0.log
|
9 |
+
mpirun --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 $PYTHON main.py --use_horovod --data_dir $1 --model_dir $2/fold_1 --log_dir $2/fold_1 --batch_size $3 --dtype $4 --fold 1 --tensorboard_logging --log_all_workers > $2/result_$4_fold1.log
|
10 |
+
mpirun --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 $PYTHON main.py --use_horovod --data_dir $1 --model_dir $2/fold_2 --log_dir $2/fold_2 --batch_size $3 --dtype $4 --fold 2 --tensorboard_logging --log_all_workers > $2/result_$4_fold2.log
|
11 |
+
mpirun --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 $PYTHON main.py --use_horovod --data_dir $1 --model_dir $2/fold_3 --log_dir $2/fold_3 --batch_size $3 --dtype $4 --fold 3 --tensorboard_logging --log_all_workers > $2/result_$4_fold3.log
|
12 |
+
mpirun --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 $PYTHON main.py --use_horovod --data_dir $1 --model_dir $2/fold_4 --log_dir $2/fold_4 --batch_size $3 --dtype $4 --fold 4 --tensorboard_logging --log_all_workers > $2/result_$4_fold4.log
|
13 |
+
|
14 |
+
$PYTHON runtime/parse_results.py --model_dir $2 --env result_$4
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/LICENSE
ADDED
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright (c) 2021 Habana Labs, Ltd. an Intel Company
|
2 |
+
|
3 |
+
Apache License
|
4 |
+
Version 2.0, January 2004
|
5 |
+
http://www.apache.org/licenses/
|
6 |
+
|
7 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
8 |
+
|
9 |
+
1. Definitions.
|
10 |
+
|
11 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
12 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
13 |
+
|
14 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
15 |
+
the copyright owner that is granting the License.
|
16 |
+
|
17 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
18 |
+
other entities that control, are controlled by, or are under common
|
19 |
+
control with that entity. For the purposes of this definition,
|
20 |
+
"control" means (i) the power, direct or indirect, to cause the
|
21 |
+
direction or management of such entity, whether by contract or
|
22 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
23 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
24 |
+
|
25 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
26 |
+
exercising permissions granted by this License.
|
27 |
+
|
28 |
+
"Source" form shall mean the preferred form for making modifications,
|
29 |
+
including but not limited to software source code, documentation
|
30 |
+
source, and configuration files.
|
31 |
+
|
32 |
+
"Object" form shall mean any form resulting from mechanical
|
33 |
+
transformation or translation of a Source form, including but
|
34 |
+
not limited to compiled object code, generated documentation,
|
35 |
+
and conversions to other media types.
|
36 |
+
|
37 |
+
"Work" shall mean the work of authorship, whether in Source or
|
38 |
+
Object form, made available under the License, as indicated by a
|
39 |
+
copyright notice that is included in or attached to the work
|
40 |
+
(an example is provided in the Appendix below).
|
41 |
+
|
42 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
43 |
+
form, that is based on (or derived from) the Work and for which the
|
44 |
+
editorial revisions, annotations, elaborations, or other modifications
|
45 |
+
represent, as a whole, an original work of authorship. For the purposes
|
46 |
+
of this License, Derivative Works shall not include works that remain
|
47 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
48 |
+
the Work and Derivative Works thereof.
|
49 |
+
|
50 |
+
"Contribution" shall mean any work of authorship, including
|
51 |
+
the original version of the Work and any modifications or additions
|
52 |
+
to that Work or Derivative Works thereof, that is intentionally
|
53 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
54 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
55 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
56 |
+
means any form of electronic, verbal, or written communication sent
|
57 |
+
to the Licensor or its representatives, including but not limited to
|
58 |
+
communication on electronic mailing lists, source code control systems,
|
59 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
60 |
+
Licensor for the purpose of discussing and improving the Work, but
|
61 |
+
excluding communication that is conspicuously marked or otherwise
|
62 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
63 |
+
|
64 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
65 |
+
on behalf of whom a Contribution has been received by Licensor and
|
66 |
+
subsequently incorporated within the Work.
|
67 |
+
|
68 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
69 |
+
this License, each Contributor hereby grants to You a perpetual,
|
70 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
71 |
+
copyright license to reproduce, prepare Derivative Works of,
|
72 |
+
publicly display, publicly perform, sublicense, and distribute the
|
73 |
+
Work and such Derivative Works in Source or Object form.
|
74 |
+
|
75 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
76 |
+
this License, each Contributor hereby grants to You a perpetual,
|
77 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
78 |
+
(except as stated in this section) patent license to make, have made,
|
79 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
80 |
+
where such license applies only to those patent claims licensable
|
81 |
+
by such Contributor that are necessarily infringed by their
|
82 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
83 |
+
with the Work to which such Contribution(s) was submitted. If You
|
84 |
+
institute patent litigation against any entity (including a
|
85 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
86 |
+
or a Contribution incorporated within the Work constitutes direct
|
87 |
+
or contributory patent infringement, then any patent licenses
|
88 |
+
granted to You under this License for that Work shall terminate
|
89 |
+
as of the date such litigation is filed.
|
90 |
+
|
91 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
92 |
+
Work or Derivative Works thereof in any medium, with or without
|
93 |
+
modifications, and in Source or Object form, provided that You
|
94 |
+
meet the following conditions:
|
95 |
+
|
96 |
+
(a) You must give any other recipients of the Work or
|
97 |
+
Derivative Works a copy of this License; and
|
98 |
+
|
99 |
+
(b) You must cause any modified files to carry prominent notices
|
100 |
+
stating that You changed the files; and
|
101 |
+
|
102 |
+
(c) You must retain, in the Source form of any Derivative Works
|
103 |
+
that You distribute, all copyright, patent, trademark, and
|
104 |
+
attribution notices from the Source form of the Work,
|
105 |
+
excluding those notices that do not pertain to any part of
|
106 |
+
the Derivative Works; and
|
107 |
+
|
108 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
109 |
+
distribution, then any Derivative Works that You distribute must
|
110 |
+
include a readable copy of the attribution notices contained
|
111 |
+
within such NOTICE file, excluding those notices that do not
|
112 |
+
pertain to any part of the Derivative Works, in at least one
|
113 |
+
of the following places: within a NOTICE text file distributed
|
114 |
+
as part of the Derivative Works; within the Source form or
|
115 |
+
documentation, if provided along with the Derivative Works; or,
|
116 |
+
within a display generated by the Derivative Works, if and
|
117 |
+
wherever such third-party notices normally appear. The contents
|
118 |
+
of the NOTICE file are for informational purposes only and
|
119 |
+
do not modify the License. You may add Your own attribution
|
120 |
+
notices within Derivative Works that You distribute, alongside
|
121 |
+
or as an addendum to the NOTICE text from the Work, provided
|
122 |
+
that such additional attribution notices cannot be construed
|
123 |
+
as modifying the License.
|
124 |
+
|
125 |
+
You may add Your own copyright statement to Your modifications and
|
126 |
+
may provide additional or different license terms and conditions
|
127 |
+
for use, reproduction, or distribution of Your modifications, or
|
128 |
+
for any such Derivative Works as a whole, provided Your use,
|
129 |
+
reproduction, and distribution of the Work otherwise complies with
|
130 |
+
the conditions stated in this License.
|
131 |
+
|
132 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
133 |
+
any Contribution intentionally submitted for inclusion in the Work
|
134 |
+
by You to the Licensor shall be under the terms and conditions of
|
135 |
+
this License, without any additional terms or conditions.
|
136 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
137 |
+
the terms of any separate license agreement you may have executed
|
138 |
+
with Licensor regarding such Contributions.
|
139 |
+
|
140 |
+
6. Trademarks. This License does not grant permission to use the trade
|
141 |
+
names, trademarks, service marks, or product names of the Licensor,
|
142 |
+
except as required for reasonable and customary use in describing the
|
143 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
144 |
+
|
145 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
146 |
+
agreed to in writing, Licensor provides the Work (and each
|
147 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
148 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
149 |
+
implied, including, without limitation, any warranties or conditions
|
150 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
151 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
152 |
+
appropriateness of using or redistributing the Work and assume any
|
153 |
+
risks associated with Your exercise of permissions under this License.
|
154 |
+
|
155 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
156 |
+
whether in tort (including negligence), contract, or otherwise,
|
157 |
+
unless required by applicable law (such as deliberate and grossly
|
158 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
159 |
+
liable to You for damages, including any direct, indirect, special,
|
160 |
+
incidental, or consequential damages of any character arising as a
|
161 |
+
result of this License or out of the use or inability to use the
|
162 |
+
Work (including but not limited to damages for loss of goodwill,
|
163 |
+
work stoppage, computer failure or malfunction, or any and all
|
164 |
+
other commercial damages or losses), even if such Contributor
|
165 |
+
has been advised of the possibility of such damages.
|
166 |
+
|
167 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
168 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
169 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
170 |
+
or other liability obligations and/or rights consistent with this
|
171 |
+
License. However, in accepting such obligations, You may act only
|
172 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
173 |
+
of any other Contributor, and only if You agree to indemnify,
|
174 |
+
defend, and hold each Contributor harmless for any liability
|
175 |
+
incurred by, or claims asserted against, such Contributor by reason
|
176 |
+
of your accepting any such warranty or additional liability.
|
177 |
+
|
178 |
+
END OF TERMS AND CONDITIONS
|
179 |
+
|
180 |
+
APPENDIX: How to apply the Apache License to your work.
|
181 |
+
|
182 |
+
To apply the Apache License to your work, attach the following
|
183 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
184 |
+
replaced with your own identifying information. (Don't include
|
185 |
+
the brackets!) The text should be enclosed in the appropriate
|
186 |
+
comment syntax for the file format. We also recommend that a
|
187 |
+
file or class name and description of purpose be included on the
|
188 |
+
same "printed page" as the copyright notice for easier
|
189 |
+
identification within third-party archives.
|
190 |
+
|
191 |
+
Copyright [yyyy] [name of copyright owner]
|
192 |
+
|
193 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
194 |
+
you may not use this file except in compliance with the License.
|
195 |
+
You may obtain a copy of the License at
|
196 |
+
|
197 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
198 |
+
|
199 |
+
Unless required by applicable law or agreed to in writing, software
|
200 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
201 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
202 |
+
See the License for the specific language governing permissions and
|
203 |
+
limitations under the License.
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/README.md
ADDED
@@ -0,0 +1,316 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# UNet3D for TensorFlow 2
|
2 |
+
|
3 |
+
This directory provides a script and recipe to train a 3D-UNet medical image segmentation model for TensorFlow 2.x to achieve state of the art accuracy, and is tested and maintained by Habana. For further information on performance, refer to [Habana Model Performance Data page](https://developer.habana.ai/resources/habana-training-models/#performance).
|
4 |
+
|
5 |
+
For further information on training deep learning models using Gaudi, refer to [developer.habana.ai](https://developer.habana.ai/resources/).
|
6 |
+
|
7 |
+
## Table of Contents
|
8 |
+
|
9 |
+
* [Model-References](../../../README.md)
|
10 |
+
* [Model overview](#model-overview)
|
11 |
+
* [Setup](#setup)
|
12 |
+
* [Training and Examples](#training-and-examples)
|
13 |
+
* [Advanced](#advanced)
|
14 |
+
* [Supported Configuration](#supported-configuration)
|
15 |
+
* [Changelog](#changelog)
|
16 |
+
|
17 |
+
## Model Overview
|
18 |
+
|
19 |
+
The U-Net model is a convolutional neural network for 3D image segmentation. This repository contains a UNet3D implementation introduced in [3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation](https://arxiv.org/pdf/1606.06650), with modifications described in [No New-Net](https://arxiv.org/pdf/1809.10483). It is based on [UNet3D Medical Image Segmentation for TensorFlow 1.x](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_3D_Medical) repository.
|
20 |
+
|
21 |
+
### Model Architecture
|
22 |
+
|
23 |
+
UNet3D was first introduced by Olaf Ronneberger, Philip Fischer and Thomas Brox in [UNet3D Learning Dense Volumetric Segmentation from Sparse Annotation](https://arxiv.org/pdf/1606.06650). In this repository we host a UNet3D version adapted by Fabian Isensee et al. to brain tumor segmentation. UNet3D allows for seamless segmentation of 3D volumes, with high accuracy and performance, and can be adapted to solve many different segmentation problems.
|
24 |
+
|
25 |
+
The following figure shows the construction of the UNet3D model and its different components. UNet3D is composed of a contractive and an expanding path, that aims at building a bottleneck in its centermost part through a combination of convolution and pooling operations. After this bottleneck, the image is reconstructed through a combination of convolutions and upsampling. Skip connections are added with the goal of helping the backward flow of gradients in order to improve the training.
|
26 |
+
|
27 |
+

|
28 |
+
|
29 |
+
UNet3D consists of a contractive (left-side) and expanding (right-side) path. It repeatedly applies un-padded convolutions followed by max pooling for downsampling. Every step in the expanding path consists of an upsampling of the feature maps and a concatenation with the correspondingly cropped feature map from the contractive path.
|
30 |
+
|
31 |
+
### Model Changes
|
32 |
+
|
33 |
+
The following are the major changes that were implemented to the original model from [UNet3D Medical Image Segmentation for TensorFlow 1.x](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_3D_Medical):
|
34 |
+
|
35 |
+
* Changed some scripts to run the model on Gaudi. This includes loading habana tensorflow modules.
|
36 |
+
* Converted all the scripts to Tensorflow 2.x version.
|
37 |
+
* Added support for using bfloat16 precision instead of float16.
|
38 |
+
* Added further TensorBoard and performance logging options.
|
39 |
+
* Removed GPU specific files (examples/*, Dockerfile etc.) and some unused codes.
|
40 |
+
* Added further synthetic data and tensor dumping options.
|
41 |
+
* Enabled the tf.data.experimental.prefetch_to_device for HPU device to improve performance.
|
42 |
+
|
43 |
+
### Default Configuration
|
44 |
+
|
45 |
+
- Execution mode: train and evaluate
|
46 |
+
- Batch size: 2
|
47 |
+
- Data type: bfloat16
|
48 |
+
- Maximum number of steps: 16000
|
49 |
+
- Learning rate: 0.0002
|
50 |
+
- Loss: dice+ce
|
51 |
+
- Normalization block: instancenorm
|
52 |
+
- Include background in predictions and labels: False
|
53 |
+
- Number of Horovod workers (HPUs): 1
|
54 |
+
- Data augmentation: True
|
55 |
+
- Using XLA: True
|
56 |
+
- Resume training from checkpoint: False
|
57 |
+
- Logging losses and performance every N steps: 100
|
58 |
+
- Tensorboard logging: False
|
59 |
+
- Logging data from every worker: False
|
60 |
+
|
61 |
+
## Setup
|
62 |
+
|
63 |
+
Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/GAUDI_Installation_Guide.html) to set up the environment including the `$PYTHON` environment variable. To achieve the best performance, please follow the methods outlined in the [Optimizing Training Platform guide](https://docs.habana.ai/en/latest/TensorFlow/Model_Optimization_TensorFlow/Optimization_Training_Platform.html).
|
64 |
+
The guides will walk you through the process of setting up your system to run the model on Gaudi.
|
65 |
+
|
66 |
+
### Clone Habana Model-References
|
67 |
+
|
68 |
+
In the docker container, clone this repository and switch to the branch that matches your SynapseAI version. You can run the [`hl-smi`](https://docs.habana.ai/en/latest/Management_and_Monitoring/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options) utility to determine the SynapseAI version.
|
69 |
+
|
70 |
+
```bash
|
71 |
+
git clone https://github.com/HabanaAI/Model-References /root/Model-References
|
72 |
+
cd /root/Model-References/TensorFlow/computer_vision/UNet3D
|
73 |
+
```
|
74 |
+
|
75 |
+
**Note:** If Model-References repository path is not in the PYTHONPATH, make sure you update it:
|
76 |
+
```bash
|
77 |
+
export PYTHONPATH=$PYTHONPATH:/root/Model-References
|
78 |
+
```
|
79 |
+
|
80 |
+
### Download and Pre-process the Dataset
|
81 |
+
|
82 |
+
1. To obtain the dataset, register on [Brain Tumor Segmentation 2019 dataset](https://www.med.upenn.edu/cbica/brats-2019/) website. The data should be downloaded and placed where `/dataset` in the container is mounted. The `dataset/preprocess_data.py` script will convert the raw data into tfrecord format used for training and evaluation.
|
83 |
+
|
84 |
+
2. To launch the script, run:
|
85 |
+
|
86 |
+
```bash
|
87 |
+
$PYTHON dataset/preprocess_data.py -i /dataset -o /dataset_preprocessed -v
|
88 |
+
```
|
89 |
+
|
90 |
+
### Install Model Requirements
|
91 |
+
|
92 |
+
1. In the docker container, go to the UNet3D directory:
|
93 |
+
|
94 |
+
```bash
|
95 |
+
cd /root/Model-References/TensorFlow/computer_vision/UNet3D
|
96 |
+
```
|
97 |
+
|
98 |
+
2. Install the required packages using pip:
|
99 |
+
|
100 |
+
```bash
|
101 |
+
$PYTHON -m pip install -r requirements.txt
|
102 |
+
```
|
103 |
+
|
104 |
+
## Training and Examples
|
105 |
+
|
106 |
+
### Single Card and Multi-Card Training Examples
|
107 |
+
|
108 |
+
**Run training on 1 HPU:**
|
109 |
+
|
110 |
+
```bash
|
111 |
+
$PYTHON main.py --data_dir <path/to/dataset> --dtype <precision> --model_dir <path/to/model_dir> --log_dir <path/to/log_dir> --tensorboard_logging
|
112 |
+
```
|
113 |
+
|
114 |
+
Run training on 1 HPU with batch size 2, bfloat16 precision and fold 0:
|
115 |
+
|
116 |
+
```bash
|
117 |
+
$PYTHON main.py --data_dir /dataset_preprocessed --dtype bf16 --model_dir /tmp/unet3d_1_hpu --log_dir /tmp/unet3d_1_hpu --tensorboard_logging
|
118 |
+
```
|
119 |
+
|
120 |
+
**Run training on 8 HPUs:**
|
121 |
+
|
122 |
+
**NOTE:** mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/TensorFlow/Tensorflow_Scaling_Guide/Horovod_Scaling/index.html#mpirun-configuration).
|
123 |
+
|
124 |
+
Run training on 8 HPUs via mpirun with batch size 2, bfloat16 precision and fold 0:
|
125 |
+
|
126 |
+
```bash
|
127 |
+
mpirun --allow-run-as-root --bind-to core --map-by socket:PE=6 --np 8 \
|
128 |
+
$PYTHON main.py --use_horovod --data_dir /dataset_preprocessed --dtype bf16 --model_dir /tmp/unet3d_8_hpus --log_dir /tmp/unet3d_8_hpus --tensorboard_logging --log_all_workers
|
129 |
+
```
|
130 |
+
**Run 5-fold Cross-Validation and compute average dice score:**
|
131 |
+
|
132 |
+
All the commands described above will train and evaluate the model on the dataset with fold 0. To perform 5-fold-cross-validation on the dataset and compute average dice score across 5 folds, the user can execute training script 5 times and calculate the average dice score manually or run bash script `5-cross-validation.sh`:
|
133 |
+
|
134 |
+
```bash
|
135 |
+
bash 5-cross-validation.sh <path/to/dataset> <path/for/results> <batch_size> <precision>
|
136 |
+
```
|
137 |
+
|
138 |
+
Run training on 8 HPUs 5-fold-cross-validation with batch size 2 and bfloat16 precision:
|
139 |
+
|
140 |
+
```bash
|
141 |
+
bash 5-cross-validation.sh /dataset_preprocessed /tmp/unet3d_8_hpus 2 bf16
|
142 |
+
```
|
143 |
+
## Advanced
|
144 |
+
|
145 |
+
The following sections provide further details on the dataset, running training and inference, and the training results.
|
146 |
+
|
147 |
+
### Scripts and Sample Code
|
148 |
+
|
149 |
+
In the root directory, the most important files are:
|
150 |
+
* `main.py`: Serves as the entry point to the application. Encapsulates the training routine.
|
151 |
+
* `requirements.txt`: Set of extra requirements for running U-Net.
|
152 |
+
|
153 |
+
The `dataset/` folder contains the necessary tools to train and perform inference using U-Net. Its main components are:
|
154 |
+
* `data_loader.py`: Implements the data loading and augmentation.
|
155 |
+
* `transforms.py`: Implements the data augmentation functions.
|
156 |
+
* `preprocess_data.py`: Implements the data conversion and pre-processing functionality.
|
157 |
+
|
158 |
+
The `runtime/` folder contains scripts with training and inference logic. Its contents are:
|
159 |
+
* `arguments.py`: Implements the command-line arguments parsing.
|
160 |
+
* `hooks.py`: Collects different metrics to be used for benchmarking and testing.
|
161 |
+
* `parse_results.py`: Defines a set of functions used for parsing the partial results.
|
162 |
+
* `setup.py`: Defines a set of functions to set the environment up.
|
163 |
+
|
164 |
+
The `model/` folder contains information about the building blocks of UNet3D and the way they are assembled. Its contents are:
|
165 |
+
* `layers.py`: Defines the different blocks that are used to assemble UNet3D.
|
166 |
+
* `losses.py`: Defines the different losses used during training and evaluation.
|
167 |
+
* `model_fn.py`: Defines the computational graph to optimize.
|
168 |
+
* `unet3d.py`: Defines the model architecture using the blocks from the `layers.py` file.
|
169 |
+
|
170 |
+
Other folders included in the root directory are:
|
171 |
+
* `images/`: Contains the model diagram
|
172 |
+
|
173 |
+
### Parameters
|
174 |
+
|
175 |
+
The complete list of the available parameters for the `main.py` script contains the following:
|
176 |
+
* `--exec_mode`: Select the execution mode to run the model (default: `train_and_evaluate`). The available modes:
|
177 |
+
* `train` - Trains the model and stores checkpoints in the directory passed using `--model_dir`
|
178 |
+
* `evaluate` - Loads the checkpoint (if available) and performs evaluation on validation subset (requires `--fold` other than `None`).
|
179 |
+
* `train_and_evaluate` - Trains the model from scratch and performs validation at the end (requires `--fold` other than `None`).
|
180 |
+
* `predict` - Loads the checkpoint (if available) and runs inference on the test set. Stores the results in the `--model_dir` directory.
|
181 |
+
* `train_and_predict` - Trains the model from scratch and performs inference.
|
182 |
+
* `--model_dir`: Set the output directory for information related to the model.
|
183 |
+
* `--log_dir`: Set the output directory for logs (default: `/tmp/unet3d_logs`).
|
184 |
+
* `--data_dir`: Set the input directory containing the preprocessed dataset.
|
185 |
+
* `--batch_size`: Size of each minibatch per device (default: `2`).
|
186 |
+
* `--dtype`: Set precision to be used in model on HPU: fp32/bf16 (default: `bf16`).
|
187 |
+
* `--bf16_config_path`: Path to custom mixed precision config to be used (default: `./bf16_config/unet.json`).
|
188 |
+
* `--fold`: Selected fold for cross-validation (default: `0`).
|
189 |
+
* `--num_folds`: Number of folds in k-cross-validation of dataset (default: `5`).
|
190 |
+
* `--max_steps`: Maximum number of steps (batches) for training (default: `16000`).
|
191 |
+
* `--seed`: Set random seed for reproducibility (default: `None`).
|
192 |
+
* `--log_every`: Log performance every n steps (default: `100`).
|
193 |
+
* `--learning_rate`: Model’s learning rate (default: `0.0002`).
|
194 |
+
* `--loss`: Loss function to be used during training (default: `dice+ce`).
|
195 |
+
* `--normalization`: Normalization block to be applied in the model (default: `instancenorm`).
|
196 |
+
* `--include_background`: Include background both in predictions and labels (default: `False`).
|
197 |
+
* `--no-augment`: Disable data augmentation (enabled by default).
|
198 |
+
* `--benchmark`: Enable performance benchmarking (disabled by default). If the flag is set, the script runs in a benchmark mode - each iteration is timed and the performance result (in images per second) is printed at the end. Works for both `train` and `predict` execution modes.
|
199 |
+
* `--warmup_steps`: Used during benchmarking - the number of steps to skip (default: `40`). First iterations are usually much slower since the graph is being constructed. Skipping the initial iterations is required for a fair performance assessment.
|
200 |
+
* `--resume_training`: Whether to resume training from a checkpoint, if there is one (disabled by default).
|
201 |
+
* `--no_xla`: Disable accelerated linear algebra optimization (enabled by default).
|
202 |
+
* `--use_amp`: Enable automatic mixed precision for GPU (disabled by default).
|
203 |
+
* `--no_hpu`: Disable execution on HPU, train on CPU/GPU (default: `False`).
|
204 |
+
* `--dump_config`: Directory for dumping debug traces (default: `None`).
|
205 |
+
* `--synth_data`: Use deterministic and synthetic data (default: `False`).
|
206 |
+
* `--disable_ckpt_saving`: Disables saving checkpoints (default: `False`).
|
207 |
+
* `--use_horovod`: Enable horovod usage (default: `False`).
|
208 |
+
* `--tensorboard_logging`: Enable tensorboard logging (default: `False`).
|
209 |
+
* `--log_all_workers`: Enable logging data for every horovod worker in a separate directory named `worker_N` (default: `False`).
|
210 |
+
|
211 |
+
### Command Line Options
|
212 |
+
|
213 |
+
To see the full list of the available options and their descriptions, use the `-h` or `--help` command-line option, for example:
|
214 |
+
|
215 |
+
```bash
|
216 |
+
$PYTHON main.py --help
|
217 |
+
```
|
218 |
+
|
219 |
+
### Dataset Description
|
220 |
+
|
221 |
+
The UNet3D model was trained on the [Brain Tumor Segmentation 2019 dataset](https://www.med.upenn.edu/cbica/brats-2019/). Test images provided by the organization were used to produce the resulting masks for submission. Upon registration, the challenge's data is made available through the https://ipp.cbica.upenn.edu service.
|
222 |
+
|
223 |
+
The dataset consists of 335 240x240x155 `nifti` volumes. Each volume is represented by 4 modalities and a corresponding segmentation mask.
|
224 |
+
The following lists the modalities:
|
225 |
+
* Native T1-weighted (T1),
|
226 |
+
* Post-contrast T1-weighted (T1Gd),
|
227 |
+
* Native T2-weighted (T2),
|
228 |
+
* T2 Fluid Attenuated Inversion Recovery (FLAIR).
|
229 |
+
|
230 |
+
Each voxel in a segmentation mask belongs to one of four classes:
|
231 |
+
* 0 corresponds to healthy tissue or background,
|
232 |
+
* 1 indicates the presence of the necrotic and non-enhancing tumor core (TC),
|
233 |
+
* 2 indicates the presence of the peritumoral edema (ED),
|
234 |
+
* 4 indicates the presence of the GD-enhancing tumor (ET).
|
235 |
+
|
236 |
+
The objective is to produce a set of masks that segment the data as accurately as possible. The results are expected to be submitted as a 12-bit `nifti` 3D image, with values corresponding to the underlying class.
|
237 |
+
|
238 |
+
### Dataset Guidelines
|
239 |
+
|
240 |
+
The training and test datasets are given as 3D `nifti` volumes that can be read using the Nibabel library and NumPy.
|
241 |
+
|
242 |
+
Initially, all modalities are loaded, stacked and converted into 240x240x155x4 NumPy arrays using Nibabel. To decrease the size of the dataset, each volume is clipped to 85% of the maximal value, normalized to 255 for each modality separately, casted to 8-bit, grouped by 4 volumes, and saved as a `tfrecord` file. The process of converting from `nifti` to `tfrecord` can be found in the `preprocess_data.py` script.
|
243 |
+
|
244 |
+
The `tfrecord` files are fed to the model through `tf.data.TFRecordDataset()` to achieve high performance.
|
245 |
+
|
246 |
+
The foreground voxel intensities are then z-score normalized, whereas labels are one-hot encoded for their later use in dice or pixel-wise cross-entropy loss, becoming 240x240x155x4 tensors.
|
247 |
+
|
248 |
+
If the augmentation is enabled, the following set of augmentation techniques are applied:
|
249 |
+
* Random horizontal flipping
|
250 |
+
* Random 128x128x128x4 crop
|
251 |
+
* Random brightness shifting
|
252 |
+
|
253 |
+
In addition, random vertical flip and random gamma correction augmentations were implemented, but are not used. The process of loading, normalizing and augmenting the data contained in the dataset can be found in the `data_loader.py` script.
|
254 |
+
|
255 |
+
#### Multi-dataset
|
256 |
+
|
257 |
+
This implementation is tuned for the Brain Tumor Segmentation 2019 dataset. Using other datasets is possible, but might require changes to the code (data loader) and tuning some hyperparameters (e.g. learning rate, number of iterations).
|
258 |
+
|
259 |
+
In the current implementation, the data loader works with tfrecord files. It should work seamlessly with any dataset containing 3D data stored in tfrecord format, as long as features (with corresponding mean and standard deviation) and labels are stored as bytestream in the same file as `X`, `Y`, `mean`, and `stdev`. See the data pre-processing script for details. If your data is stored in a different format, you will have to modify the parsing function in the `dataset/data_loader.py` file. For a walk-through, check the [TensorFlow tf.data API guide](https://www.tensorflow.org/guide/data_performance)
|
260 |
+
|
261 |
+
### Training Process
|
262 |
+
|
263 |
+
The model trains for a total of 16,000 (16,000 / number of devices) iterations for each fold, with the default UNet3D setup:
|
264 |
+
* Adam optimizer with learning rate of 0.0002.
|
265 |
+
* Training and evaluation batch size of 2.
|
266 |
+
|
267 |
+
The default configuration minimizes a function _L = 1 - DICE + cross entropy_ during training and reports achieved convergence as dice score per class, mean dice score, and dice score for whole tumor vs background. The training with a combination of dice and cross entropy has been proven to achieve better convergence than a training using only dice.
|
268 |
+
|
269 |
+
If the `--exec_mode train_and_evaluate` parameter was used, and if `--fold` parameter is set to an integer value of {0, 1, 2, 3, 4}, the evaluation of the validation set takes place after the training is completed. The results of the evaluation will be printed to the console.
|
270 |
+
|
271 |
+
### Inference Process
|
272 |
+
|
273 |
+
Inference can be launched with the same script used for training by passing the `--exec_mode predict` flag:
|
274 |
+
|
275 |
+
```bash
|
276 |
+
$PYTHON main.py --exec_mode predict --data_dir <path/to/data/preprocessed> --model_dir <path/to/checkpoint> [other parameters]
|
277 |
+
```
|
278 |
+
|
279 |
+
The script will then perform the following:
|
280 |
+
* Load the checkpoint from the directory specified by the `<path/to/checkpoint>` directory.
|
281 |
+
* Run inference on the test dataset.
|
282 |
+
* Save the resulting masks in the `numpy` format in the `--model_dir` directory.
|
283 |
+
|
284 |
+
## Supported Configuration
|
285 |
+
|
286 |
+
| Validated on | SynapseAI Version | TensorFlow Version(s) | Mode |
|
287 |
+
|:------:|:-----------------:|:-----:|:----------:|
|
288 |
+
| Gaudi | 1.14.0 | 2.15.0 | Training |
|
289 |
+
| Gaudi2 | 1.14.0 | 2.15.0 | Training |
|
290 |
+
|
291 |
+
## Changelog
|
292 |
+
|
293 |
+
### 1.7.0
|
294 |
+
|
295 |
+
* Added TimeToTrain callback for dumping evaluation times
|
296 |
+
|
297 |
+
### 1.6.0
|
298 |
+
|
299 |
+
* Model enabled on Gaudi2, with the same config as first-gen Gaudi.
|
300 |
+
|
301 |
+
### 1.5.0
|
302 |
+
|
303 |
+
* Stopped overriding model_dir path with additional `model_checkpoint` directory.
|
304 |
+
|
305 |
+
### 1.4.0
|
306 |
+
|
307 |
+
* Enabled tf.data.experimental.prefetch_to_device for HPU device to improve the model performance.
|
308 |
+
* Changed `python` or `python3` to `$PYTHON` to execute correct version based on environment setup.
|
309 |
+
* Replaced references to custom demo script by community entry points in README and `5-cross-validation.sh`.
|
310 |
+
* Added support to import horovod-fork package directly instead of using Model-References' TensorFlow.common.horovod_helpers.
|
311 |
+
|
312 |
+
### 1.3.0
|
313 |
+
* Moved BF16 config json file from TensorFlow/common/ to model's directory.
|
314 |
+
|
315 |
+
|
316 |
+
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/bf16_config/unet.json
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"allowlist": [
|
3 |
+
"BatchMatMul",
|
4 |
+
"BatchMatMulV2",
|
5 |
+
"BiasAdd",
|
6 |
+
"BiasAddGrad",
|
7 |
+
"Conv2D",
|
8 |
+
"Conv2DBackpropFilter",
|
9 |
+
"Conv2DBackpropInput",
|
10 |
+
"Conv3D",
|
11 |
+
"Conv3DBackpropFilter",
|
12 |
+
"Conv3DBackpropFilterV2",
|
13 |
+
"Conv3DBackpropInput",
|
14 |
+
"Conv3DBackpropInputV2",
|
15 |
+
"HabanaConv2DWithPadding",
|
16 |
+
"HabanaConv2DWithPaddingBackpropFilter",
|
17 |
+
"HabanaConv2DWithPaddingBackpropInput",
|
18 |
+
"HabanaInstanceNorm",
|
19 |
+
"HabanaInstanceNormGrad",
|
20 |
+
"LeakyRelu",
|
21 |
+
"LeakyReluGrad",
|
22 |
+
"MatMul",
|
23 |
+
"MaxPool",
|
24 |
+
"MaxPoolV2",
|
25 |
+
"MaxPoolGrad",
|
26 |
+
"MaxPoolGradV2",
|
27 |
+
"Relu",
|
28 |
+
"ReluGrad"
|
29 |
+
],
|
30 |
+
"conditional_list": [
|
31 |
+
"Add",
|
32 |
+
"AddN",
|
33 |
+
"AddV2",
|
34 |
+
"CollectiveReduceV2",
|
35 |
+
"CollectiveReduceV3",
|
36 |
+
"Concat",
|
37 |
+
"ConcatV2",
|
38 |
+
"Equal",
|
39 |
+
"Exp",
|
40 |
+
"GreaterEqual",
|
41 |
+
"HabanaClampBwd",
|
42 |
+
"HabanaClampFwd",
|
43 |
+
"HabanaDropout",
|
44 |
+
"HabanaDropoutGrad",
|
45 |
+
"HorovodAllgather",
|
46 |
+
"HorovodAllreduce",
|
47 |
+
"HpuCollectiveGather",
|
48 |
+
"HpuCollectiveGatherV2",
|
49 |
+
"HpuCollectiveReduce",
|
50 |
+
"Identity",
|
51 |
+
"Log1p",
|
52 |
+
"L2Loss",
|
53 |
+
"Max",
|
54 |
+
"Mean",
|
55 |
+
"Mul",
|
56 |
+
"Neg",
|
57 |
+
"Pad",
|
58 |
+
"PadV2",
|
59 |
+
"Pow",
|
60 |
+
"RealDiv",
|
61 |
+
"Reciprocal",
|
62 |
+
"Reshape",
|
63 |
+
"ResizeNearestNeighbor",
|
64 |
+
"ResizeNearestNeighborGrad",
|
65 |
+
"Select",
|
66 |
+
"Shape",
|
67 |
+
"ShapeN",
|
68 |
+
"Slice",
|
69 |
+
"Square",
|
70 |
+
"Tile"
|
71 |
+
],
|
72 |
+
"strict_conditional_list": [],
|
73 |
+
"non_convertible_exceptions": [
|
74 |
+
[
|
75 |
+
".*KEEP_FP32_PRECISION.*",
|
76 |
+
""
|
77 |
+
]
|
78 |
+
],
|
79 |
+
"convertible_exceptions": [
|
80 |
+
[
|
81 |
+
".*FORCE_BF16_PRECISION.*",
|
82 |
+
""
|
83 |
+
]
|
84 |
+
]
|
85 |
+
}
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/dataset/data_loader.py
ADDED
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - script migration to Tensorflow 2.x version
|
19 |
+
# - main function and unused imports have been removed
|
20 |
+
# - updated synth_train_fn function for stable CPU and HPU results
|
21 |
+
# - enabled experimental.prefetch_to_device functionality to improve the performance
|
22 |
+
|
23 |
+
import os
|
24 |
+
|
25 |
+
import numpy as np
|
26 |
+
import tensorflow as tf
|
27 |
+
|
28 |
+
from dataset.transforms import NormalizeImages, OneHotLabels, apply_transforms, PadXYZ, RandomCrop3D, \
|
29 |
+
RandomHorizontalFlip, RandomBrightnessCorrection, CenterCrop, apply_test_transforms, Cast
|
30 |
+
|
31 |
+
CLASSES = {0: "TumorCore", 1: "PeritumoralEdema", 2: "EnhancingTumor"}
|
32 |
+
|
33 |
+
|
34 |
+
def cross_validation(x: np.ndarray, fold_idx: int, n_folds: int):
    """Split *x* into training and validation subsets for k-fold CV.

    Args:
        x: Array of items (here: tfrecord file paths) to split.
        fold_idx: Index of the fold held out for validation.
        n_folds: Total number of folds.

    Returns:
        Tuple ``(train, val)`` where ``val`` is fold ``fold_idx`` and
        ``train`` is the concatenation of every other fold.

    Raises:
        ValueError: If ``fold_idx`` lies outside ``[0, n_folds)``.
    """
    if not 0 <= fold_idx < n_folds:
        raise ValueError('Fold index has to be [0, n_folds). Received index {} for {} folds'.format(fold_idx, n_folds))

    folds = np.array_split(x, n_folds)
    train_part = np.concatenate(folds[:fold_idx] + folds[fold_idx + 1:])
    return train_part, folds[fold_idx]
|
41 |
+
|
42 |
+
|
43 |
+
class Dataset:
    """tf.data pipelines for the preprocessed BraTS 2019 tfrecord dataset.

    Provides training, evaluation and inference input functions, plus
    synthetic-data variants used for benchmarking, sharded across Horovod
    workers when more than one is configured.
    """

    def __init__(self, data_dir, batch_size=2, fold_idx=0, n_folds=5, seed=0, pipeline_factor=1, params=None):
        """Index the tfrecord files in *data_dir* and split them into
        train/eval subsets for the requested cross-validation fold.

        Args:
            data_dir: Directory containing the preprocessed tfrecord files.
            batch_size: Per-device minibatch size.
            fold_idx: Cross-validation fold used for evaluation.
            n_folds: Total number of cross-validation folds.
            seed: Shuffle seed for reproducibility.
            pipeline_factor: Stored as-is; not used by the pipelines below.
            params: Runtime parameters object; ``worker_id``, ``num_workers``
                and ``augment`` are read here.
        """
        self._folders = np.array([os.path.join(data_dir, path) for path in os.listdir(data_dir)])
        self._train, self._eval = cross_validation(self._folders, fold_idx=fold_idx, n_folds=n_folds)
        self._pipeline_factor = pipeline_factor
        self._data_dir = data_dir
        self.params = params

        # Horovod sharding info; fall back to a single worker when unset.
        self._hpu_id = params.worker_id if params.worker_id else 0
        self._num_hpus = params.num_workers if params.num_workers else 1

        self._batch_size = batch_size
        self._seed = seed

        # Raw volume shapes: 4 stacked modalities for X, a label map for Y.
        self._xshape = (240, 240, 155, 4)
        self._yshape = (240, 240, 155)

    def parse(self, serialized):
        """Decode one serialized example into ``(x, y, mean, stdev)``."""
        feature_spec = {
            'X': tf.io.FixedLenFeature([], tf.string),
            'Y': tf.io.FixedLenFeature([], tf.string),
            'mean': tf.io.FixedLenFeature([4], tf.float32),
            'stdev': tf.io.FixedLenFeature([4], tf.float32)
        }
        example = tf.io.parse_single_example(serialized=serialized, features=feature_spec)

        image = tf.io.decode_raw(example['X'], tf.uint8)
        image = tf.cast(tf.reshape(image, self._xshape), tf.uint8)
        label = tf.io.decode_raw(example['Y'], tf.uint8)
        label = tf.cast(tf.reshape(label, self._yshape), tf.uint8)

        return image, label, example['mean'], example['stdev']

    def parse_x(self, serialized):
        """Decode one serialized example into ``(x, mean, stdev)``;
        the label field is left undecoded (inference path)."""
        feature_spec = {'X': tf.io.FixedLenFeature([], tf.string),
                        'Y': tf.io.FixedLenFeature([], tf.string),
                        'mean': tf.io.FixedLenFeature([4], tf.float32),
                        'stdev': tf.io.FixedLenFeature([4], tf.float32)}
        example = tf.io.parse_single_example(serialized=serialized, features=feature_spec)

        image = tf.io.decode_raw(example['X'], tf.uint8)
        image = tf.cast(tf.reshape(image, self._xshape), tf.uint8)

        return image, example['mean'], example['stdev']

    def prefetch(self, dataset, buffer_size):
        """Prefetch to the HPU device when one is present; otherwise fall
        back to host-side prefetching with *buffer_size* elements."""
        hpu_devices = tf.config.list_logical_devices('HPU')
        if len(hpu_devices) > 0:
            device = hpu_devices[0].name
            with tf.device(device):
                dataset = dataset.apply(tf.data.experimental.prefetch_to_device(device))
        else:
            dataset = dataset.prefetch(buffer_size)

        return dataset

    def train_fn(self):
        """Training pipeline: shard per worker, cache, shuffle, repeat,
        parse, augment, batch and prefetch."""
        assert len(self._train) > 0, "Training data not found."

        dataset = tf.data.TFRecordDataset(filenames=self._train)
        dataset = dataset.shard(self._num_hpus, self._hpu_id)
        dataset = dataset.cache()
        dataset = dataset.shuffle(buffer_size=self._batch_size * 8, seed=self._seed)
        dataset = dataset.repeat()
        dataset = dataset.map(self.parse, num_parallel_calls=tf.data.experimental.AUTOTUNE)

        # Augmentation entries become None when --no-augment is set;
        # presumably apply_transforms skips None entries — see transforms.py.
        transforms = [
            RandomCrop3D((128, 128, 128)),
            RandomHorizontalFlip() if self.params.augment else None,
            Cast(dtype=tf.float32),
            NormalizeImages(),
            RandomBrightnessCorrection() if self.params.augment else None,
            OneHotLabels(n_classes=4),
        ]

        dataset = dataset.map(
            map_func=lambda x, y, mean, stdev: apply_transforms(x, y, mean, stdev, transforms=transforms),
            num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.batch(batch_size=self._batch_size, drop_remainder=True)

        return self.prefetch(dataset, buffer_size=tf.data.experimental.AUTOTUNE)

    def eval_fn(self):
        """Evaluation pipeline over the held-out fold (single pass)."""
        dataset = tf.data.TFRecordDataset(filenames=self._eval)
        assert len(self._eval) > 0, "Evaluation data not found. Did you specify --fold flag?"

        dataset = dataset.cache()
        dataset = dataset.map(self.parse, num_parallel_calls=tf.data.experimental.AUTOTUNE)

        transforms = [
            CenterCrop((224, 224, 155)),
            Cast(dtype=tf.float32),
            NormalizeImages(),
            OneHotLabels(n_classes=4),
            PadXYZ()
        ]

        dataset = dataset.map(
            map_func=lambda x, y, mean, stdev: apply_transforms(x, y, mean, stdev, transforms=transforms),
            num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.batch(batch_size=self._batch_size, drop_remainder=False)

        return self.prefetch(dataset, buffer_size=tf.data.experimental.AUTOTUNE)

    def test_fn(self, count=1, drop_remainder=False):
        """Inference pipeline: images only, repeated *count* times."""
        dataset = tf.data.TFRecordDataset(filenames=self._eval)
        assert len(self._eval) > 0, "Evaluation data not found. Did you specify --fold flag?"

        dataset = dataset.repeat(count)
        dataset = dataset.map(self.parse_x, num_parallel_calls=tf.data.experimental.AUTOTUNE)

        transforms = [
            CenterCrop((224, 224, 155)),
            Cast(dtype=tf.float32),
            NormalizeImages(),
            PadXYZ((224, 224, 160))
        ]

        dataset = dataset.map(
            map_func=lambda x, mean, stdev: apply_test_transforms(x, mean, stdev, transforms=transforms),
            num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.batch(batch_size=self._batch_size, drop_remainder=drop_remainder)

        return self.prefetch(dataset, buffer_size=tf.data.experimental.AUTOTUNE)

    def synth_train_fn(self):
        """Synthetic training data function for testing/benchmarking."""
        inputs = tf.random.uniform(self._xshape, dtype=tf.int32, minval=0, maxval=255, seed=self._seed,
                                   name='synth_inputs')
        masks = tf.random.uniform(self._yshape, dtype=tf.int32, minval=0, maxval=4, seed=self._seed,
                                  name='synth_masks')

        dataset = tf.data.Dataset.from_tensors((inputs, masks))
        dataset = dataset.repeat()

        transforms = [
            Cast(dtype=tf.uint8),
            RandomCrop3D((128, 128, 128)),
            RandomHorizontalFlip() if self.params.augment else None,
            Cast(dtype=tf.float32),
            NormalizeImages(),
            RandomBrightnessCorrection() if self.params.augment else None,
            OneHotLabels(n_classes=4),
        ]

        dataset = dataset.map(
            map_func=lambda x, y: apply_transforms(x, y, mean=0.0, stdev=1.0, transforms=transforms),
            num_parallel_calls=1)
        dataset = dataset.batch(self._batch_size)

        return self.prefetch(dataset, buffer_size=self._batch_size)

    def synth_predict_fn(self, count=1):
        """Synthetic inference data function for testing/benchmarking."""
        inputs = tf.random.truncated_normal((64, 64, 64, 4), dtype=tf.float32, mean=0.0, stddev=1.0,
                                            seed=self._seed, name='synth_inputs')

        dataset = tf.data.Dataset.from_tensors(inputs)
        dataset = dataset.repeat(count)
        dataset = dataset.batch(self._batch_size)

        return self.prefetch(dataset, buffer_size=tf.data.experimental.AUTOTUNE)

    @property
    def train_size(self):
        """Number of tfrecord files in the training subset."""
        return len(self._train)

    @property
    def eval_size(self):
        """Number of tfrecord files in the validation subset."""
        return len(self._eval)

    @property
    def test_size(self):
        """Number of tfrecord files used for inference (same as eval)."""
        return len(self._eval)
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/dataset/preprocess_data.py
ADDED
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import os
|
16 |
+
import argparse
|
17 |
+
from random import shuffle
|
18 |
+
|
19 |
+
import numpy as np
|
20 |
+
import nibabel as nib
|
21 |
+
import tensorflow as tf
|
22 |
+
|
23 |
+
|
24 |
+
PARSER = argparse.ArgumentParser()
|
25 |
+
|
26 |
+
PARSER.add_argument('--input_dir', '-i',
|
27 |
+
type=str, help='path to the input directory with data')
|
28 |
+
|
29 |
+
PARSER.add_argument('--output_dir', '-o',
|
30 |
+
type=str, help='path to the output directory where tfrecord files will be stored')
|
31 |
+
|
32 |
+
PARSER.add_argument('--verbose', '-v', dest='verbose', action='store_true', default=False)
|
33 |
+
|
34 |
+
PARSER.add_argument('--vol_per_file', default=4, dest='vol_per_file',
|
35 |
+
type=int, help='how many volumes to pack into a single tfrecord file')
|
36 |
+
|
37 |
+
PARSER.add_argument('--single_data_dir', dest='single_data_dir', action='store_true', default=False)
|
38 |
+
|
39 |
+
|
40 |
+
def load_features(path):
    """Load the four MRI modalities of one case into a 240x240x155x4 uint8 volume.

    Each modality is clipped at 85% of its maximum intensity, rescaled to
    [0, 255], and cast to uint8 before being stacked along the last axis.

    Args:
        path: Case directory; its basename is the case name prefix of the
            per-modality nifti files inside.

    Returns:
        np.ndarray of shape (240, 240, 155, 4), dtype uint8.
    """
    modalities = ("_t1.nii.gz", "_t1ce.nii.gz", "_t2.nii.gz", "_flair.nii.gz")
    case_name = os.path.basename(path)
    stacked = np.zeros((240, 240, 155, 4), dtype=np.uint8)

    for channel, suffix in enumerate(modalities):
        volume = load_single_nifti(os.path.join(path, case_name + suffix)).astype(np.float32)
        # Clip bright outliers at 85% of the max before normalizing.
        ceiling = 0.85 * volume.max()
        volume[volume > ceiling] = ceiling
        volume = 255 * volume / volume.max()
        stacked[..., channel] = volume.astype(np.uint8)

    return stacked
|
50 |
+
|
51 |
+
|
52 |
+
def load_segmentation(path):
    """Load the segmentation mask (``<basename>_seg.nii.gz``) as a uint8 volume."""
    seg_name = "{}_seg.nii.gz".format(os.path.basename(path))
    return load_single_nifti(os.path.join(path, seg_name)).astype(np.uint8)
|
55 |
+
|
56 |
+
|
57 |
+
def load_single_nifti(path):
    """Read a NIfTI file and return its voxel data as int16 with axes 0 and 1 swapped."""
    volume = nib.load(path).get_fdata().astype(np.int16)
    # Swap the first two spatial axes to match the expected orientation.
    return volume.transpose((1, 0, 2))
|
60 |
+
|
61 |
+
|
62 |
+
def write_to_file(features_list, labels_list, foreground_mean_list, foreground_std_list, output_dir, count):
    """Pack the accumulated volumes into ``volume-<count>.tfrecord`` inside *output_dir*."""
    tfrecord_path = os.path.join(output_dir, "volume-{}.tfrecord".format(count))
    examples = list(zip(np.array(features_list),
                        np.array(labels_list),
                        np.array(foreground_mean_list),
                        np.array(foreground_std_list)))
    np_to_tfrecords(examples, tfrecord_path)
|
69 |
+
|
70 |
+
|
71 |
+
def np_to_tfrecords(filelist, output_filename):
    """Serialize (features, labels, mean, stdev) tuples into a TFRecord file.

    Args:
        filelist: iterable of 4-tuples of numpy arrays:
            (feature volume, label volume, foreground mean, foreground stdev).
        output_filename: destination TFRecord path.
    """
    # Context manager guarantees the writer is flushed and closed even if
    # serialization of one record raises (the original leaked the writer).
    with tf.io.TFRecordWriter(output_filename) as writer:
        for x_vol, y_vol, mean_arr, std_arr in filelist:
            # tobytes() replaces the deprecated tostring(), which was
            # removed in NumPy 2.0 (identical output bytes).
            x_bytes = x_vol.flatten().tobytes()
            y_bytes = y_vol.flatten().tobytes()
            mean = mean_arr.astype(np.float32).flatten()
            stdev = std_arr.astype(np.float32).flatten()

            d_feature = {
                'X': tf.train.Feature(bytes_list=tf.train.BytesList(value=[x_bytes])),
                'Y': tf.train.Feature(bytes_list=tf.train.BytesList(value=[y_bytes])),
                'mean': tf.train.Feature(float_list=tf.train.FloatList(value=mean)),
                'stdev': tf.train.Feature(float_list=tf.train.FloatList(value=stdev)),
            }

            example = tf.train.Example(features=tf.train.Features(feature=d_feature))
            writer.write(example.SerializeToString())
|
91 |
+
|
92 |
+
|
93 |
+
def main():
    """Convert BraTS patient folders into packed TFRecord files.

    Each output file holds ``--vol_per_file`` volumes together with their
    per-channel foreground mean/stdev (used downstream for normalization).
    """
    # parse arguments
    params = PARSER.parse_args()
    input_dir = params.input_dir
    output_dir = params.output_dir
    os.makedirs(params.output_dir, exist_ok=True)

    # Collect patient directories: either one flat folder, or HGG/LGG split.
    patient_list = []
    if params.single_data_dir:
        patient_list.extend([os.path.join(input_dir, folder) for folder in os.listdir(input_dir)])
    else:
        assert "HGG" in os.listdir(input_dir) and "LGG" in os.listdir(input_dir),\
            "Data directory has to contain folders named HGG and LGG. " \
            "If you have a single folder with patient's data please set --single_data_dir flag"
        path_hgg = os.path.join(input_dir, "HGG")
        path_lgg = os.path.join(input_dir, "LGG")
        patient_list.extend([os.path.join(path_hgg, folder) for folder in os.listdir(path_hgg)])
        patient_list.extend([os.path.join(path_lgg, folder) for folder in os.listdir(path_lgg)])
    # NOTE(review): shuffle is unseeded, so the volume-to-file packing differs
    # between runs — confirm whether reproducible packing is required.
    shuffle(patient_list)

    features_list = []
    labels_list = []
    foreground_mean_list = []
    foreground_std_list = []
    count = 0  # index of the next TFRecord file to write

    # Ceiling division: one extra file for a partial final batch.
    total_tfrecord_files = len(patient_list) // params.vol_per_file + (1 if len(patient_list) % params.vol_per_file
                                                                       else 0)
    for i, folder in enumerate(patient_list):

        # Calculate mean and stdev only for foreground voxels
        features = load_features(folder)
        foreground = features > 0
        # The comprehension's `i` shadows the loop index but lives in its own
        # scope (Python 3 comprehension scoping), so the outer `i` is unaffected.
        fg_mean = np.array([(features[..., i][foreground[..., i]]).mean() for i in range(features.shape[-1])])
        fg_std = np.array([(features[..., i][foreground[..., i]]).std() for i in range(features.shape[-1])])

        # BraTS labels are 0,1,2,4 -> switching to 0,1,2,3
        labels = load_segmentation(folder)
        labels[labels == 4] = 3

        features_list.append(features)
        labels_list.append(labels)
        foreground_mean_list.append(fg_mean)
        foreground_std_list.append(fg_std)

        # Flush one TFRecord file every vol_per_file volumes.
        if (i+1) % params.vol_per_file == 0:
            write_to_file(features_list, labels_list, foreground_mean_list, foreground_std_list, output_dir, count)

            # Clear lists
            features_list = []
            labels_list = []
            foreground_mean_list = []
            foreground_std_list = []
            count += 1

            if params.verbose:
                print("{}/{} tfrecord files created".format(count, total_tfrecord_files))

    # create one more file if there are any remaining unpacked volumes
    if features_list:
        write_to_file(features_list, labels_list, foreground_mean_list, foreground_std_list, output_dir, count)
        count += 1
        if params.verbose:
            print("{}/{} tfrecord files created".format(count, total_tfrecord_files))
|
157 |
+
|
158 |
+
|
159 |
+
# Script entry point: run preprocessing when executed directly.
if __name__ == '__main__':
    main()
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/dataset/transforms.py
ADDED
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - script migration to Tensorflow 2.x version
|
19 |
+
# - added seed setting possibility to random operations
|
20 |
+
|
21 |
+
import tensorflow as tf
|
22 |
+
|
23 |
+
from runtime.arguments import parse_args
|
24 |
+
|
25 |
+
params = parse_args()
|
26 |
+
|
27 |
+
|
28 |
+
def apply_transforms(x, y, mean, stdev, transforms):
    """Run every non-None transform in order on the (features, labels) pair."""
    for transform in transforms:
        if transform is None:
            continue
        x, y = transform(x, y, mean, stdev)
    return x, y
|
33 |
+
|
34 |
+
|
35 |
+
def apply_test_transforms(x, mean, stdev, transforms):
    """Run every non-None transform on features only (no labels at test time)."""
    for transform in transforms:
        if transform is not None:
            x = transform(x, y=None, mean=mean, stdev=stdev)
    return x
|
40 |
+
|
41 |
+
|
42 |
+
class PadXYZ:
    """Zero-pad the depth (third) axis by 5 voxels at the end."""

    def __init__(self, shape=None):
        # `shape` is stored for interface compatibility; padding is fixed.
        self.shape = shape

    def __call__(self, x, y, mean, stdev):
        pad_spec = tf.constant([[0, 0], [0, 0], [0, 5], [0, 0]])
        padded_x = tf.pad(tensor=x, paddings=pad_spec, mode="CONSTANT")
        if y is None:
            return padded_x
        return padded_x, tf.pad(tensor=y, paddings=pad_spec, mode="CONSTANT")
|
53 |
+
|
54 |
+
|
55 |
+
class CenterCrop:
    """Crop features (and labels, when given) to *shape* around the spatial center."""

    def __init__(self, shape):
        self.shape = shape

    def __call__(self, x, y, mean, stdev):
        full_shape = x.get_shape()
        # Offset of the crop window so it is centered on each axis.
        offset = [(full_shape[axis] - self.shape[axis]) // 2 for axis in range(len(self.shape))]
        x = x[offset[0]:offset[0] + self.shape[0],
              offset[1]:offset[1] + self.shape[1],
              offset[2]:offset[2] + self.shape[2]]
        if y is None:
            return x
        y = y[offset[0]:offset[0] + self.shape[0],
              offset[1]:offset[1] + self.shape[1],
              offset[2]:offset[2] + self.shape[2]]
        return x, y
|
75 |
+
|
76 |
+
|
77 |
+
class RandomCrop3D:
    """Crop a random *shape*-sized 3D window, keeping *margins* from each border."""

    def __init__(self, shape, margins=(0, 0, 0)):
        self.shape = shape
        self.margins = margins

    def __call__(self, x, y, mean, stdev):
        full_shape = x.get_shape()
        lo = tf.constant(self.margins, dtype=tf.float32)
        hi = tf.constant(
            [full_shape[axis] - self.shape[axis] - self.margins[axis] for axis in range(3)],
            dtype=tf.float32)
        # Sample the crop corner uniformly inside the allowed region.
        corner = tf.cast(
            tf.random.uniform((len(self.shape),), minval=lo, maxval=hi, seed=params.seed),
            dtype=tf.int32)
        x = x[corner[0]:corner[0] + self.shape[0],
              corner[1]:corner[1] + self.shape[1],
              corner[2]:corner[2] + self.shape[2]]
        if y is None:
            return x
        y = y[corner[0]:corner[0] + self.shape[0],
              corner[1]:corner[1] + self.shape[1],
              corner[2]:corner[2] + self.shape[2]]
        return x, y
|
99 |
+
|
100 |
+
|
101 |
+
class NormalizeImages:
    """Standardize foreground voxels (x > 0) with the provided mean and stdev."""

    def __init__(self):
        pass

    def __call__(self, x, y, mean, stdev):
        foreground = tf.math.greater(x, 0)
        standardized = (x - tf.cast(mean, x.dtype)) / (tf.cast(stdev + 1e-8, x.dtype))
        # Background voxels are left untouched.
        x = tf.compat.v1.where(foreground, standardized, x)
        if y is None:
            return x
        return x, y
|
112 |
+
|
113 |
+
|
114 |
+
class Cast:
    """Cast the features to the configured dtype; labels pass through unchanged."""

    def __init__(self, dtype=tf.float32):
        self._dtype = dtype

    def __call__(self, x, y, mean, stdev):
        casted = tf.cast(x, dtype=self._dtype)
        return casted if y is None else (casted, y)
|
122 |
+
|
123 |
+
|
124 |
+
class RandomHorizontalFlip:
    """Flip features and labels along axis 1 with probability 1 - threshold."""

    def __init__(self, threshold=0.5):
        self._threshold = threshold

    def __call__(self, x, y, mean, stdev):
        do_flip = tf.random.uniform([], seed=params.seed) > self._threshold

        def maybe_flip(tensor):
            # Both tensors share the same flip decision.
            return tf.cond(pred=do_flip,
                           true_fn=lambda: tf.reverse(tensor, axis=[1]),
                           false_fn=lambda: tensor)

        return maybe_flip(x), maybe_flip(y)
|
135 |
+
|
136 |
+
|
137 |
+
class RandomVerticalFlip:
    """Flip features and labels along axis 0 with probability 1 - threshold."""

    def __init__(self, threshold=0.5):
        self._threshold = threshold

    def __call__(self, x, y, mean, stdev):
        do_flip = tf.random.uniform([], seed=params.seed) > self._threshold

        def maybe_flip(tensor):
            # Both tensors share the same flip decision.
            return tf.cond(pred=do_flip,
                           true_fn=lambda: tf.reverse(tensor, axis=[0]),
                           false_fn=lambda: tensor)

        return maybe_flip(x), maybe_flip(y)
|
148 |
+
|
149 |
+
|
150 |
+
class RandomGammaCorrection:
    """With probability 1 - threshold, apply a random gamma curve to the features.

    The input is shifted/scaled to [0, 1], raised to a gamma sampled from
    *gamma_range*, and mapped back to the original intensity range.
    Labels are returned unchanged.
    """

    def __init__(self, gamma_range=(0.8, 1.5), keep_stats=False, threshold=0.5, epsilon=1e-8):
        self._gamma_range = gamma_range
        # NOTE(review): keep_stats is stored but never read in this class.
        self._keep_stats = keep_stats
        self._eps = epsilon
        self._threshold = threshold

    def __call__(self, x, y, mean, stdev):
        augment = tf.random.uniform([], seed=params.seed) > self._threshold
        gamma = tf.random.uniform([], minval=self._gamma_range[0], maxval=self._gamma_range[1], seed=params.seed)

        x_min = tf.math.reduce_min(input_tensor=x)
        x_range = tf.math.reduce_max(input_tensor=x) - x_min

        # NOTE(review): float(x_range + eps) calls float() on a tensor, which
        # only succeeds when the value is a concrete (eager) scalar — verify
        # this transform is not used inside a graph-mode input pipeline.
        x = tf.cond(pred=augment,
                    true_fn=lambda: tf.math.pow(((x - x_min) / float(x_range + self._eps)), gamma) * x_range + x_min,
                    false_fn=lambda: x)
        return x, y
|
168 |
+
|
169 |
+
|
170 |
+
class RandomBrightnessCorrection:
    """With probability 1 - threshold, add a random offset to foreground voxels.

    The offset is sampled (optionally per channel) from [1 - alpha, 1 + alpha].
    Labels are returned unchanged.
    """

    def __init__(self, alpha=0.1, threshold=0.5, per_channel=True):
        self._alpha_range = [1.0 - alpha, 1.0 + alpha]
        self._threshold = threshold
        self._per_channel = per_channel

    def __call__(self, x, y, mean, stdev):
        foreground = tf.math.greater(x, 0)
        n_channels = x.get_shape()[-1] if self._per_channel else 1
        apply_aug = tf.random.uniform([], seed=params.seed) > self._threshold
        offset = tf.random.uniform([n_channels],
                                   minval=self._alpha_range[0],
                                   maxval=self._alpha_range[1],
                                   dtype=x.dtype, seed=params.seed)

        # Only foreground voxels receive the brightness offset.
        x = tf.cond(pred=apply_aug,
                    true_fn=lambda: tf.compat.v1.where(foreground, x + offset, x),
                    false_fn=lambda: x)

        return x, y
|
190 |
+
|
191 |
+
|
192 |
+
class OneHotLabels:
    """One-hot encode labels into n_classes channels; features pass through."""

    def __init__(self, n_classes=1):
        self._n_classes = n_classes

    def __call__(self, x, y, mean, stdev):
        encoded = tf.one_hot(y, self._n_classes)
        return x, encoded
|
198 |
+
|
199 |
+
|
200 |
+
class PadXY:
    """Zero-pad the leading axes of features and labels up to *dst_size*."""

    def __init__(self, dst_size=None):
        if not dst_size:
            raise ValueError("Invalid padding size: {}".format(dst_size))

        self._dst_size = dst_size

    def __call__(self, x, y, mean, stdev):
        padded_x = tf.pad(tensor=x, paddings=self._build_padding(x))
        padded_y = tf.pad(tensor=y, paddings=self._build_padding(y))
        return padded_x, padded_y

    def _build_padding(self, tensor):
        # Pad each leading axis up to the target size; trailing axes untouched.
        return [(0, self._dst_size[axis] - tensor.shape[axis])
                if axis < len(self._dst_size) else (0, 0)
                for axis in range(len(tensor.shape))]
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/main.py
ADDED
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - script migration to Tensorflow 2.x version
|
19 |
+
# - included HPU horovod setup
|
20 |
+
# - added tensorboard logging functionality
|
21 |
+
# - added TumorCore, PeritumoralEdema and EnhancingTumor metrics to evaluation results
|
22 |
+
# - debug_train and debug_predict options have been removed
|
23 |
+
# - added tensor dumping possibility for training with dump_callback function
|
24 |
+
# - added TimeToTrain estimator for dumping evaluation timestamps
|
25 |
+
|
26 |
+
import os
|
27 |
+
import logging
|
28 |
+
|
29 |
+
import numpy as np
|
30 |
+
import tensorflow as tf
|
31 |
+
|
32 |
+
from dataset.data_loader import Dataset, CLASSES
|
33 |
+
from runtime.hooks import get_hooks
|
34 |
+
from runtime.arguments import parse_args
|
35 |
+
from runtime.setup import prepare_model_dir, build_estimator, setup_horovod, set_flags, get_logger
|
36 |
+
from TensorFlow.common.debug import dump_callback
|
37 |
+
|
38 |
+
|
39 |
+
def parse_evaluation_results(result):
    """Convert the estimator's evaluation dict into a loggable metrics dict.

    Builds per-class dice entries keyed by CLASSES, plus MeanDice and
    WholeTumor, all stringified for the logger.

    NOTE(review): TumorCore/PeritumoralEdema/EnhancingTumor are re-read from
    ``data`` (not ``result``) — presumably those names are members of CLASSES,
    placed there by the first line; verify against dataset.data_loader.CLASSES.
    """
    data = {CLASSES[i]: result[CLASSES[i]] for i in range(len(CLASSES))}
    data['MeanDice'] = str(sum([result[CLASSES[i]] for i in range(len(CLASSES))]) / len(CLASSES))
    data['WholeTumor'] = str(result['WholeTumor'])
    data['TumorCore'] = str(data['TumorCore'])
    data['PeritumoralEdema'] = str(data['PeritumoralEdema'])
    data['EnhancingTumor'] = str(data['EnhancingTumor'])
    return data
|
47 |
+
|
48 |
+
|
49 |
+
def main():
    """Entry point: train / evaluate / predict a 3D U-Net via tf.estimator."""
    params = parse_args()
    tf.random.set_seed(params.seed)
    tf.get_logger().setLevel(logging.ERROR)

    # Horovod setup (when enabled) fills in worker id / count on params.
    params = setup_horovod(params)
    set_flags(params)
    model_dir = prepare_model_dir(params)
    logger = get_logger(params)

    dataset = Dataset(data_dir=params.data_dir,
                      batch_size=params.batch_size,
                      fold_idx=params.fold,
                      n_folds=params.num_folds,
                      params=params,
                      seed=params.seed)

    estimator = build_estimator(params, model_dir)

    # Log hyperparameters once (worker 0, or every worker when requested).
    if params.tensorboard_logging and (params.worker_id == 0 or params.log_all_workers):
        from TensorFlow.common.tb_utils import write_hparams_v1
        write_hparams_v1(params.log_dir, vars(params))

    if params.tensorboard_logging:
        from TensorFlow.common.tb_utils import TimeToTrainEstimatorHook
        # Records evaluation begin/end timestamps for time-to-train measurement.
        ttt = TimeToTrainEstimatorHook(train_or_eval ='eval', output_dir=params.log_dir)

    if not params.benchmark:
        # Split the global step budget evenly across workers.
        params.max_steps = params.max_steps // params.num_workers
    if 'train' in params.exec_mode:
        # dump_callback optionally records intermediate tensors for debugging.
        with dump_callback(params.dump_config):
            training_hooks = get_hooks(params, logger)
            dataset_fn = dataset.synth_train_fn if params.synth_data else dataset.train_fn

            estimator.train(
                input_fn=dataset_fn,
                steps=params.max_steps,
                hooks=training_hooks)

    if 'evaluate' in params.exec_mode:
        if params.tensorboard_logging:
            ttt.begin()
        result = estimator.evaluate(input_fn=dataset.eval_fn, steps=dataset.eval_size)
        if params.tensorboard_logging:
            ttt.end(session=None)
        data = parse_evaluation_results(result)
        if params.worker_id == 0:
            logger.log(step=(), data=data)

    if 'predict' == params.exec_mode:
        inference_hooks = get_hooks(params, logger)
        if params.worker_id == 0:
            # Benchmark mode replays the test set enough times to cover
            # 2 * warmup_steps batches.
            count = 1 if not params.benchmark else 2 * params.warmup_steps * params.batch_size // dataset.test_size
            predictions = estimator.predict(
                input_fn=lambda: dataset.test_fn(count=count,
                                                 drop_remainder=params.benchmark), hooks=inference_hooks)

            # Persist each predicted volume unless we are only benchmarking.
            for idx, p in enumerate(predictions):
                volume = p['predictions']
                if not params.benchmark:
                    np.save(os.path.join(params.model_dir, "vol_{}.npy".format(idx)), volume)
|
110 |
+
|
111 |
+
|
112 |
+
if __name__ == '__main__':
    # The estimator-based pipeline requires graph mode.
    tf.compat.v1.disable_eager_execution()
    main()
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/model/layers.py
ADDED
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2021-2023 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - script migration to Tensorflow 2.x version
|
19 |
+
# - tf.contrib.layers.instance_norm, tf.contrib.layers.group_norm replaced with its tensorflow_addons counterparts
|
20 |
+
# - added support for HabanaInstanceNormalization
|
21 |
+
# - added seed setting possibility to glorot_uniform_initializer operations
|
22 |
+
|
23 |
+
import tensorflow as tf
|
24 |
+
from keras.layers import GroupNormalization
|
25 |
+
from runtime.arguments import parse_args
|
26 |
+
|
27 |
+
|
28 |
+
params = parse_args()
|
29 |
+
|
30 |
+
|
31 |
+
def _normalization(inputs, name, mode):
    """Apply the normalization layer selected by *name* to *inputs*.

    Args:
        inputs: channels-last feature tensor.
        name: one of 'instancenorm', 'groupnorm', 'batchnorm', 'none'.
        mode: tf.estimator ModeKeys value; batch statistics are updated
            only in TRAIN mode.

    Returns:
        The normalized tensor (or *inputs* unchanged for 'none').

    Raises:
        ValueError: for an unknown normalization name.
    """
    training = mode == tf.estimator.ModeKeys.TRAIN
    if name == 'instancenorm':
        gamma_initializer = tf.compat.v1.constant_initializer(1.0)
        if not params.no_hpu:
            from habana_frameworks.tensorflow.ops.instance_norm import HabanaInstanceNormalization
            norm_layer = HabanaInstanceNormalization(gamma_initializer=gamma_initializer,
                                                     epsilon=1e-6)
        else:
            # BUG FIX: previously an already-constructed GroupNormalization
            # instance was later *called* with constructor kwargs
            # (gamma_initializer/epsilon), raising a TypeError at runtime.
            # Construct it with all kwargs here instead. groups=-1 makes
            # GroupNormalization behave as instance normalization.
            norm_layer = GroupNormalization(groups=-1,
                                            gamma_initializer=gamma_initializer,
                                            epsilon=1e-6)
        return norm_layer(inputs, training=training)

    if name == 'groupnorm':
        return GroupNormalization(groups=16, axis=-1)(inputs, training=training)

    if name == 'batchnorm':
        return tf.compat.v1.keras.layers.BatchNormalization(axis=-1,
                                                            trainable=True,
                                                            virtual_batch_size=None)(inputs, training=training)
    if name == 'none':
        return inputs

    raise ValueError('Invalid normalization layer')
|
54 |
+
|
55 |
+
|
56 |
+
def _activation(x, activation):
    """Apply the named activation: 'relu', 'leaky_relu', 'sigmoid', 'softmax' or 'none'.

    Raises:
        ValueError: for an unknown activation name.
    """
    dispatch = {
        'relu': tf.nn.relu,
        'leaky_relu': lambda t: tf.nn.leaky_relu(t, alpha=0.01),
        'sigmoid': tf.nn.sigmoid,
        'softmax': tf.nn.softmax,
        'none': lambda t: t,
    }
    if activation not in dispatch:
        raise ValueError("Unknown activation {}".format(activation))
    return dispatch[activation](x)
|
69 |
+
|
70 |
+
|
71 |
+
def convolution(x,
                out_channels,
                kernel_size=3,
                stride=1,
                mode=tf.estimator.ModeKeys.TRAIN,
                normalization='batchnorm',
                activation='leaky_relu',
                transpose=False):
    """3D convolution (or transpose convolution) + normalization + activation.

    The conv layer itself is linear; normalization and activation are
    applied afterwards in that order.
    """
    layer_cls = tf.keras.layers.Conv3DTranspose if transpose else tf.keras.layers.Conv3D

    x = layer_cls(filters=out_channels,
                  kernel_size=kernel_size,
                  strides=stride,
                  activation=None,
                  padding='same',
                  data_format='channels_last',
                  kernel_initializer=tf.compat.v1.glorot_uniform_initializer(seed=params.seed),
                  kernel_regularizer=None,
                  bias_initializer=tf.compat.v1.zeros_initializer(),
                  bias_regularizer=None)(x)

    return _activation(_normalization(x, normalization, mode), activation)
|
97 |
+
|
98 |
+
|
99 |
+
def upsample_block(x, skip_connection, out_channels, normalization, mode):
    """Transpose-conv upsample by 2, concatenate the skip connection, then two conv blocks."""
    x = convolution(x, kernel_size=2, out_channels=out_channels, stride=2,
                    normalization='none', activation='none', transpose=True)
    x = tf.keras.layers.Concatenate(axis=-1)([x, skip_connection])

    for _ in range(2):
        x = convolution(x, out_channels=out_channels, normalization=normalization, mode=mode)
    return x
|
107 |
+
|
108 |
+
|
109 |
+
def input_block(x, out_channels, normalization, mode):
    """Two successive convolution blocks at the input resolution."""
    for _ in range(2):
        x = convolution(x, out_channels=out_channels, normalization=normalization, mode=mode)
    return x
|
113 |
+
|
114 |
+
|
115 |
+
def downsample_block(x, out_channels, normalization, mode):
    """Stride-2 convolution (halves each spatial dim) followed by a stride-1 convolution."""
    downsampled = convolution(x, out_channels=out_channels, normalization=normalization, mode=mode, stride=2)
    return convolution(downsampled, out_channels=out_channels, normalization=normalization, mode=mode)
|
118 |
+
|
119 |
+
|
120 |
+
def linear_block(x, out_channels, mode, activation='leaky_relu', normalization='none'):
    """Two convolution blocks; *activation* applies only to the second one."""
    hidden = convolution(x, out_channels=out_channels, normalization=normalization, mode=mode)
    return convolution(hidden, out_channels=out_channels, activation=activation, mode=mode, normalization=normalization)
|
123 |
+
|
124 |
+
|
125 |
+
def output_layer(x, out_channels, activation):
    """Final 3x3x3 Conv3D projection to *out_channels*, followed by *activation*."""
    projection = tf.keras.layers.Conv3D(out_channels,
                                        kernel_size=3,
                                        activation=None,
                                        padding='same',
                                        kernel_regularizer=None,
                                        kernel_initializer=tf.compat.v1.glorot_uniform_initializer(seed=params.seed),
                                        bias_initializer=tf.compat.v1.zeros_initializer(),
                                        bias_regularizer=None)(x)
    return _activation(projection, activation)
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/model/losses.py
ADDED
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - script migration to Tensorflow 2.x version
|
19 |
+
|
20 |
+
import tensorflow as tf
|
21 |
+
|
22 |
+
|
23 |
+
def make_loss(params, y_true, y_pred):
    """Build the training loss selected by params.loss: 'dice', 'ce' or 'dice+ce'.

    Raises:
        ValueError: for an unknown loss name.
    """
    builders = {
        'dice': lambda: _dice(y_true, y_pred),
        'ce': lambda: _ce(y_true, y_pred),
        'dice+ce': lambda: tf.add(_ce(y_true, y_pred), _dice(y_true, y_pred), name="total_loss_ref"),
    }
    if params.loss not in builders:
        raise ValueError('Unknown loss: {}'.format(params.loss))
    return builders[params.loss]()
|
32 |
+
|
33 |
+
|
34 |
+
def _ce(y_true, y_pred):
    """Binary cross-entropy averaged over batch and spatial dims, summed over classes."""
    per_class = tf.reduce_mean(
        input_tensor=tf.keras.backend.binary_crossentropy(tf.cast(y_true, tf.float32), y_pred),
        axis=[0, 1, 2, 3])
    return tf.reduce_sum(input_tensor=per_class, name='crossentropy_loss_ref')
|
39 |
+
|
40 |
+
|
41 |
+
def _dice(y_true, y_pred):
    """Soft dice loss summed over all classes."""
    per_class_loss = dice_loss(predictions=y_pred, targets=y_true)
    return tf.reduce_sum(input_tensor=per_class_loss, name='dice_loss_ref')
|
43 |
+
|
44 |
+
|
45 |
+
def eval_dice(y_true, y_pred):
    """Per-class dice score (1 - dice loss) used at evaluation time."""
    loss = dice_loss(predictions=y_pred, targets=y_true)
    return 1 - loss
|
47 |
+
|
48 |
+
|
49 |
+
def dice_loss(predictions,
              targets,
              squared_pred=False,
              smooth=1e-5,
              top_smooth=0.0):
    """Per-class soft dice loss (1 - dice), averaged over the batch.

    Args:
        predictions: predicted probabilities, channels-last.
        targets: one-hot ground truth, same shape as predictions.
        squared_pred: square targets/predictions in the denominator terms.
        smooth: additive smoothing for the denominator.
        top_smooth: additive smoothing for the numerator.

    Returns:
        Tensor of shape (n_classes,) holding 1 - dice per class.
    """
    is_channels_first = False

    # Reduce over spatial axes only, keeping the batch and channel dims.
    n_len = len(predictions.get_shape())
    reduce_axis = list(range(2, n_len)) if is_channels_first else list(range(1, n_len - 1))
    # NOTE: intersection is intentionally computed before the optional
    # squaring below — only the denominator terms use squared values.
    intersection = tf.reduce_sum(input_tensor=targets * predictions, axis=reduce_axis)

    if squared_pred:
        targets = tf.square(targets)
        predictions = tf.square(predictions)

    y_true_o = tf.reduce_sum(input_tensor=targets, axis=reduce_axis)
    y_pred_o = tf.reduce_sum(input_tensor=predictions, axis=reduce_axis)

    denominator = y_true_o + y_pred_o

    f = (2.0 * intersection + top_smooth) / (denominator + smooth)

    # Mean over the batch axis; result keeps the per-class axis.
    return 1 - tf.reduce_mean(input_tensor=f, axis=0)
|
72 |
+
|
73 |
+
|
74 |
+
def total_dice(predictions,
               targets,
               smooth=1e-5,
               top_smooth=0.0):
    """Whole-tumor dice score: class channels are collapsed before scoring.

    Args:
        predictions: predicted probabilities, channels-last.
        targets: one-hot ground truth, same shape as predictions.
        smooth: additive smoothing for the denominator.
        top_smooth: additive smoothing for the numerator.

    Returns:
        Scalar mean dice over the batch.
    """
    n_len = len(predictions.get_shape())
    reduce_axis = list(range(1, n_len-1))
    # Collapse the class channel so any-class voxels count as tumor.
    targets = tf.reduce_sum(input_tensor=targets, axis=-1)
    predictions = tf.reduce_sum(input_tensor=predictions, axis=-1)
    intersection = tf.reduce_sum(input_tensor=targets * predictions, axis=reduce_axis)

    y_true_o = tf.reduce_sum(input_tensor=targets, axis=reduce_axis)
    y_pred_o = tf.reduce_sum(input_tensor=predictions, axis=reduce_axis)

    denominator = y_true_o + y_pred_o

    return tf.reduce_mean(input_tensor=(2.0 * intersection + top_smooth) / (denominator + smooth))
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/model/model_fn.py
ADDED
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - script migration to Tensorflow 2.x version
|
19 |
+
# - added 'accuracy' metrics for consistency
|
20 |
+
# - added tensorboard loss logging
|
21 |
+
# - moved horovod import under use_horovod condition so that the user is not required to install this
|
22 |
+
# library when the model is being run on a single card
|
23 |
+
|
24 |
+
import os
|
25 |
+
|
26 |
+
import tensorflow as tf
|
27 |
+
|
28 |
+
from model.unet3d import Builder
|
29 |
+
from model.losses import make_loss, eval_dice, total_dice
|
30 |
+
from dataset.data_loader import CLASSES
|
31 |
+
|
32 |
+
|
33 |
+
def unet_3d(features, labels, mode, params):
    """Estimator model_fn for 3D-UNet brain-tumor segmentation.

    Builds the network and returns a `tf.estimator.EstimatorSpec` for the
    PREDICT, EVAL or TRAIN mode.

    Args:
        features: input image batch fed by the input_fn.
        labels: one-hot segmentation labels (ignored in PREDICT mode).
        mode: a `tf.estimator.ModeKeys` value.
        params: hyper-parameter namespace (normalization, learning_rate,
            include_background, use_horovod, use_amp, logging flags, ...).
    """

    # The network ends in a softmax over 4 classes (see model.unet3d.Builder),
    # so despite the name these "logits" are per-class probabilities.
    logits = Builder(n_classes=4, normalization=params.normalization, mode=mode)(features)

    if mode == tf.estimator.ModeKeys.PREDICT:
        # Argmax over the class axis; int8 keeps the prediction payload small.
        prediction = tf.argmax(input=logits, axis=-1, output_type=tf.dtypes.int32)
        return tf.estimator.EstimatorSpec(mode=mode,
                                          predictions={'predictions': tf.cast(prediction, tf.int8)})

    labels = tf.cast(labels, tf.float32)
    if not params.include_background:
        # Drop channel 0 (background) from both tensors before scoring.
        labels = labels[..., 1:]
        logits = logits[..., 1:]

    if mode == tf.estimator.ModeKeys.EVAL:
        # tf.round thresholds the softmax probabilities at 0.5 to obtain
        # binary per-class masks for the dice computation.
        eval_acc = eval_dice(y_true=labels, y_pred=tf.round(logits))
        total_eval_acc = total_dice(tf.round(logits), labels)
        metrics = {CLASSES[i]: tf.compat.v1.metrics.mean(eval_acc[i]) for i in range(eval_acc.shape[-1])}
        metrics['WholeTumor'] = tf.compat.v1.metrics.mean(total_eval_acc)
        metrics['accuracy'] = metrics['WholeTumor']  # for consistency
        # NOTE(review): the reported EVAL "loss" is the mean dice score,
        # not a training-style loss — higher is better here.
        return tf.estimator.EstimatorSpec(mode=mode, loss=tf.reduce_mean(input_tensor=eval_acc),
                                          eval_metric_ops=metrics)

    loss = make_loss(params, y_pred=logits, y_true=labels)
    # Named so hooks can fetch it by tensor name ('total_loss_ref:0').
    loss = tf.identity(loss, name="total_loss_ref")

    global_step = tf.compat.v1.train.get_or_create_global_step()

    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=params.learning_rate)
    if params.use_horovod:
        # Imported lazily so single-card runs don't need horovod installed.
        import horovod.tensorflow as hvd
        optimizer = hvd.DistributedOptimizer(optimizer)

    # NGC has TF_ENABLE_AUTO_MIXED_PRECISION enabled by default. We cannot use
    # both graph_rewrite and envar, so if we're not in NGC we do graph_rewrite
    try:
        amp_envar = int(os.environ['TF_ENABLE_AUTO_MIXED_PRECISION']) == 1
    except KeyError:
        amp_envar = False

    if params.use_amp and not amp_envar:
        optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
            optimizer,
            loss_scale='dynamic'
        )

    # Run batch-norm (UPDATE_OPS) updates before each optimizer step.
    with tf.control_dependencies(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)):
        train_op = optimizer.minimize(loss, global_step=global_step)

    training_hooks = []
    if params.tensorboard_logging and (params.worker_id == 0 or params.log_all_workers):
        tf.compat.v1.summary.scalar("loss", loss)
        training_hooks += [tf.estimator.SummarySaverHook(params.log_every,
                                                         output_dir=params.log_dir, summary_op=tf.compat.v1.summary.merge_all())]

    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, train_op=train_op, training_hooks=training_hooks)
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/model/unet3d.py
ADDED
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
from model.layers import downsample_block, upsample_block, output_layer, input_block
|
16 |
+
|
17 |
+
|
18 |
+
class Builder:
    """Assembles the 3D-UNet graph: a 32-channel input block, four
    downsampling encoder stages, a bottleneck, a mirrored decoder with skip
    connections, and a softmax output layer over `n_classes` classes.
    """

    # Encoder stage widths below the 32-channel input block; the decoder
    # mirrors them in reverse order.
    _DOWN_CHANNELS = (64, 128, 256, 320)

    def __init__(self, n_classes, mode, normalization='none'):
        self._n_classes = n_classes
        self._mode = mode
        self._normalization = normalization

    def __call__(self, features):
        common = dict(normalization=self._normalization, mode=self._mode)

        # Encoder: retain every resolution level for the skip connections.
        skips = [input_block(x=features, out_channels=32, **common)]
        for width in self._DOWN_CHANNELS:
            skips.append(downsample_block(x=skips[-1], out_channels=width, **common))

        # Bottleneck (deepest level, no skip kept).
        x = downsample_block(x=skips[-1], out_channels=320, **common)

        # Decoder: consume skips deepest-first with mirrored channel widths.
        for skip, width in zip(reversed(skips), (320, 256, 128, 64, 32)):
            x = upsample_block(x, skip, out_channels=width, **common)

        return output_layer(x=x,
                            out_channels=self._n_classes,
                            activation='softmax')
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/requirements.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
git+https://github.com/nvidia/dllogger@26a0f8f1958de2c0c460925ff6102a4d2486d6cc#egg=dllogger
|
2 |
+
munch==2.5.0
|
3 |
+
nibabel==3.2.1
|
4 |
+
PyYAML==6.0.0
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/runtime/arguments.py
ADDED
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - default values for exec_mode, max_steps, log_dir, augment, use_xla, batch_size
|
19 |
+
# - added dtype, num_workers_per_hls, kubernetes_run, use_horovod, no-augment, no_xla,
|
20 |
+
# seed, no_hpu, tensorboard_logging, log_all_workers, tf_verbosity, bf16_config_path options,
|
21 |
+
# dump_config, synth_data, disable_ckpt_saving
|
22 |
+
# - included missing help for all available flags
|
23 |
+
# - parser output has been wrapped with a Munch dict for more elegant parameter access
|
24 |
+
|
25 |
+
|
26 |
+
import os
|
27 |
+
import argparse
|
28 |
+
from pathlib import Path
|
29 |
+
|
30 |
+
from munch import Munch
|
31 |
+
|
32 |
+
|
33 |
+
def get_parser(description, distributed_launcher):
    """Build the UNet-3D command-line argument parser.

    Args:
        description: text shown in the parser's --help header.
        distributed_launcher: when True, help text for the multi-card flags
            (num_workers_per_hls, hls_type, kubernetes_run) is shown;
            otherwise those flags are accepted but hidden (SUPPRESS).

    Returns:
        A configured `argparse.ArgumentParser`.

    Fix: `--kubernetes_run` previously used `type=bool`, which treats ANY
    non-empty string — including "False" — as True. It now parses textual
    booleans explicitly while keeping the same CLI surface.
    """

    def _str_to_bool(value):
        # argparse's `type=bool` is a well-known trap: bool("False") is True.
        # Accept the usual textual spellings; anything else means False.
        if isinstance(value, bool):
            return value
        return value.strip().lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser(description=description)

    # Estimator flags
    parser.add_argument('--model_dir', required=True, type=str,
                        help="""Output directory for information related to the model""")
    parser.add_argument('--exec_mode', default="train_and_evaluate",
                        choices=['train', 'evaluate', 'train_and_evaluate', 'predict'], type=str,
                        help="""Execution mode of running the model""")

    # Training flags
    parser.add_argument('--benchmark', dest='benchmark', action='store_true', default=False,
                        help="""Collect performance metrics during training""")
    parser.add_argument('--max_steps', default=16000, type=int,
                        help="""Maximum number of steps used for training""")
    parser.add_argument('--learning_rate', default=0.0002, type=float,
                        help="""Learning rate coefficient for AdamOptimizer""")
    parser.add_argument('--log_every', default=100, type=int,
                        help="""Log data every n steps""")
    parser.add_argument('--log_dir', default="/tmp/unet3d_logs", type=str,
                        help="""Output directory for logs""")
    parser.add_argument('--loss', choices=['dice', 'ce', 'dice+ce'], default='dice+ce', type=str,
                        help="""Loss function to be used during training""")
    parser.add_argument('--warmup_steps', default=40, type=int,
                        help="""Number of warmup steps""")
    parser.add_argument('--normalization', choices=['instancenorm', 'batchnorm', 'groupnorm'],
                        default='instancenorm', type=str,
                        help="""Normalization block to be applied in the model""")
    parser.add_argument('--include_background', dest='include_background', action='store_true', default=False,
                        help="""Include background both in predictions and labels""")
    parser.add_argument('--resume_training', dest='resume_training', action='store_true', default=False,
                        help="""Resume training from a checkpoint""")
    parser.add_argument('--num_workers_per_hls', dest='num_workers_per_hls', type=int, default=1,
                        help="""Number of workers for single HLS""" if distributed_launcher else argparse.SUPPRESS)  # ignored by main.py
    parser.add_argument("--hls_type", default="HLS1", type=str,
                        help="Type of HLS" if distributed_launcher else argparse.SUPPRESS)  # ignored by main.py
    parser.add_argument("--kubernetes_run", default=False, type=_str_to_bool,
                        help="Kubernetes run" if distributed_launcher else argparse.SUPPRESS)  # ignored by main.py
    parser.add_argument('--use_horovod', dest='use_horovod', action='store_true',
                        help="""Enable horovod usage""")

    # Augmentations
    parser.add_argument('--augment', dest='augment', action='store_true',
                        help="""Perform data augmentation during training""")
    parser.add_argument('--no-augment', dest='augment', action='store_false')
    parser.set_defaults(augment=True)

    # Dataset flags
    parser.add_argument('--data_dir', required=True, type=str,
                        help="""Input directory containing the dataset for training the model""")
    parser.add_argument('--batch_size', default=2, type=int,
                        help="""Size of each minibatch per device""")
    parser.add_argument('--fold', default=0, type=int,
                        help="""Chosen fold for cross-validation""")
    parser.add_argument('--num_folds', default=5, type=int,
                        help="""Number of folds in k-cross-validation of dataset""")

    # Tensorflow configuration flags
    parser.add_argument('--dtype', '-d', type=str, choices=['fp32', 'bf16'], default='bf16',
                        help='Data type for HPU: fp32 or bf16')
    parser.add_argument('--use_amp', '--amp', dest='use_amp', action='store_true', default=False,
                        help="""Train using TF-AMP for GPU/CPU""")
    parser.add_argument('--use_xla', '--xla', dest='use_xla', action='store_true',
                        help="""Train using XLA""")
    parser.add_argument('--no_xla', dest='use_xla', action='store_false')
    parser.set_defaults(use_xla=True)

    parser.add_argument('--seed', default=None, type=int,
                        help="""Random seed""")
    parser.add_argument('--dump_config', type=str, default=None,
                        help="""Directory for dumping debug traces""")
    parser.add_argument('--synth_data', dest='synth_data', action='store_true',
                        help="""Use deterministic and synthetic data""")
    parser.add_argument('--disable_ckpt_saving', dest='disable_ckpt_saving', action='store_true',
                        help="""Disables saving checkpoints""")
    parser.add_argument('--no_hpu', dest='no_hpu', action='store_true',
                        help="""Do not load Habana modules. Train the model on CPU/GPU""")
    parser.add_argument('--tensorboard_logging', dest='tensorboard_logging', action='store_true',
                        help="""Enable tensorboard logging""")
    parser.add_argument('--log_all_workers', dest='log_all_workers', action='store_true',
                        help="""Enable logging data for every worker in a separate directory named `worker_N`""")
    parser.add_argument('--tf_verbosity', dest='tf_verbosity', type=int, choices=[0, 1, 2, 3],
                        help="""Logging level from Tensorflow.
                        0 = all messages are logged (default behavior)
                        1 = INFO messages are not printed
                        2 = INFO and WARNING messages are not printed
                        3 = INFO, WARNING, and ERROR messages are not printed""")
    # Default bf16 conversion config ships next to this package.
    DEFAULT_BF16_CONFIG_PATH = os.fspath(Path(os.path.realpath(
        __file__)).parents[1].joinpath("bf16_config/unet.json"))
    parser.add_argument('--bf16_config_path', metavar='</path/to/custom/bf16/config>', required=False, type=str, default=DEFAULT_BF16_CONFIG_PATH,
                        help=f'Path to custom mixed precision config to use given in JSON format. Defaults to {DEFAULT_BF16_CONFIG_PATH}')

    return parser
|
127 |
+
|
128 |
+
|
129 |
+
def parse_args(description="UNet-3D", distributed_launcher=False):
    """Parse the command line and return all flags as a Munch.

    The Munch mirrors every argparse attribute one-to-one and additionally
    exposes `precision` as an alias of `dtype`.
    """
    flags = get_parser(description, distributed_launcher).parse_args()
    params = Munch(vars(flags))
    params.precision = flags.dtype  # alias kept for callers expecting it
    return params
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/runtime/hooks.py
ADDED
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - added tensorboard logging possibility
|
19 |
+
# - added performance parameters logging during training
|
20 |
+
# - added possibility to log data from every horovod worker
|
21 |
+
# - moved horovod import under use_horovod condition so that the user is not required to install this
|
22 |
+
# library when the model is being run on a single card
|
23 |
+
|
24 |
+
import time
|
25 |
+
|
26 |
+
import numpy as np
|
27 |
+
import tensorflow as tf
|
28 |
+
|
29 |
+
|
30 |
+
def get_hooks(params, logger):
    """Build the list of SessionRunHooks for the current execution mode.

    Args:
        params: parameter namespace (exec_mode, use_horovod, worker_id,
            benchmark, logging flags, ...).
        logger: dllogger-style logger passed through to the hooks.

    Returns:
        A (possibly empty) list of hooks.

    Fix: the original returned implicit `None` for exec modes that matched
    neither the training nor the 'predict' branch (e.g. 'evaluate'); an
    empty list is now returned in every case so callers can always iterate
    the result.
    """
    hooks = []
    if 'train' in params.exec_mode:
        if params.use_horovod:
            # Imported lazily so single-card runs don't need horovod installed.
            import horovod.tensorflow as hvd
            hooks.append(hvd.BroadcastGlobalVariablesHook(0))
        if params.worker_id == 0 or params.log_all_workers:
            if params.benchmark:
                hooks.append(ProfilingHook(warmup_steps=params.warmup_steps,
                                           global_batch_size=params.num_workers * params.batch_size,
                                           logger=logger,
                                           mode='train'))
            else:
                hooks.append(TrainingHook(params,
                                          logger=logger,
                                          tensor_names=['total_loss_ref:0']))
                if params.tensorboard_logging:
                    from TensorFlow.common.tb_utils import ExamplesPerSecondEstimatorHook
                    hooks.append(ExamplesPerSecondEstimatorHook(params.batch_size,
                                                                params.log_every,
                                                                output_dir=params.log_dir))
    elif params.exec_mode == 'predict':
        if params.worker_id == 0 and params.benchmark:
            hooks.append(ProfilingHook(warmup_steps=params.warmup_steps,
                                       global_batch_size=params.batch_size,
                                       logger=logger,
                                       mode='test'))
    return hooks
|
62 |
+
|
63 |
+
|
64 |
+
class ProfilingHook(tf.estimator.SessionRunHook):
    """Collects per-step wall-clock timestamps after a warm-up period and
    emits throughput/latency statistics through the supplied logger when the
    session ends.
    """

    def __init__(self, warmup_steps, global_batch_size, logger, mode):
        self._warmup_steps = warmup_steps
        self._global_batch_size = global_batch_size
        self._step = 0
        self._timestamps = []
        self._logger = logger
        self._mode = mode

    def before_run(self, run_context):
        self._step += 1
        # Skip the first `warmup_steps` iterations so graph compilation and
        # cache warm-up do not skew the statistics.
        if self._step >= self._warmup_steps:
            self._timestamps.append(time.time())

    def end(self, session):
        # Differences of consecutive timestamps give per-step durations.
        intervals = np.diff(np.array(self._timestamps))
        stats = process_performance_stats(intervals,
                                          self._global_batch_size,
                                          self._mode)
        self._logger.log(step=(),
                         data={metric: float(value) for metric, value in stats})
        self._logger.flush()
|
88 |
+
|
89 |
+
|
90 |
+
class TrainingHook(tf.estimator.SessionRunHook):
    """Fetches the named tensors each step and, every `log_every` steps,
    logs their values together with iteration duration and examples/sec.
    """

    def __init__(self, params, logger, tensor_names):
        self._params = params
        self._step = 0
        self._timestamp = time.time()
        self._logger = logger
        self._tensor_names = tensor_names

    def before_run(self, run_context):
        # Ask the session to fetch the monitored tensors alongside the step.
        return tf.estimator.SessionRunArgs(fetches=self._tensor_names)

    def after_run(self, run_context, run_values):
        if self._step % self._params.log_every == 0:
            now = time.time()
            # Average duration of one step over the last logging window.
            per_step = float(now - self._timestamp) / self._params.log_every
            self._timestamp = now
            data = {name: str(value)
                    for name, value in zip(self._tensor_names, run_values.results)}
            data["iter duration [ms]"] = 1000 * per_step
            data["examples/sec"] = self._params.batch_size / per_step
            self._logger.log(step=(self._step, self._params.max_steps), data=data)
        self._step += 1

    def end(self, session):
        self._logger.flush()
|
123 |
+
|
124 |
+
|
125 |
+
def process_performance_stats(timestamps, batch_size, mode):
    """Turn an array of per-step durations (seconds) into summary stats.

    Args:
        timestamps: numpy array of per-step durations in seconds.
        batch_size: global batch size used to derive throughput.
        mode: label ('train'/'test') embedded in the stat names.

    Returns:
        List of (name, value-as-str) pairs: mean throughput, mean latency,
        and 90/95/99% one-sided confidence bounds on the latency.
    """
    durations_ms = 1000 * timestamps
    mean_latency = durations_ms.mean()
    std_ms = durations_ms.std()
    sqrt_n = np.sqrt(len(durations_ms))
    mean_throughput = (1000.0 * batch_size / durations_ms).mean()

    stats = [
        ("throughput_{}".format(mode), str(mean_throughput)),
        ('latency_{}:'.format(mode), str(mean_latency)),
    ]
    # Normal-approximation upper confidence bounds on the mean latency.
    for label, z_score in (("90%:", 1.645), ("95%:", 1.960), ("99%:", 2.576)):
        stats.append(("Latency_{} ".format(mode) + label,
                      str(mean_latency + z_score * std_ms / sqrt_n)))
    return stats
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/runtime/parse_results.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import os
|
16 |
+
import argparse
|
17 |
+
|
18 |
+
|
19 |
+
def parse_convergence_results(path, environment):
    """Average per-class dice scores across all matching logfiles and print them.

    Scans `path` for files whose names contain both "log" and `environment`,
    takes the LAST line of each file, and extracts the WholeTumor/TumorCore/
    PeritumoralEdema/EnhancingTumor/MeanDice values from it.

    Raises:
        FileNotFoundError: if no matching logfile exists under `path`.
    """
    whole_tumor = []
    tumor_core = []
    peritumoral_edema = []
    enhancing_tumor = []
    mean_dice = []
    logfiles = [f for f in os.listdir(path) if "log" in f and environment in f]
    if not logfiles:
        raise FileNotFoundError("No logfile found at {}".format(path))
    for logfile in logfiles:
        with open(os.path.join(path, logfile), "r") as f:
            content = f.readlines()
        # Only the final line carries the evaluation summary; a file whose
        # last line lacks "TumorCore" is treated as corrupted and skipped.
        # NOTE(review): an empty file would raise IndexError here — confirm
        # the logger always writes at least one line.
        if "TumorCore" not in content[-1]:
            print("Evaluation score not found. The file", logfile, "might be corrupted.")
            continue
        # Assumes the summary line looks like "<prefix>()<scores>"; the text
        # after the first "()" holds space-separated "Name value" tokens —
        # TODO confirm against the actual dllogger output format.
        content = content[-1].split("()")[1]
        whole_tumor.append(float([val for val in content.split(" ")
                                  if "WholeTumor" in val][0].split()[-1]))
        tumor_core.append(float([val for val in content.split(" ")
                                 if "TumorCore" in val][0].split()[-1]))
        peritumoral_edema.append(float([val for val in content.split(" ")
                                        if "PeritumoralEdema" in val][0].split()[-1]))
        enhancing_tumor.append(float([val for val in content.split(" ")
                                      if "EnhancingTumor" in val][0].split()[-1]))
        mean_dice.append(float([val for val in content.split(" ")
                                if "MeanDice" in val][0].split()[-1]))

    if whole_tumor:
        print("Evaluation average dice score:", sum(mean_dice) / len(mean_dice))
        print("Evaluation whole tumor dice score:", sum(whole_tumor) / len(whole_tumor))
        print("Evaluation tumor core dice score:", sum(tumor_core) / len(tumor_core))
        print("Evaluation peritumoral edema dice score:", sum(peritumoral_edema) / len(peritumoral_edema))
        print("Evaluation enhancing tumor dice score:", sum(enhancing_tumor) / len(enhancing_tumor))
    else:
        print("All logfiles were corrupted, no loss was obtained.")
|
54 |
+
|
55 |
+
|
56 |
+
if __name__ == '__main__':
    # CLI entry point: summarize convergence results found under --model_dir
    # for logfiles whose names contain the --env tag.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir',
                        type=str,
                        required=True)
    parser.add_argument('--env',
                        type=str,
                        required=True)

    args = parser.parse_args()
    parse_convergence_results(path=args.model_dir, environment=args.env)
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/UNet3D/runtime/setup.py
ADDED
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2021 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - script migration to Tensorflow 2.x version
|
19 |
+
# - added HPU custom setup functions
|
20 |
+
# - added possibility to handle non-HPU runs
|
21 |
+
# - added possibility to log data from every horovod worker
|
22 |
+
# - added possibility to disable checkpoint saving
|
23 |
+
# - moved horovod import under use_horovod condition so that the user is not required to install this
|
24 |
+
# library when the model is being run on a single card
|
25 |
+
# - stopped overriding model_dir path with additional `model_checkpoint` directory
|
26 |
+
|
27 |
+
import os
|
28 |
+
|
29 |
+
import dllogger as logger
|
30 |
+
import tensorflow as tf
|
31 |
+
from dllogger import StdOutBackend, Verbosity, JSONStreamBackend
|
32 |
+
|
33 |
+
from model.model_fn import unet_3d
|
34 |
+
|
35 |
+
|
36 |
+
def setup_horovod(params):
    """Initialize horovod (when enabled) and fill worker identity fields.

    Sets `params.worker_id` / `params.num_workers` (0/1 for single-card
    runs) and, when `log_all_workers` is set, gives each worker its own
    log/model subdirectory.

    Returns:
        The (mutated) params object.

    Raises:
        RuntimeError: when `use_horovod` is set but horovod cannot be
            imported.

    Fix: the original checked `if hvd is None` after a plain import — dead
    code, since a failed import raises ImportError and never yields None.
    The intended friendly error is now produced via try/except.
    """
    params.worker_id = 0
    params.num_workers = 1
    if params.use_horovod:
        try:
            import horovod.tensorflow as hvd
        except ImportError as err:
            raise RuntimeError(
                "Problem encountered during Horovod import. Please make sure that habana-horovod package is installed.") from err
        hvd.init()
        if params.no_hpu:
            # Horovod on GPU: pin each process to its own device.
            os.environ['CUDA_VISIBLE_DEVICES'] = str(hvd.local_rank())
        params.worker_id = hvd.rank()
        params.num_workers = hvd.size()
        if params.log_all_workers:
            params.log_dir = os.path.join(params.log_dir, f'worker_{params.worker_id}')
            params.model_dir = os.path.join(params.model_dir, f'worker_{params.worker_id}')

    return params
|
55 |
+
|
56 |
+
|
57 |
+
def set_flags(params):
    """Apply process-wide environment / framework configuration.

    On GPU/CPU runs (`no_hpu`) this sets the usual NVIDIA performance env
    vars; otherwise it loads the Habana modules and, for bf16 runs, points
    the bf16 conversion pass at the configured JSON config.

    Fix: `os.environ` values must be strings — assigning the integer
    `params.tf_verbosity` raised TypeError. The value is now stringified,
    and an explicit verbosity of 0 (previously skipped by truthiness) is
    honored as well.
    """
    if params.tf_verbosity is not None:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(params.tf_verbosity)

    if params.no_hpu:
        # GPU/CPU path: standard NVIDIA/TF performance settings.
        os.environ['CUDA_CACHE_DISABLE'] = '1'
        os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
        os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
        os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '0'
        os.environ['TF_ADJUST_HUE_FUSED'] = '1'
        os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
        os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
        os.environ['TF_SYNC_ON_FINISH'] = '0'
    else:
        from habana_frameworks.tensorflow import load_habana_module
        load_habana_module()
        if params.dtype == 'bf16':
            os.environ['TF_BF16_CONVERSION'] = params.bf16_config_path
|
75 |
+
|
76 |
+
|
77 |
+
def prepare_model_dir(params):
    """Return the directory checkpoints should be written to, or None.

    Only the logging worker(s) get a model dir, and never in benchmark mode.
    A fresh (non-resumed) training run starts from an emptied directory.

    Fix: clearing previously shelled out via `os.system('rm -rf {}/*')`,
    which breaks on paths containing spaces or shell metacharacters and is
    not portable. The directory is now emptied in-process (this also removes
    dotfiles, which the shell glob skipped).
    """
    is_logging_worker = params.worker_id == 0 or params.log_all_workers
    if not is_logging_worker or params.benchmark:
        return None

    model_dir = params.model_dir
    os.makedirs(model_dir, exist_ok=True)
    if ('train' in params.exec_mode) and (not params.resume_training):
        import shutil
        for entry in os.scandir(model_dir):
            if entry.is_dir(follow_symlinks=False):
                shutil.rmtree(entry.path)
            else:
                os.remove(entry.path)

    return model_dir
|
86 |
+
|
87 |
+
|
88 |
+
def build_estimator(params, model_dir):
    """Construct the tf.estimator.Estimator with session/run configuration.

    Args:
        params: parameter namespace (use_xla, use_amp, max_steps,
            num_workers, worker_id, benchmark, disable_ckpt_saving, ...).
        model_dir: checkpoint directory (or None to disable persistence).

    Returns:
        A configured `tf.estimator.Estimator` running `unet_3d`.
    """
    config = tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(), allow_soft_placement=True)

    if params.use_xla:
        config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1

    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = os.getenv('CUDA_VISIBLE_DEVICES', '0')

    if params.use_amp:
        config.graph_options.rewrite_options.auto_mixed_precision = 1

    # Only worker 0 saves checkpoints: once at the end of its share of steps,
    # and never in benchmark mode or when saving is explicitly disabled.
    checkpoint_steps = (params.max_steps // params.num_workers) if params.worker_id == 0 else None
    checkpoint_steps = checkpoint_steps if not (params.benchmark or params.disable_ckpt_saving) else None

    run_config = tf.estimator.RunConfig(
        save_summary_steps=params.max_steps,
        session_config=config,
        save_checkpoints_steps=checkpoint_steps,
        save_checkpoints_secs=None,  # steps-based saving only
        keep_checkpoint_max=1)

    return tf.estimator.Estimator(
        model_fn=unet_3d,
        model_dir=model_dir,
        config=run_config,
        params=params)
|
115 |
+
|
116 |
+
|
117 |
+
def get_logger(params):
    """Initialize and return the dllogger instance.

    Backends are attached only on worker 0 (or on every worker when
    ``log_all_workers`` is set): stdout always, plus a JSON stream
    when a log directory is configured.
    """
    selected_backends = []
    if params.worker_id == 0 or params.log_all_workers:
        selected_backends.append(StdOutBackend(Verbosity.VERBOSE))
        if params.log_dir:
            os.makedirs(params.log_dir, exist_ok=True)
            log_file = f"{params.log_dir}/log.json"
            selected_backends.append(JSONStreamBackend(Verbosity.VERBOSE, log_file))
    logger.init(backends=selected_backends)
    return logger
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/.gitignore
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
data/*
|
2 |
+
logs/*
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/LICENSE
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright (c) 2021 Habana Labs, Ltd. an Intel Company
|
2 |
+
|
3 |
+
BSD 3-Clause License
|
4 |
+
|
5 |
+
Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
6 |
+
All rights reserved.
|
7 |
+
|
8 |
+
Redistribution and use in source and binary forms, with or without
|
9 |
+
modification, are permitted provided that the following conditions are met:
|
10 |
+
|
11 |
+
* Redistributions of source code must retain the above copyright notice, this
|
12 |
+
list of conditions and the following disclaimer.
|
13 |
+
|
14 |
+
* Redistributions in binary form must reproduce the above copyright notice,
|
15 |
+
this list of conditions and the following disclaimer in the documentation
|
16 |
+
and/or other materials provided with the distribution.
|
17 |
+
|
18 |
+
* Neither the name of the copyright holder nor the names of its
|
19 |
+
contributors may be used to endorse or promote products derived from
|
20 |
+
this software without specific prior written permission.
|
21 |
+
|
22 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
23 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
24 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
25 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
26 |
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
27 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
28 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
29 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
30 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
31 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/NOTICE
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
|
3 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
you may not use this file except in compliance with the License.
|
5 |
+
You may obtain a copy of the License at
|
6 |
+
|
7 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
|
9 |
+
Unless required by applicable law or agreed to in writing, software
|
10 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
See the License for the specific language governing permissions and
|
13 |
+
limitations under the License.
|
14 |
+
|
15 |
+
This repository includes software from:
|
16 |
+
* TensorFlow, (https://github.com/tensorflow/tensorflow) licensed
|
17 |
+
under the Apache License, Version 2.0
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/README.md
ADDED
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# UNet2D for TensorFlow 2
|
2 |
+
|
3 |
+
This directory provides a script and recipe to train a UNet2D Medical model to achieve state of the art accuracy, and is tested and maintained by Habana.
|
4 |
+
For further information on performance, refer to [Habana Model Performance Data page](https://developer.habana.ai/resources/habana-training-models/#performance).
|
5 |
+
|
6 |
+
For further information on training deep learning models using Gaudi, refer to [developer.habana.ai](https://developer.habana.ai/resources/).
|
7 |
+
|
8 |
+
## Table of Contents
|
9 |
+
|
10 |
+
* [Model-References](../../../README.md)
|
11 |
+
* [Model overview](#model-overview)
|
12 |
+
* [Setup](#setup)
|
13 |
+
* [Training and Examples](#training-and-examples)
|
14 |
+
* [Advanced](#Advanced)
|
15 |
+
* [Supported Configuration](#supported-configuration)
|
16 |
+
* [Changelog](#changelog)
|
17 |
+
|
18 |
+
## Model Overview
|
19 |
+
|
20 |
+
This directory describes how to train UNet Medical model for 2D Segmentation on Intel® Gaudi® AI Accelerator (HPU). The UNet Medical model is a modified version of the original model located in [NVIDIA UNet Medical Image Segmentation for TensorFlow 2.x](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/Segmentation/UNet_Medical). The implementation provided covers UNet model as described in the original [UNet: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597) paper.
|
21 |
+
|
22 |
+
### Model Architecture
|
23 |
+
|
24 |
+
The UNet model allows seamless segmentation of 2D images with high accuracy and performance, and can be adapted to solve many different segmentation issues.
|
25 |
+
|
26 |
+
The following figure shows the construction of the UNet model and its components. A UNet is composed of a contractive and an expanding path, that aims at building a bottleneck in its centermost part through a combination of convolution and pooling operations. After this bottleneck, the image is reconstructed through a combination of convolutions and upsampling. Skip connections are added with the goal of helping the backward flow of gradients in order to improve the training.
|
27 |
+
|
28 |
+

|
29 |
+
Figure 1. The architecture of a UNet model from [UNet: Convolutional Networks for Biomedical Image Segmentation paper](https://arxiv.org/abs/1505.04597).
|
30 |
+
|
31 |
+
### Model Changes
|
32 |
+
|
33 |
+
The below lists the major changes applied to the model:
|
34 |
+
|
35 |
+
* Removed GPU specific configurations.
|
36 |
+
* Changed some scripts to run the model on Gaudi. This includes loading Habana TensorFlow modules and using multiple Gaudi cards helpers.
|
37 |
+
* Added support for using bfloat16 precision instead of float16.
|
38 |
+
* Replaced tf.keras.activations.softmax with tf.nn.softmax due to performance issues described in https://github.com/tensorflow/tensorflow/pull/47572;
|
39 |
+
* Added further TensorBoard and performance logging options.
|
40 |
+
* Removed GPU specific files (examples/*, Dockerfile etc.) and some unused codes.
|
41 |
+
* Enabled the tf.data.experimental.prefetch_to_device for HPU device to improve performance.
|
42 |
+
|
43 |
+
### Default Configuration
|
44 |
+
|
45 |
+
- Execution mode: train and evaluate
|
46 |
+
- Batch size: 8
|
47 |
+
- Data type: bfloat16
|
48 |
+
- Maximum number of steps: 6400
|
49 |
+
- Weight decay: 0.0005
|
50 |
+
- Learning rate: 0.0001
|
51 |
+
- Number of Horovod workers (HPUs): 1
|
52 |
+
- Data augmentation: True
|
53 |
+
- Cross-validation: disabled
|
54 |
+
- Using XLA: False
|
55 |
+
- Logging losses and performance every N steps: 100
|
56 |
+
|
57 |
+
## Setup
|
58 |
+
|
59 |
+
Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/GAUDI_Installation_Guide.html) to set up the environment including the `$PYTHON` environment variable. To achieve the best performance, please follow the methods outlined in the [Optimizing Training Platform guide](https://docs.habana.ai/en/latest/TensorFlow/Model_Optimization_TensorFlow/Optimization_Training_Platform.html).
|
60 |
+
The guides will walk you through the process of setting up your system to run the model on Gaudi.
|
61 |
+
|
62 |
+
|
63 |
+
### Clone Habana Model-References
|
64 |
+
|
65 |
+
In the docker container, clone this repository and switch to the branch that matches your SynapseAI version. You can run the [`hl-smi`](https://docs.habana.ai/en/latest/Management_and_Monitoring/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options) utility to determine the SynapseAI version.
|
66 |
+
|
67 |
+
```bash
|
68 |
+
git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References /root/Model-References
|
69 |
+
```
|
70 |
+
|
71 |
+
**Note:** If Model-References repository path is not in the PYTHONPATH, make sure you update it:
|
72 |
+
```bash
|
73 |
+
export PYTHONPATH=$PYTHONPATH:/root/to/Model-References
|
74 |
+
```
|
75 |
+
|
76 |
+
### Install Model Requirements
|
77 |
+
|
78 |
+
1. In the docker container, go to the UNet2D directory:
|
79 |
+
|
80 |
+
```bash
|
81 |
+
cd /root/Model-References/TensorFlow/computer_vision/Unet2D
|
82 |
+
```
|
83 |
+
2. Install the required packages using pip:
|
84 |
+
|
85 |
+
```bash
|
86 |
+
$PYTHON -m pip install -r requirements.txt
|
87 |
+
```
|
88 |
+
|
89 |
+
### Download the Dataset
|
90 |
+
|
91 |
+
Download the [EM segmentation challenge dataset](http://brainiac2.mit.edu/isbi_challenge/home)*:
|
92 |
+
|
93 |
+
```bash
|
94 |
+
$PYTHON download_dataset.py
|
95 |
+
```
|
96 |
+
|
97 |
+
By default, it will download the dataset to `./data` path. Use `--data_dir <path>` to change it.
|
98 |
+
|
99 |
+
**NOTE:** If the original location is unavailable, the dataset is also mirrored on [Kaggle](https://www.kaggle.com/soumikrakshit/isbi-challenge-dataset). Registration is required.
|
100 |
+
|
101 |
+
## Training and Examples
|
102 |
+
|
103 |
+
### Single Card and Multi-Card Training Examples
|
104 |
+
|
105 |
+
**Run training on 1 HPU:**
|
106 |
+
|
107 |
+
```bash
|
108 |
+
$PYTHON unet2d.py --data_dir <path/to/dataset> --batch_size <batch_size> \
|
109 |
+
--dtype <precision> --model_dir <path/to/model_dir> --fold <fold>
|
110 |
+
```
|
111 |
+
|
112 |
+
- 1 HPU training with batch size 8, bfloat16 precision and fold 0:
|
113 |
+
|
114 |
+
```bash
|
115 |
+
$PYTHON unet2d.py --data_dir /data/tensorflow/unet2d --batch_size 8 --dtype bf16 --model_dir /tmp/unet2d_1_hpu --fold 0 --tensorboard_logging
|
116 |
+
```
|
117 |
+
|
118 |
+
- 1 HPU training with batch size 8, float32 precision and fold 0:
|
119 |
+
|
120 |
+
```bash
|
121 |
+
$PYTHON unet2d.py --data_dir /data/tensorflow/unet2d --batch_size 8 --dtype fp32 --model_dir /tmp/unet2d_1_hpu --fold 0 --tensorboard_logging
|
122 |
+
```
|
123 |
+
|
124 |
+
**Run training on 8 HPUs:**
|
125 |
+
|
126 |
+
Running the script via mpirun requires the `--use_horovod` argument, and the mpirun prefix with several parameters.
|
127 |
+
|
128 |
+
**NOTE:** mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/TensorFlow/Tensorflow_Scaling_Guide/Horovod_Scaling/index.html#mpirun-configuration).
|
129 |
+
|
130 |
+
```bash
|
131 |
+
mpirun --allow-run-as-root --bind-to core --map-by socket:PE=6 -np 8 \
|
132 |
+
$PYTHON unet2d.py --data_dir <path/to/dataset> --batch_size <batch_size> \
|
133 |
+
--dtype <precision> --model_dir <path/to/model_dir> --fold <fold> --use_horovod
|
134 |
+
```
|
135 |
+
- 8 HPUs training with batch size 8, bfloat16 precision and fold 0:
|
136 |
+
|
137 |
+
```bash
|
138 |
+
mpirun --allow-run-as-root --tag-output --merge-stderr-to-stdout --bind-to core --map-by socket:PE=6 -np 8 \
|
139 |
+
$PYTHON unet2d.py --data_dir /data/tensorflow/unet2d/ --batch_size 8 \
|
140 |
+
--dtype bf16 --model_dir /tmp/unet2d_8_hpus --fold 0 --tensorboard_logging --log_all_workers --use_horovod
|
141 |
+
```
|
142 |
+
- 8 HPUs training with batch size 8, float32 precision and fold 0:
|
143 |
+
|
144 |
+
```bash
|
145 |
+
mpirun --allow-run-as-root --tag-output --merge-stderr-to-stdout --bind-to core --map-by socket:PE=6 -np 8 \
|
146 |
+
$PYTHON unet2d.py --data_dir /data/tensorflow/unet2d/ --batch_size 8 \
|
147 |
+
--dtype fp32 --model_dir /tmp/unet2d_8_hpus --fold 0 --tensorboard_logging --log_all_workers --use_horovod
|
148 |
+
```
|
149 |
+
|
150 |
+
**Run 5-fold Cross-Validation and compute average dice score:**
|
151 |
+
|
152 |
+
All the commands described above train and evaluate the model on the dataset with fold 0. To perform 5-fold-cross-validation on the dataset and compute average dice score across 5 folds, you can execute training the script 5 times and calculate the average dice score manually or run bash script `train_and_evaluate.sh`:
|
153 |
+
|
154 |
+
```bash
|
155 |
+
bash train_and_evaluate.sh <path/to/dataset> <path/for/results> <batch_size> <precision> <number_of_HPUs>
|
156 |
+
```
|
157 |
+
|
158 |
+
- 1 HPU 5-fold-cross-validation training with batch size 8 and bfloat16 precision:
|
159 |
+
|
160 |
+
```bash
|
161 |
+
bash train_and_evaluate.sh /data/tensorflow/unet2d/ /tmp/unet2d_1_hpu 8 bf16 1
|
162 |
+
```
|
163 |
+
|
164 |
+
- 1 HPU 5-fold-cross-validation training with batch size 8 and float32 precision:
|
165 |
+
|
166 |
+
```bash
|
167 |
+
bash train_and_evaluate.sh /data/tensorflow/unet2d/ /tmp/unet2d_1_hpu 8 fp32 1
|
168 |
+
```
|
169 |
+
|
170 |
+
- 8 HPUs 5-fold-cross-validation training with batch size 8 and bfloat16 precision:
|
171 |
+
|
172 |
+
```bash
|
173 |
+
bash train_and_evaluate.sh /data/tensorflow/unet2d/ /tmp/unet2d_8_hpus 8 bf16 8
|
174 |
+
```
|
175 |
+
|
176 |
+
- 8 HPUs 5-fold-cross-validation training with batch size 8 and float32 precision:
|
177 |
+
|
178 |
+
```bash
|
179 |
+
bash train_and_evaluate.sh /data/tensorflow/unet2d/ /tmp/unet2d_8_hpus 8 fp32 8
|
180 |
+
```
|
181 |
+
|
182 |
+
## Advanced
|
183 |
+
|
184 |
+
The following sections provide further details on the scripts in the directory, available parameters and command-line options.
|
185 |
+
|
186 |
+
### Scripts Definitions
|
187 |
+
|
188 |
+
* `unet2d.py`: The training script of the UNet2D model, entry point to the application.
|
189 |
+
* `download_dataset.py`: Script for downloading dataset.
|
190 |
+
* `data_loading/data_loader.py`: Implements the data loading and augmentation.
|
191 |
+
* `model/layers.py`: Defines the different blocks that are used to assemble UNet.
|
192 |
+
* `model/unet.py`: Defines the model architecture using the blocks from the `layers.py` script.
|
193 |
+
* `runtime/arguments.py`: Implements the command-line arguments parsing.
|
194 |
+
* `runtime/losses.py`: Implements the losses used during training and evaluation.
|
195 |
+
* `runtime/run.py`: Implements the logic for training, evaluation, and inference.
|
196 |
+
* `runtime/parse_results.py`: Implements the intermediate results parsing.
|
197 |
+
* `runtime/setup.py`: Implements helper setup functions.
|
198 |
+
* `train_and_evaluate.sh`: Runs the topology training and evaluates the model for 5 cross-validation.
|
199 |
+
|
200 |
+
Other folders included in the root directory are:
|
201 |
+
* `images/`: Contains a model diagram.
|
202 |
+
|
203 |
+
### Parameters
|
204 |
+
|
205 |
+
The complete list of the available parameters for the `unet2d.py` script contains:
|
206 |
+
* `--exec_mode`: Select the execution mode to run the model (default: `train_and_evaluate`). Modes available:
|
207 |
+
* `train` - trains model from scratch.
|
208 |
+
* `evaluate` - loads checkpoint from `--model_dir` (if available) and performs evaluation on validation subset (requires `--fold` other than `None`).
|
209 |
+
* `train_and_evaluate` - trains model from scratch and performs validation at the end (requires `--fold` other than `None`).
|
210 |
+
* `predict` - loads checkpoint from `--model_dir` (if available) and runs inference on the test set. Stores the results in `--model_dir` directory.
|
211 |
+
* `train_and_predict` - trains model from scratch and performs inference.
|
212 |
+
* `--model_dir`: Set the output directory for information related to the model (default: `/tmp/unet2d`).
|
213 |
+
* `--data_dir`: Set the input directory containing the dataset (default: `None`).
|
214 |
+
* `--log_dir`: Set the output directory for logs (default: `/tmp/unet2d`).
|
215 |
+
* `--batch_size`: Size of each minibatch per HPU (default: `8`).
|
216 |
+
* `--dtype`: Set precision to be used in model: fp32/bf16 (default: `bf16`).
|
217 |
+
* `--fold`: Selected fold for cross-validation (default: `None`).
|
218 |
+
* `--max_steps`: Maximum number of steps (batches) for training (default: `6400`).
|
219 |
+
* `--log_every`: Log data every n steps (default: `100`).
|
220 |
+
* `--evaluate_every`: Evaluate every n steps (default: `0` - evaluate once at the end).
|
221 |
+
* `--warmup_steps`: Number of steps to skip (default: `200`). First iterations are usually much slower since the graph is being constructed. Skipping the initial iterations is required for a fair performance assessment.
|
222 |
+
* `--weight_decay`: Weight decay coefficient (default: `0.0005`).
|
223 |
+
* `--learning_rate`: Model’s learning rate (default: `0.0001`).
|
224 |
+
* `--seed`: Set random seed for reproducibility (default: `123`).
|
225 |
+
* `--dump_config`: Directory for dumping debug traces (default: `None`).
|
226 |
+
* `--augment`: Enable data augmentation (default: `True`).
|
227 |
+
* `--benchmark`: Enable performance benchmarking (default: `False`). If the flag is set, the script runs in a benchmark mode - each iteration is timed and the performance result (in images per second) is printed at the end. Works for both `train` and `predict` execution modes.
|
228 |
+
* `--xla`: Enable accelerated linear algebra optimization (default: `False`).
|
229 |
+
* `--resume_training`: Resume training from a checkpoint (default: `False`).
|
230 |
+
* `--no_hpu`: Disable execution on HPU, train on CPU (default: `False`).
|
231 |
+
* `--synth_data`: Use deterministic and synthetic data (default: `False`).
|
232 |
+
* `--disable_ckpt_saving`: Disables saving checkpoints (default: `False`).
|
233 |
+
* `--use_horovod`: Enable horovod usage (default: `False`).
|
234 |
+
* `--tensorboard_logging`: Enable tensorboard logging (default: `False`).
|
235 |
+
* `--log_all_workers`: Enable logging data for every horovod worker in a separate directory named `worker_N` (default: False).
|
236 |
+
* `--bf16_config_path`: Path to custom mixed precision config to use given in JSON format.
|
237 |
+
* `--tf_verbosity`: If set changes logging level from Tensorflow:
|
238 |
+
* `0` - all messages are logged (default behavior);
|
239 |
+
* `1` - INFO messages are not printed;
|
240 |
+
* `2` - INFO and WARNING messages are not printed;
|
241 |
+
* `3` - INFO, WARNING, and ERROR messages are not printed.
|
242 |
+
|
243 |
+
### Command-line Options
|
244 |
+
|
245 |
+
To see the full list of the available options and their descriptions, use the `-h` or `--help` command-line option, for example:
|
246 |
+
|
247 |
+
```bash
|
248 |
+
$PYTHON unet2d.py --help
|
249 |
+
```
|
250 |
+
|
251 |
+
## Supported Configuration
|
252 |
+
|
253 |
+
| Validated on | SynapseAI Version | TensorFlow Version(s) | Mode |
|
254 |
+
|:------:|:-----------------:|:-----:|:----------:|
|
255 |
+
| Gaudi | 1.14.0 | 2.15.0 | Training |
|
256 |
+
| Gaudi2 | 1.13.0 | 2.13.1 | Training |
|
257 |
+
|
258 |
+
## Changelog
|
259 |
+
### 1.14.0
|
260 |
+
- Added time to train and throughput logs at the end of the training
|
261 |
+
- Changed benchmark mode to run the same amount of steps as non-benchmark mode
|
262 |
+
|
263 |
+
### 1.12.0
|
264 |
+
- Removed limited number of nodes inserted into a single HPU graph.
|
265 |
+
|
266 |
+
### 1.11.0
|
267 |
+
|
268 |
+
- Limited number of nodes inserted into a single HPU graph to improve model performance.
|
269 |
+
|
270 |
+
### 1.10.0
|
271 |
+
|
272 |
+
- Changed default seed value for higher accuracy.
|
273 |
+
|
274 |
+
### 1.7.0
|
275 |
+
|
276 |
+
- Added TimeToTrain callback for dumping evaluation timestamps
|
277 |
+
|
278 |
+
### 1.6.0
|
279 |
+
|
280 |
+
- Model enabled on Gaudi2 with the same config as first-gen Gaudi.
|
281 |
+
- Added num_parallel_calls for data loader to improve performance on Gaudi2.
|
282 |
+
|
283 |
+
### 1.4.0
|
284 |
+
|
285 |
+
- Enabled tf.data.experimental.prefetch_to_device for HPU device for better performance.
|
286 |
+
- Changed `python` or `python3` to `$PYTHON` to execute correct version based on environment setup.
|
287 |
+
- Added support to import horovod-fork package directly instead of using Model-References' TensorFlow.common.horovod_helpers. Wrapped horovod import with a try-catch block so that installing the library is not required when the model is running on a single card.
|
288 |
+
- Replaced references to custom demo script by community entry points in the README and
|
289 |
+
`train_and_evaluate.sh`.
|
290 |
+
|
291 |
+
### 1.3.0
|
292 |
+
|
293 |
+
- Moved BF16 config json file from TensorFlow/common/ to model's directory.
|
294 |
+
- Updated the requirements.txt
|
295 |
+
|
296 |
+
### 1.2.0
|
297 |
+
|
298 |
+
- Removed the setting number of parallel calls in dataloader mapping to improve performance for different TensorFlow versions.
|
299 |
+
- Updated requirements.txt
|
300 |
+
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/bf16_config/unet.json
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"allowlist": [
|
3 |
+
"BatchMatMul",
|
4 |
+
"BatchMatMulV2",
|
5 |
+
"BiasAdd",
|
6 |
+
"BiasAddGrad",
|
7 |
+
"Conv2D",
|
8 |
+
"Conv2DBackpropFilter",
|
9 |
+
"Conv2DBackpropInput",
|
10 |
+
"Conv3D",
|
11 |
+
"Conv3DBackpropFilter",
|
12 |
+
"Conv3DBackpropFilterV2",
|
13 |
+
"Conv3DBackpropInput",
|
14 |
+
"Conv3DBackpropInputV2",
|
15 |
+
"HabanaConv2DWithPadding",
|
16 |
+
"HabanaConv2DWithPaddingBackpropFilter",
|
17 |
+
"HabanaConv2DWithPaddingBackpropInput",
|
18 |
+
"HabanaInstanceNorm",
|
19 |
+
"HabanaInstanceNormGrad",
|
20 |
+
"LeakyRelu",
|
21 |
+
"LeakyReluGrad",
|
22 |
+
"MatMul",
|
23 |
+
"MaxPool",
|
24 |
+
"MaxPoolV2",
|
25 |
+
"MaxPoolGrad",
|
26 |
+
"MaxPoolGradV2",
|
27 |
+
"Relu",
|
28 |
+
"ReluGrad"
|
29 |
+
],
|
30 |
+
"conditional_list": [
|
31 |
+
"Add",
|
32 |
+
"AddN",
|
33 |
+
"AddV2",
|
34 |
+
"CollectiveReduceV2",
|
35 |
+
"CollectiveReduceV3",
|
36 |
+
"Concat",
|
37 |
+
"ConcatV2",
|
38 |
+
"Equal",
|
39 |
+
"Exp",
|
40 |
+
"GreaterEqual",
|
41 |
+
"HabanaClampBwd",
|
42 |
+
"HabanaClampFwd",
|
43 |
+
"HabanaDropout",
|
44 |
+
"HabanaDropoutGrad",
|
45 |
+
"HorovodAllgather",
|
46 |
+
"HorovodAllreduce",
|
47 |
+
"HpuCollectiveGather",
|
48 |
+
"HpuCollectiveGatherV2",
|
49 |
+
"HpuCollectiveReduce",
|
50 |
+
"Identity",
|
51 |
+
"Log1p",
|
52 |
+
"L2Loss",
|
53 |
+
"Max",
|
54 |
+
"Mean",
|
55 |
+
"Mul",
|
56 |
+
"Neg",
|
57 |
+
"Pad",
|
58 |
+
"PadV2",
|
59 |
+
"Pow",
|
60 |
+
"RealDiv",
|
61 |
+
"Reciprocal",
|
62 |
+
"Reshape",
|
63 |
+
"ResizeNearestNeighbor",
|
64 |
+
"ResizeNearestNeighborGrad",
|
65 |
+
"Select",
|
66 |
+
"Shape",
|
67 |
+
"ShapeN",
|
68 |
+
"Slice",
|
69 |
+
"Square",
|
70 |
+
"Tile"
|
71 |
+
],
|
72 |
+
"strict_conditional_list": [],
|
73 |
+
"non_convertible_exceptions": [
|
74 |
+
[
|
75 |
+
".*KEEP_FP32_PRECISION.*",
|
76 |
+
""
|
77 |
+
]
|
78 |
+
],
|
79 |
+
"convertible_exceptions": [
|
80 |
+
[
|
81 |
+
".*FORCE_BF16_PRECISION.*",
|
82 |
+
""
|
83 |
+
]
|
84 |
+
]
|
85 |
+
}
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/data_loading/data_loader.py
ADDED
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2020-2022 Habana Labs, Ltd. an Intel Company
|
16 |
+
#
|
17 |
+
# Changes:
|
18 |
+
# - gpu_id, num_gpus replaced with hpu_id, num_hpus
|
19 |
+
# - removed setting number of parallel calls in dataloader mapping
|
20 |
+
# in order to improve performance for different TF versions
|
21 |
+
# - enable experimental.prefetch_to_device functionality to improve the performance
|
22 |
+
# - add num_parallel_calls to improve the performance on Gaudi2
|
23 |
+
|
24 |
+
""" Dataset class encapsulates the data loading"""
|
25 |
+
import os
|
26 |
+
from collections import deque
|
27 |
+
|
28 |
+
import numpy as np
|
29 |
+
import tensorflow as tf
|
30 |
+
from PIL import Image, ImageSequence
|
31 |
+
|
32 |
+
|
33 |
+
class Dataset:
|
34 |
+
"""Load, separate and prepare the data for training and prediction"""
|
35 |
+
|
36 |
+
def __init__(self, data_dir, batch_size, fold, augment=False, hpu_id=0, num_hpus=1, seed=0, gaudi_type=None):
    """Load the EM-segmentation TIFF volumes and build the fold split.

    Raises:
        FileNotFoundError: if ``data_dir`` does not exist.
    """
    if not os.path.exists(data_dir):
        raise FileNotFoundError('Cannot find data dir: {}'.format(data_dir))
    self._data_dir = data_dir
    self._batch_size = batch_size
    self._augment = augment
    self._seed = seed

    volume = self._load_multipage_tiff(os.path.join(self._data_dir, 'train-volume.tif'))
    volume_masks = self._load_multipage_tiff(os.path.join(self._data_dir, 'train-labels.tif'))
    self._test_images = self._load_multipage_tiff(os.path.join(self._data_dir, 'test-volume.tif'))

    train_idx, val_idx = self._get_val_train_indices(len(volume), fold)
    self._train_images = volume[train_idx]
    self._train_masks = volume_masks[train_idx]
    self._val_images = volume[val_idx]
    self._val_masks = volume_masks[val_idx]

    self._num_hpus = num_hpus
    self._hpu_id = hpu_id

    # Gaudi2 benefits from parallel (optionally deterministic) mapping
    # in the tf.data pipeline; other devices keep the defaults.
    self._deterministic, self._num_parallel_calls = None, None
    if gaudi_type is not None and 'GAUDI2' in gaudi_type:
        self._deterministic = self._seed is not None
        self._num_parallel_calls = tf.data.AUTOTUNE
|
62 |
+
|
63 |
+
@property
|
64 |
+
def train_size(self):
|
65 |
+
return len(self._train_images)
|
66 |
+
|
67 |
+
@property
|
68 |
+
def eval_size(self):
|
69 |
+
return len(self._val_images)
|
70 |
+
|
71 |
+
@property
|
72 |
+
def test_size(self):
|
73 |
+
return len(self._test_images)
|
74 |
+
|
75 |
+
def _load_multipage_tiff(self, path):
    """Read a multi-page TIFF file into one array of shape (pages, H, W)."""
    pages = ImageSequence.Iterator(Image.open(path))
    return np.array([np.array(page) for page in pages])
|
78 |
+
|
79 |
+
def _get_val_train_indices(self, length, fold, ratio=0.8):
|
80 |
+
assert 0 < ratio <= 1, "Train/total data ratio must be in range (0.0, 1.0]"
|
81 |
+
np.random.seed(self._seed)
|
82 |
+
indices = np.arange(0, length, 1, dtype=int)
|
83 |
+
np.random.shuffle(indices)
|
84 |
+
if fold is not None:
|
85 |
+
indices = deque(indices)
|
86 |
+
indices.rotate(fold * int((1.0 - ratio) * length))
|
87 |
+
indices = np.array(indices)
|
88 |
+
train_indices = indices[:int(ratio * len(indices))]
|
89 |
+
val_indices = indices[int(ratio * len(indices)):]
|
90 |
+
else:
|
91 |
+
train_indices = indices
|
92 |
+
val_indices = []
|
93 |
+
return train_indices, val_indices
|
94 |
+
|
95 |
+
def _normalize_inputs(self, inputs):
    """Scale raw inputs to [-1, 1] and pad to the network's 572x572 input size."""
    x = tf.cast(inputs, tf.float32)
    x = tf.expand_dims(x, -1)

    # Map pixel values [0, 255] -> [-1, 1].
    x = x / 127.5 - 1
    # Resize to the network's output resolution...
    x = tf.image.resize(x, (388, 388))

    # ...then zero-pad up to the input resolution.
    return tf.image.resize_with_crop_or_pad(x, 572, 572)
|
105 |
+
|
106 |
+
def _normalize_labels(self, labels):
    """Binarize labels and one-hot encode them at the padded 572x572 size."""
    y = tf.cast(labels, tf.float32)
    y = tf.expand_dims(y, -1)
    y = y / 255

    # Resize to the output resolution, then pad to the input size.
    y = tf.image.resize(y, (388, 388))
    y = tf.image.resize_with_crop_or_pad(y, 572, 572)

    # Threshold at 0.5 to remove interpolation artifacts.
    shape = tf.shape(input=y)
    is_background = tf.less(y, 0.5 * tf.ones(shape))
    y = tf.where(is_background, tf.zeros(shape), tf.ones(shape))

    # Two classes: background / foreground.
    return tf.one_hot(tf.squeeze(tf.cast(y, tf.int32)), 2)
|
119 |
+
|
120 |
+
    @tf.function
    def _preproc_samples(self, inputs, labels, seed=None):
        """Preprocess samples and perform random augmentations.

        Normalizes image/mask, then (when augmentation is enabled) applies the
        same random horizontal/vertical flips and random crop to both tensors,
        plus a brightness jitter on the image only. Random ops must execute in
        this exact order so that a fixed ``seed`` reproduces the same
        augmentation sequence.

        Args:
            inputs: Raw image tensor (HxW uint8-like values in [0, 255]).
            labels: Raw mask tensor with the same spatial size as ``inputs``.
            seed: Optional seed for the random ops; passed when preprocessing
                runs in parallel so results stay deterministic.

        Returns:
            Tuple (inputs, labels): 572x572 normalized image and 388x388
            binarized one-hot mask.
        """
        inputs = self._normalize_inputs(inputs)
        labels = self._normalize_labels(labels)

        if self._augment:
            # Horizontal flip -- same decision applied to image and mask.
            h_flip = tf.random.uniform([], seed=seed) > 0.5
            inputs = tf.cond(pred=h_flip, true_fn=lambda: tf.image.flip_left_right(inputs), false_fn=lambda: inputs)
            labels = tf.cond(pred=h_flip, true_fn=lambda: tf.image.flip_left_right(labels), false_fn=lambda: labels)

            # Vertical flip -- likewise shared between image and mask.
            v_flip = tf.random.uniform([], seed=seed) > 0.5
            inputs = tf.cond(pred=v_flip, true_fn=lambda: tf.image.flip_up_down(inputs), false_fn=lambda: inputs)
            labels = tf.cond(pred=v_flip, true_fn=lambda: tf.image.flip_up_down(labels), false_fn=lambda: labels)

            # Prepare for batched transforms (crop_and_resize expects a batch dim).
            inputs = tf.expand_dims(inputs, 0)
            labels = tf.expand_dims(labels, 0)

            # Random crop and resize: each edge moves inward by up to 30%.
            left = tf.random.uniform([], seed=seed) * 0.3
            right = 1 - tf.random.uniform([], seed=seed) * 0.3
            top = tf.random.uniform([], seed=seed) * 0.3
            bottom = 1 - tf.random.uniform([], seed=seed) * 0.3

            inputs = tf.image.crop_and_resize(inputs, [[top, left, bottom, right]], [0], (572, 572))
            labels = tf.image.crop_and_resize(labels, [[top, left, bottom, right]], [0], (572, 572))

            # Gray value variations

            # Adjust brightness and keep values in range (image only; the mask
            # must stay binary).
            inputs = tf.image.random_brightness(inputs, max_delta=0.2, seed=seed)
            inputs = tf.clip_by_value(inputs, clip_value_min=-1, clip_value_max=1)

            # Drop the temporary batch dimension again.
            inputs = tf.squeeze(inputs, 0)
            labels = tf.squeeze(labels, 0)

        # Bring back labels to network's output size and remove interpolation artifacts
        labels = tf.image.resize_with_crop_or_pad(labels, target_width=388, target_height=388)
        cond = tf.less(labels, 0.5 * tf.ones(tf.shape(input=labels)))
        labels = tf.where(cond, tf.zeros(tf.shape(input=labels)), tf.ones(tf.shape(input=labels)))

        return inputs, labels
|
165 |
+
|
166 |
+
@tf.function
|
167 |
+
def _preproc_eval_samples(self, inputs, labels):
|
168 |
+
"""Preprocess samples and perform random augmentations"""
|
169 |
+
inputs = self._normalize_inputs(inputs)
|
170 |
+
labels = self._normalize_labels(labels)
|
171 |
+
|
172 |
+
# Bring back labels to network's output size and remove interpolation artifacts
|
173 |
+
labels = tf.image.resize_with_crop_or_pad(labels, target_width=388, target_height=388)
|
174 |
+
cond = tf.less(labels, 0.5 * tf.ones(tf.shape(input=labels)))
|
175 |
+
labels = tf.where(cond, tf.zeros(tf.shape(input=labels)), tf.ones(tf.shape(input=labels)))
|
176 |
+
|
177 |
+
return (inputs, labels)
|
178 |
+
|
179 |
+
def prefetch(self, dataset, buffer_size):
|
180 |
+
"""Dataset prefetching function"""
|
181 |
+
if len(tf.config.list_logical_devices('HPU')) > 0:
|
182 |
+
device = tf.config.list_logical_devices('HPU')[0].name
|
183 |
+
with tf.device(device):
|
184 |
+
dataset = dataset.apply(tf.data.experimental.prefetch_to_device(device))
|
185 |
+
else:
|
186 |
+
dataset = dataset.prefetch(buffer_size)
|
187 |
+
|
188 |
+
return dataset
|
189 |
+
|
190 |
+
def train_fn(self, drop_remainder=False):
|
191 |
+
"""Input function for training"""
|
192 |
+
dataset = tf.data.Dataset.from_tensor_slices(
|
193 |
+
(self._train_images, self._train_masks))
|
194 |
+
dataset = dataset.shard(self._num_hpus, self._hpu_id)
|
195 |
+
dataset = dataset.repeat()
|
196 |
+
dataset = dataset.shuffle(self._batch_size * 3)
|
197 |
+
|
198 |
+
# Starting with Gaudi2 device, data augmentation is performed in parallel.
|
199 |
+
# To ensure determinism, the seed param must be passed to the preprocessing method.
|
200 |
+
# Sequential execution on first-gen Gaudi does not require this.
|
201 |
+
seed = self._seed if self._num_parallel_calls is not None else None
|
202 |
+
dataset = dataset.map(lambda images, labels: self._preproc_samples(images, labels, seed),
|
203 |
+
num_parallel_calls=self._num_parallel_calls, deterministic=self._deterministic)
|
204 |
+
dataset = dataset.batch(self._batch_size, drop_remainder=drop_remainder,
|
205 |
+
num_parallel_calls=self._num_parallel_calls, deterministic=self._deterministic)
|
206 |
+
dataset = self.prefetch(dataset, self._batch_size)
|
207 |
+
|
208 |
+
return dataset
|
209 |
+
|
210 |
+
def eval_fn(self, count, drop_remainder=False):
|
211 |
+
"""Input function for validation"""
|
212 |
+
dataset = tf.data.Dataset.from_tensor_slices(
|
213 |
+
(self._val_images, self._val_masks))
|
214 |
+
dataset = dataset.repeat(count=count)
|
215 |
+
dataset = dataset.map(self._preproc_eval_samples,
|
216 |
+
num_parallel_calls=self._num_parallel_calls, deterministic=self._deterministic)
|
217 |
+
dataset = dataset.batch(self._batch_size, drop_remainder=drop_remainder,
|
218 |
+
num_parallel_calls=self._num_parallel_calls, deterministic=self._deterministic)
|
219 |
+
dataset = self.prefetch(dataset, self._batch_size)
|
220 |
+
|
221 |
+
return dataset
|
222 |
+
|
223 |
+
def test_fn(self, count, drop_remainder=False):
|
224 |
+
"""Input function for testing"""
|
225 |
+
dataset = tf.data.Dataset.from_tensor_slices(
|
226 |
+
self._test_images)
|
227 |
+
dataset = dataset.repeat(count=count)
|
228 |
+
dataset = dataset.map(self._normalize_inputs)
|
229 |
+
dataset = dataset.batch(self._batch_size, drop_remainder=drop_remainder)
|
230 |
+
dataset = self.prefetch(dataset, self._batch_size)
|
231 |
+
|
232 |
+
return dataset
|
233 |
+
|
234 |
+
def synth_fn(self):
|
235 |
+
"""Synthetic data function for testing"""
|
236 |
+
inputs = tf.random.truncated_normal((572, 572, 1), dtype=tf.float32, mean=127.5, stddev=1, seed=self._seed,
|
237 |
+
name='synth_inputs')
|
238 |
+
masks = tf.random.truncated_normal((388, 388, 2), dtype=tf.float32, mean=0.01, stddev=0.1, seed=self._seed,
|
239 |
+
name='synth_masks')
|
240 |
+
|
241 |
+
dataset = tf.data.Dataset.from_tensors((inputs, masks))
|
242 |
+
|
243 |
+
dataset = dataset.cache()
|
244 |
+
dataset = dataset.repeat()
|
245 |
+
dataset = dataset.batch(self._batch_size)
|
246 |
+
|
247 |
+
dataset = self.prefetch(dataset, tf.data.experimental.AUTOTUNE)
|
248 |
+
|
249 |
+
return dataset
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/download_dataset.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import argparse
import os
import subprocess
|
17 |
+
|
18 |
+
|
19 |
+
# Command line interface for the downloader.
PARSER = argparse.ArgumentParser(description="U-Net medical")

PARSER.add_argument(
    '--data_dir',
    type=str,
    default='./data',
    help="""Directory where to download the dataset""",
)
|
25 |
+
|
26 |
+
|
27 |
+
def main():
    """Download the ISBI challenge TIFF volumes into ``--data_dir``.

    Fetches the train volume, train labels, and test volume with wget.

    Raises:
        subprocess.CalledProcessError: If any download fails.
    """
    flags = PARSER.parse_args()

    # exist_ok avoids a race between the existence check and the creation.
    os.makedirs(flags.data_dir, exist_ok=True)

    base_url = 'http://brainiac2.mit.edu/isbi_challenge/sites/default/files'
    for filename in ('train-volume.tif', 'train-labels.tif', 'test-volume.tif'):
        # Argument-list form (shell=False) prevents shell injection through a
        # user-supplied --data_dir; the original built a shell string for
        # os.system and ignored the exit status. check=True surfaces failures
        # instead of printing a misleading success message.
        subprocess.run(['wget', '{}/{}'.format(base_url, filename), '-P', flags.data_dir],
                       check=True)

    print("Finished downloading files for U-Net medical to {}".format(flags.data_dir))


if __name__ == '__main__':
    main()
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/model/layers.py
ADDED
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
# -*- coding: utf-8 -*-
|
16 |
+
""" Contains a set of utilities that allow building the UNet model"""
|
17 |
+
|
18 |
+
import tensorflow as tf
|
19 |
+
|
20 |
+
|
21 |
+
def _crop_and_concat(inputs, residual_input):
    """ Perform a central crop of ``residual_input`` and concatenate to ``inputs``

    Args:
        inputs (tf.Tensor): Tensor with input
        residual_input (tf.Tensor): Residual input

    Return:
        Concatenated tf.Tensor with the size of ``inputs``

    """
    # Fraction of the residual's spatial size that survives the crop.
    crop_fraction = inputs.shape[1] / residual_input.shape[1]
    cropped = tf.image.central_crop(residual_input, crop_fraction)
    return tf.concat([inputs, cropped], axis=-1)
|
34 |
+
|
35 |
+
|
36 |
+
class InputBlock(tf.keras.Model):
    def __init__(self, filters, seed=None):
        """ UNet input block

        Perform two unpadded convolutions with a specified number of filters and downsample
        through max-pooling.

        Args:
            filters (int): Number of filters in convolution
            seed (int): Seed for the Glorot kernel initializer
        """
        # Fix: the original called ``super().__init__(self)``, passing the
        # instance as a stray positional argument to tf.keras.Model.__init__;
        # the subclassing API takes no positional arguments here.
        super().__init__()
        self.initializer = tf.keras.initializers.GlorotUniform(seed=seed)
        with tf.name_scope('input_block'):
            self.conv1 = tf.keras.layers.Conv2D(filters=filters,
                                                kernel_size=(3, 3),
                                                activation=tf.nn.relu,
                                                kernel_initializer=self.initializer)
            self.conv2 = tf.keras.layers.Conv2D(filters=filters,
                                                kernel_size=(3, 3),
                                                activation=tf.nn.relu,
                                                kernel_initializer=self.initializer)
            self.maxpool = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)

    def call(self, inputs):
        """Apply conv-conv-maxpool; return (downsampled output, pre-pool skip tensor)."""
        out = self.conv1(inputs)
        out = self.conv2(out)
        mp = self.maxpool(out)
        return mp, out
|
64 |
+
|
65 |
+
|
66 |
+
class DownsampleBlock(tf.keras.Model):
    def __init__(self, filters, idx, seed=None):
        """ UNet downsample block

        Perform two unpadded convolutions with a specified number of filters and downsample
        through max-pooling

        Args:
            filters (int): Number of filters in convolution
            idx (int): Index of block
            seed (int): Seed for the Glorot kernel initializer

        Return:
            Tuple of convolved ``inputs`` after and before downsampling

        """
        # Fix: ``super().__init__(self)`` passed the instance itself as a
        # positional argument to tf.keras.Model.__init__; the correct
        # subclassing call takes none.
        super().__init__()
        self.initializer = tf.keras.initializers.GlorotUniform(seed=seed)
        with tf.name_scope('downsample_block_{}'.format(idx)):
            self.conv1 = tf.keras.layers.Conv2D(filters=filters,
                                                kernel_size=(3, 3),
                                                activation=tf.nn.relu,
                                                kernel_initializer=self.initializer)
            self.conv2 = tf.keras.layers.Conv2D(filters=filters,
                                                kernel_size=(3, 3),
                                                activation=tf.nn.relu,
                                                kernel_initializer=self.initializer)
            self.maxpool = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)

    def call(self, inputs):
        """Apply conv-conv-maxpool; return (downsampled output, pre-pool skip tensor)."""
        out = self.conv1(inputs)
        out = self.conv2(out)
        mp = self.maxpool(out)
        return mp, out
|
99 |
+
|
100 |
+
|
101 |
+
class BottleneckBlock(tf.keras.Model):
    def __init__(self, filters, seed=None):
        """ UNet central block

        Perform two unpadded convolutions with a specified number of filters and upsample
        including dropout before upsampling for training

        Args:
            filters (int): Number of filters in convolution
            seed (int): Seed for the Glorot kernel initializer
        """
        # Fix: ``super().__init__(self)`` passed the instance itself as a
        # positional argument to tf.keras.Model.__init__; the correct
        # subclassing call takes none.
        super().__init__()
        self.initializer = tf.keras.initializers.GlorotUniform(seed=seed)
        with tf.name_scope('bottleneck_block'):
            self.conv1 = tf.keras.layers.Conv2D(filters=filters,
                                                kernel_size=(3, 3),
                                                activation=tf.nn.relu,
                                                kernel_initializer=self.initializer)
            self.conv2 = tf.keras.layers.Conv2D(filters=filters,
                                                kernel_size=(3, 3),
                                                activation=tf.nn.relu,
                                                kernel_initializer=self.initializer)
            self.dropout = tf.keras.layers.Dropout(rate=0.5)
            self.conv_transpose = tf.keras.layers.Conv2DTranspose(filters=filters // 2,
                                                                  kernel_size=(3, 3),
                                                                  strides=(2, 2),
                                                                  padding='same',
                                                                  activation=tf.nn.relu,
                                                                  kernel_initializer=self.initializer)

    def call(self, inputs, training):
        """Apply conv-conv-dropout-upsample; dropout is active only while training."""
        out = self.conv1(inputs)
        out = self.conv2(out)
        out = self.dropout(out, training=training)
        out = self.conv_transpose(out)
        return out
|
136 |
+
|
137 |
+
|
138 |
+
class UpsampleBlock(tf.keras.Model):
    def __init__(self, filters, idx, seed=None):
        """ UNet upsample block

        Perform two unpadded convolutions with a specified number of filters and upsample

        Args:
            filters (int): Number of filters in convolution
            idx (int): Index of block
            seed (int): Seed for the Glorot kernel initializer
        """
        # Fix: ``super().__init__(self)`` passed the instance itself as a
        # positional argument to tf.keras.Model.__init__; the correct
        # subclassing call takes none.
        super().__init__()
        self.initializer = tf.keras.initializers.GlorotUniform(seed=seed)
        with tf.name_scope('upsample_block_{}'.format(idx)):
            self.conv1 = tf.keras.layers.Conv2D(filters=filters,
                                                kernel_size=(3, 3),
                                                activation=tf.nn.relu,
                                                kernel_initializer=self.initializer)
            self.conv2 = tf.keras.layers.Conv2D(filters=filters,
                                                kernel_size=(3, 3),
                                                activation=tf.nn.relu,
                                                kernel_initializer=self.initializer)
            self.conv_transpose = tf.keras.layers.Conv2DTranspose(filters=filters // 2,
                                                                  kernel_size=(3, 3),
                                                                  strides=(2, 2),
                                                                  padding='same',
                                                                  activation=tf.nn.relu,
                                                                  kernel_initializer=self.initializer)

    def call(self, inputs, residual_input):
        """Merge the skip connection, convolve twice, and upsample."""
        out = _crop_and_concat(inputs, residual_input)
        out = self.conv1(out)
        out = self.conv2(out)
        out = self.conv_transpose(out)
        return out
|
172 |
+
|
173 |
+
|
174 |
+
class OutputBlock(tf.keras.Model):
    def __init__(self, filters, n_classes, seed=None):
        """ UNet output block

        Perform three unpadded convolutions, the last one with the same number
        of channels as classes we want to classify

        Args:
            filters (int): Number of filters in convolution
            n_classes (int): Number of output classes
            seed (int): Seed for the Glorot kernel initializer
        """
        # Fix: ``super().__init__(self)`` passed the instance itself as a
        # positional argument to tf.keras.Model.__init__; the correct
        # subclassing call takes none.
        super().__init__()
        self.initializer = tf.keras.initializers.GlorotUniform(seed=seed)
        with tf.name_scope('output_block'):
            self.conv1 = tf.keras.layers.Conv2D(filters=filters,
                                                kernel_size=(3, 3),
                                                activation=tf.nn.relu,
                                                kernel_initializer=self.initializer)
            self.conv2 = tf.keras.layers.Conv2D(filters=filters,
                                                kernel_size=(3, 3),
                                                activation=tf.nn.relu,
                                                kernel_initializer=self.initializer)
            # Final 1x1 projection to per-class logits: no activation here.
            self.conv3 = tf.keras.layers.Conv2D(filters=n_classes,
                                                kernel_size=(1, 1),
                                                activation=None,
                                                kernel_initializer=self.initializer)

    def call(self, inputs, residual_input):
        """Merge the skip connection and produce per-class logits."""
        out = _crop_and_concat(inputs, residual_input)
        out = self.conv1(out)
        out = self.conv2(out)
        out = self.conv3(out)
        return out
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/model/unet.py
ADDED
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
""" Model construction utils
|
16 |
+
|
17 |
+
This module provides a convenient way to create different topologies
|
18 |
+
based around UNet.
|
19 |
+
|
20 |
+
"""
|
21 |
+
|
22 |
+
import tensorflow as tf
|
23 |
+
|
24 |
+
from model.layers import InputBlock, DownsampleBlock, BottleneckBlock, UpsampleBlock, OutputBlock
|
25 |
+
|
26 |
+
|
27 |
+
class Unet(tf.keras.Model):
    """ U-Net: Convolutional Networks for Biomedical Image Segmentation

    Source:
        https://arxiv.org/pdf/1505.04597

    """
    def __init__(self, seed=None):
        # Fix: ``super().__init__(self)`` passed the instance itself as a
        # positional argument to tf.keras.Model.__init__; the correct
        # subclassing call takes none.
        super().__init__()
        self.input_block = InputBlock(filters=64, seed=seed)
        self.bottleneck = BottleneckBlock(filters=1024, seed=seed)
        self.output_block = OutputBlock(filters=64, n_classes=2, seed=seed)

        # Encoder and decoder stages with mirrored filter counts.
        self.down_blocks = [DownsampleBlock(filters, idx, seed=seed)
                            for idx, filters in enumerate([128, 256, 512])]

        self.up_blocks = [UpsampleBlock(filters, idx, seed=seed)
                          for idx, filters in enumerate([512, 256, 128])]

    def call(self, x, training=True):
        """Run the encoder, bottleneck, and decoder; return per-class logits."""
        skip_connections = []
        out, residual = self.input_block(x)
        skip_connections.append(residual)

        for down_block in self.down_blocks:
            out, residual = down_block(out)
            skip_connections.append(residual)

        out = self.bottleneck(out, training)

        # Skip connections are consumed in reverse (LIFO) order.
        for up_block in self.up_blocks:
            out = up_block(out, skip_connections.pop())

        out = self.output_block(out, skip_connections.pop())
        return out
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
git+https://github.com/nvidia/dllogger@26a0f8f1958de2c0c460925ff6102a4d2486d6cc#egg=dllogger
|
2 |
+
munch==2.5.0
|
3 |
+
pillow==10.2.0
|
4 |
+
PyYAML==6.0.0
|
5 |
+
numpy==1.24.0
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/runtime/arguments.py
ADDED
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2020-2023 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - default values for model_dir, log_dir, batch_size, max_steps, augment
|
19 |
+
# - removed use_amp, use_trt flags
|
20 |
+
# - added dtype, hvd_workers, dump_config, no_hpu, synth_data, disable_ckpt_saving,
|
21 |
+
# use_horovod, tensorboard_logging, bf16_config_path, tf_verbosity,
|
22 |
+
# kubernetes_run options
|
23 |
+
# - SmartFormatter for textwrapping help message
|
24 |
+
# - added gaudi_type param
|
25 |
+
# - changed seed default value
|
26 |
+
|
27 |
+
"""Command line argument parsing"""
|
28 |
+
|
29 |
+
import os
|
30 |
+
import argparse
|
31 |
+
from pathlib import Path
|
32 |
+
|
33 |
+
from munch import Munch
|
34 |
+
|
35 |
+
|
36 |
+
class SmartFormatter(argparse.ArgumentDefaultsHelpFormatter):
    '''
    Custom Help Formatter used to split help text when '\n' was
    inserted in it.
    '''
    def _fill_text(self, text, width, indent):
        # Keep the author's explicit line breaks instead of re-wrapping
        # the description text.
        return ''.join(text.splitlines(keepends=True))

    def _split_lines(self, text, width):
        # Wrap each explicit line independently, keeping only the first
        # wrapped row of each.
        return [argparse.HelpFormatter._split_lines(self, line, width)[0]
                for line in text.splitlines()]
|
46 |
+
|
47 |
+
|
48 |
+
def get_parser(description, distributed_launcher):
    """Build the UNet2D command line parser.

    Args:
        description (str): Program description shown in ``--help``.
        distributed_launcher (bool): When True, show help text for the
            launcher-only options (``--hvd_workers``, ``--kubernetes_run``);
            otherwise those options are still parsed but hidden via SUPPRESS.

    Returns:
        argparse.ArgumentParser: Parser configured with all model options.
    """
    parser = argparse.ArgumentParser(description=description, formatter_class=SmartFormatter)

    parser.add_argument('--exec_mode',
                        type=str,
                        choices=['train', 'train_and_predict', 'predict', 'evaluate', 'train_and_evaluate'],
                        default='train_and_evaluate',
                        help="""Execution mode of running the model""")

    parser.add_argument('--model_dir',
                        type=str,
                        default='/tmp/unet2d',
                        help="""Output directory for information related to the model""")

    parser.add_argument('--data_dir',
                        type=str,
                        required=True,
                        help="""Input directory containing the dataset for training the model""")

    parser.add_argument('--log_dir',
                        type=str,
                        default="/tmp/unet2d",
                        help="""Output directory for training logs""")

    parser.add_argument('-b', '--batch_size',
                        type=int,
                        default=8,
                        help="""Size of each minibatch per HPU""")

    parser.add_argument('-d', '--dtype',
                        type=str,
                        default='bf16',
                        metavar='bf16/fp32',
                        choices=['fp32', 'bf16'],
                        help='Data type: fp32 or bf16')

    parser.add_argument('--fold',
                        type=int,
                        default=None,
                        help="""Chosen fold for cross-validation. Use None to disable cross-validation""")

    parser.add_argument('--max_steps',
                        type=int,
                        default=6400,
                        help="""Maximum number of steps (batches) used for training""")

    parser.add_argument('--log_every',
                        type=int,
                        default=100,
                        help="""Log data every n steps""")

    parser.add_argument('--evaluate_every',
                        type=int,
                        default=0,
                        help="""Evaluate every n steps""")

    parser.add_argument('--warmup_steps',
                        type=int,
                        default=200,
                        help="""Number of warmup steps""")

    parser.add_argument('--weight_decay',
                        type=float,
                        default=0.0005,
                        help="""Weight decay coefficient""")

    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0001,
                        help="""Learning rate coefficient for AdamOptimizer""")

    parser.add_argument('--seed',
                        type=int,
                        default=123,
                        help="""Random seed""")

    parser.add_argument('--dump_config',
                        type=str,
                        default=None,
                        help="""Directory for dumping debug traces""")

    # --augment / --no-augment form an on/off pair sharing one dest.
    parser.add_argument('--augment', dest='augment', action='store_true',
                        help="""Perform data augmentation during training""")
    parser.add_argument('--no-augment', dest='augment', action='store_false')
    parser.set_defaults(augment=True)

    parser.add_argument('--benchmark', dest='benchmark', action='store_true',
                        help="""Collect performance metrics during training""")
    parser.add_argument('--no-benchmark', dest='benchmark', action='store_false')

    parser.add_argument('--use_xla', '--xla', dest='use_xla', action='store_true',
                        help="""Train using XLA""")

    parser.add_argument('--resume_training', dest='resume_training', action='store_true',
                        help="""Resume training from a checkpoint""")

    parser.add_argument('--no_hpu', dest='no_hpu', action='store_true',
                        help="""Disables execution on HPU, train on CPU""")

    parser.add_argument('--synth_data', dest='synth_data', action='store_true',
                        help="""Use deterministic and synthetic data""")

    parser.add_argument('--disable_ckpt_saving', dest='disable_ckpt_saving', action='store_true',
                        help="""Disables saving checkpoints""")

    # Launcher-only options: hidden from --help unless running through the
    # distributed launcher, but always parsed (ignored by unet2d.py itself).
    parser.add_argument('--hvd_workers', dest='hvd_workers', type=int, default=1,
                        help="""Number of Horovod workers for single HLS""" if distributed_launcher else argparse.SUPPRESS)  # ignored by unet2d.py

    parser.add_argument("--kubernetes_run", default=False, type=bool,
                        help="Kubernetes run" if distributed_launcher else argparse.SUPPRESS)  # ignored by unet2d.py

    parser.add_argument('--use_horovod', dest='use_horovod', action='store_true',
                        help="""Enable horovod usage""")

    parser.add_argument('--tensorboard_logging', dest='tensorboard_logging', action='store_true',
                        help="""Enable tensorboard logging""")

    parser.add_argument('--log_all_workers', dest='log_all_workers', action='store_true',
                        help="""Enable logging data for every horovod worker in a separate directory named `worker_N`""")

    # Default bf16 config lives next to this package: <repo>/bf16_config/unet.json
    DEFAULT_BF16_CONFIG_PATH = os.fspath(Path(os.path.realpath(__file__)).parents[1].joinpath("bf16_config/unet.json"))
    parser.add_argument('--bf16_config_path', metavar='</path/to/custom/bf16/config>', required=False, type=str, default=DEFAULT_BF16_CONFIG_PATH,
                        help="""Path to custom mixed precision config to use given in JSON format.""")

    parser.add_argument('--tf_verbosity', dest='tf_verbosity', type=int, choices=[0, 1, 2, 3],
                        help="""If set changes logging level from Tensorflow:
                        0 - all messages are logged (default behavior);
                        1 - INFO messages are not printed;
                        2 - INFO and WARNING messages are not printed;
                        3 - INFO, WARNING, and ERROR messages are not printed.""")
    return parser
|
179 |
+
|
180 |
+
|
181 |
+
def parse_args(description="UNet-medical", distributed_launcher=False):
    """Parse command line flags and return them as a Munch (attribute dict).

    Every destination defined by ``get_parser`` is carried over verbatim;
    ``gaudi_type`` is added with a None placeholder for later detection.
    """
    flags = get_parser(description, distributed_launcher).parse_args()
    params = Munch(vars(flags))
    params.gaudi_type = None
    return params
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/runtime/losses.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - replace tf.keras.activations.softmax with tf.nn.softmax due to
|
19 |
+
# https://github.com/tensorflow/tensorflow/pull/47572
|
20 |
+
# - replace tf.keras.backend.binary_crossentropy with tf.nn.sigmoid_cross_entropy_with_logits
|
21 |
+
|
22 |
+
"""Training and evaluation losses"""
|
23 |
+
|
24 |
+
import tensorflow as tf
|
25 |
+
|
26 |
+
|
27 |
+
# Class Dice coefficient averaged over batch
|
28 |
+
def dice_coef(predict, target, axis=1, eps=1e-6):
    """Soft (squared-denominator) Dice coefficient per class, averaged over the batch.

    :param predict: Predicted probabilities/activations.
    :param target: Ground-truth tensor with the same layout as ``predict``.
    :param axis: Axis (or axes) reduced when accumulating the overlap terms.
    :param eps: Smoothing constant guarding against division by zero.
    :return: Dice coefficient reduced with a mean over axis 0 (the batch).
    """
    overlap = tf.reduce_sum(input_tensor=predict * target, axis=axis)
    denominator = tf.reduce_sum(input_tensor=predict * predict + target * target, axis=axis)
    per_sample_dice = (2. * overlap + eps) / (denominator + eps)
    # Average over batch.
    return tf.reduce_mean(input_tensor=per_sample_dice, axis=0)
|
33 |
+
|
34 |
+
|
35 |
+
def partial_losses(predict, target):
    """Compute the two partial training losses (cross-entropy and Dice).

    Both tensors are flattened to ``(batch, pixels, classes)`` before the
    losses are evaluated.

    :param predict: Network output logits; last dimension is the class axis.
    :param target: Ground-truth labels with the same layout as ``predict``.
    :return: Tuple ``(crossentropy_loss, dice_loss)`` of scalar tensors.
    """
    n_classes = predict.shape[-1]
    batch = tf.shape(input=predict)[0]

    # Cast logits to fp32 so the loss math is numerically stable regardless
    # of the model's compute dtype.
    flat_logits = tf.reshape(tf.cast(predict, tf.float32), [batch, -1, n_classes])
    flat_labels = tf.reshape(target, [batch, -1, n_classes])

    # tf.nn.sigmoid_cross_entropy_with_logits is used instead of
    # tf.keras.backend.binary_crossentropy (see file header changelog).
    crossentropy_loss = tf.reduce_mean(
        input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(logits=flat_logits,
                                                             labels=flat_labels),
        name='cross_loss_ref')

    # tf.nn.softmax replaces tf.keras.activations.softmax due to
    # https://github.com/tensorflow/tensorflow/pull/47572
    probabilities = tf.nn.softmax(flat_logits, axis=-1)
    dice_loss = tf.reduce_mean(input_tensor=1 - dice_coef(probabilities, flat_labels),
                               name='dice_loss_ref')
    return crossentropy_loss, dice_loss
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/runtime/parse_results.py
ADDED
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2020-2023 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - removed exec_mode from argparser
|
19 |
+
# - replaced gpu specific env choices to hpu
|
20 |
+
|
21 |
+
import os
|
22 |
+
import argparse
|
23 |
+
|
24 |
+
import numpy as np
|
25 |
+
|
26 |
+
|
27 |
+
def process_performance_stats(timestamps, batch_size, mode):
    """Get confidence intervals

    :param timestamps: Collection of per-step durations in seconds (numpy array)
    :param batch_size: Number of samples per batch
    :param mode: Estimator's execution mode (used as a key suffix)
    :return: Dict with throughput, mean latency and 90/95/99th-percentile latencies
    """
    latencies_ms = timestamps * 1000
    mean_latency_ms = latencies_ms.mean()
    stats = {
        f"throughput_{mode}": 1000.0 * batch_size / mean_latency_ms,
        f"latency_{mode}_mean": mean_latency_ms,
    }
    # Tail latencies at the usual confidence levels.
    stats.update({f"latency_{mode}_{level}": np.percentile(latencies_ms, level)
                  for level in (90, 95, 99)})
    return stats
|
43 |
+
|
44 |
+
|
45 |
+
def parse_convergence_results(path, environment):
    """Aggregate evaluation scores from fold logfiles and print their averages.

    Scans ``path`` for files whose name contains both "log" and ``environment``,
    reads the last line of each, and averages the ``eval_dice_score`` and
    ``eval_ce_loss`` values found there.

    :param path: Directory containing per-fold logfiles.
    :param environment: Substring identifying the run configuration (e.g. "bf16_8hpu").
    :raises FileNotFoundError: When no matching logfile exists in ``path``.
    """
    dice_scores, ce_scores = [], []
    matching = [name for name in os.listdir(path) if "log" in name and environment in name]
    if not matching:
        raise FileNotFoundError("No logfile found at {}".format(path))
    for name in matching:
        with open(os.path.join(path, name), "r") as handle:
            last_line = handle.readlines()[-1]
        if "eval_dice_score" not in last_line:
            print("Evaluation score not found. The file", name, "might be corrupted.")
            continue
        # NOTE(review): assumes dllogger's last-line format where the numeric
        # value follows the metric tag within a single space-delimited token
        # group — confirm against the actual log output.
        tokens = last_line.split(" ")
        dice_scores.append(float(next(t for t in tokens if "eval_dice_score" in t).split()[-1]))
        ce_scores.append(float(next(t for t in tokens if "eval_ce_loss" in t).split()[-1]))
    if dice_scores:
        print("Evaluation dice score:", sum(dice_scores) / len(dice_scores))
        print("Evaluation cross-entropy loss:", sum(ce_scores) / len(ce_scores))
    else:
        print("All logfiles were corrupted, no loss was obtained.")
|
66 |
+
|
67 |
+
|
68 |
+
if __name__ == '__main__':
    # CLI entry point: aggregate convergence results for one run configuration.
    cli = argparse.ArgumentParser(description="UNet-medical-utils")
    cli.add_argument('--model_dir', type=str, required=True)
    cli.add_argument('--env', type=str, required=True,
                     choices=['fp32_1hpu', 'fp32_8hpu', 'bf16_1hpu', 'bf16_8hpu'])
    cli_args = cli.parse_args()
    parse_convergence_results(path=cli_args.model_dir, environment=cli_args.env)
    print()
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/runtime/run.py
ADDED
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2020-2023 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - wrapped horovod import in a try-catch block so that the user is not required to install this library
|
19 |
+
# when the model is being run on a single card
|
20 |
+
# - removed GPU specific mixed_precision handling
|
21 |
+
# - added synthetic data option for deterministic training
|
22 |
+
# - added tensorboard logging and performance measurements logs
|
23 |
+
# - in a training mode return loss from train_step as a numpy object to transfer the data to host
|
24 |
+
# - added TimeToTrain callback for dumping evaluation timestamps
|
25 |
+
|
26 |
+
import os
|
27 |
+
from time import time
|
28 |
+
|
29 |
+
import numpy as np
|
30 |
+
from PIL import Image
|
31 |
+
import tensorflow as tf
|
32 |
+
|
33 |
+
from runtime.losses import partial_losses
|
34 |
+
from runtime.parse_results import process_performance_stats
|
35 |
+
from TensorFlow.common.tb_utils import write_hparams_v2
|
36 |
+
|
37 |
+
|
38 |
+
try:
|
39 |
+
import horovod.tensorflow as hvd
|
40 |
+
except ImportError:
|
41 |
+
hvd = None
|
42 |
+
|
43 |
+
|
44 |
+
def train(params, model, dataset, logger, tb_logger=None, ttt_callback=None):
    """Run the custom training loop.

    Seeds numpy/TF, builds a legacy Adam optimizer and a checkpoint, then
    iterates the training dataset, optionally logging to dllogger/TensorBoard
    and periodically evaluating. In benchmark mode only throughput statistics
    are collected.

    :param params: Parsed runtime arguments (namespace from runtime.arguments).
    :param model: Keras model producing segmentation logits.
    :param dataset: Dataset object exposing train_fn/synth_fn generators.
    :param logger: dllogger-style logger with log()/flush().
    :param tb_logger: Optional namedtuple of TensorBoard summary writers.
    :param ttt_callback: Optional time-to-train callback forwarded to evaluate().
    """
    np.random.seed(params.seed)
    tf.random.set_seed(params.seed)

    # Horovod is optional; fall back to single-worker values when it is not
    # imported or not initialized.
    num_workers = hvd.size() if hvd is not None and hvd.is_initialized() else 1
    worker_id = hvd.rank() if hvd is not None and hvd.is_initialized() else 0
    # Total steps are divided among workers.
    max_steps = params.max_steps // num_workers

    optimizer = tf.keras.optimizers.legacy.Adam(learning_rate=params.learning_rate)

    # Running means of the two partial losses for logging.
    ce_loss = tf.keras.metrics.Mean(name='ce_loss')
    f1_loss = tf.keras.metrics.Mean(name='dice_loss')
    checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
    if params.resume_training and params.model_dir:
        checkpoint.restore(tf.train.latest_checkpoint(params.model_dir))

    if tb_logger is not None:
        write_hparams_v2(tb_logger.train_writer, vars(params))

    @tf.function
    def train_step(features, labels, warmup_batch=False):
        """One optimizer step; returns the total (regularized) loss tensor."""
        with tf.GradientTape() as tape:
            output_map = model(features)
            crossentropy_loss, dice_loss = partial_losses(output_map, labels)
            added_losses = tf.add(crossentropy_loss, dice_loss, name="total_loss_ref")
            # L2 weight decay, excluding batch-norm parameters.
            loss = added_losses + params.weight_decay * tf.add_n(
                [tf.nn.l2_loss(v) for v in model.trainable_variables
                 if 'batch_normalization' not in v.name])

        if hvd is not None and hvd.is_initialized():
            tape = hvd.DistributedGradientTape(tape)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        # Note: broadcast should be done after the first gradient step to ensure optimizer
        # initialization.
        if hvd is not None and hvd.is_initialized() and warmup_batch:
            hvd.broadcast_variables(model.variables, root_rank=0)
            hvd.broadcast_variables(optimizer.variables(), root_rank=0)

        ce_loss(crossentropy_loss)
        f1_loss(dice_loss)
        return loss

    timestamps = []
    if params.benchmark:
        # Benchmark mode: only measure step times past the warmup window.
        assert max_steps > params.warmup_steps, \
            "max_steps value has to be greater than warmup_steps"
        for iteration, (images, labels) in enumerate(dataset.train_fn(drop_remainder=True)):
            loss = train_step(images, labels, warmup_batch=iteration == 0).numpy()
            if iteration > params.warmup_steps:
                timestamps.append(time())

            if iteration >= max_steps:
                break

        if worker_id == 0:
            deltas = np.array([timestamps[i + 1] - timestamps[i] for i in range(len(timestamps) - 1)])
            stats = process_performance_stats(deltas, num_workers * params.batch_size, mode="train")
            logger.log(step=(), data=stats)
    else:
        timestamp = time()
        start_time = timestamp
        # Synthetic data can be selected for deterministic benchmarking.
        dataset_fn = dataset.synth_fn if params.synth_data else dataset.train_fn
        for iteration, (images, labels) in enumerate(dataset_fn()):
            # assign returned loss as a numpy object to transfer the data to host
            loss = train_step(images, labels, warmup_batch=iteration == 0).numpy()
            if iteration > params.warmup_steps:
                timestamps.append(time())
            if worker_id == 0 or params.log_all_workers:
                if iteration % params.log_every == 0:
                    duration = float(time() - timestamp) / params.log_every
                    timestamp = time()
                    data = {
                        "train_ce_loss": float(ce_loss.result()),
                        "train_dice_loss": float(f1_loss.result()),
                        "train_total_loss": float(f1_loss.result() + ce_loss.result()),
                        "iter duration [ms]": 1000 * duration,
                        "IPS": params.batch_size / duration
                    }
                    logger.log(step=(iteration, max_steps), data=data)

                    if tb_logger is not None:
                        with tb_logger.train_writer.as_default():
                            for name, value in data.items():
                                tf.summary.scalar(name, value, step=iteration)
                            # for consistency
                            tf.summary.scalar("loss", data["train_total_loss"], step=iteration)
                            tf.summary.scalar("examples/sec", data["IPS"], step=iteration)
                            tf.summary.scalar("global_step/sec", 1. / duration, step=iteration)

                # Periodic in-training evaluation (no checkpoint restore).
                if (params.evaluate_every > 0) and (iteration % params.evaluate_every == 0):
                    evaluate(params, model, dataset, logger, tb_logger, ttt_callback, restore_checkpoint=False)

                # NOTE(review): metric-reset placement reconstructed from a
                # flattened source dump — confirm against upstream that the
                # running means are reset here and not only after logging.
                f1_loss.reset_states()
                ce_loss.reset_states()

            if iteration >= max_steps:
                break

        if worker_id == 0:
            if max_steps > params.warmup_steps:
                deltas = np.array([timestamps[i + 1] - timestamps[i] for i in range(len(timestamps) - 1)])
                stats = process_performance_stats(deltas, num_workers * params.batch_size, mode="train")
                logger.log(step=(), data=stats)
            else:
                print(f"max_steps value has to be greater than warmup_steps, skipping throughput calculation...")
            logger.log(step=(), data={"train_time [sec]": (time() - start_time)})

    # Only rank 0 writes checkpoints.
    if not params.disable_ckpt_saving and worker_id == 0:
        checkpoint.save(file_prefix=os.path.join(params.model_dir, "checkpoint"))

    logger.flush()
|
157 |
+
|
158 |
+
|
159 |
+
def evaluate(params, model, dataset, logger, tb_logger=None, ttt_callback=None, restore_checkpoint=True):
    """Evaluate the model on the held-out fold and log Dice/cross-entropy metrics.

    :param params: Parsed runtime arguments.
    :param model: Keras model producing segmentation logits.
    :param dataset: Dataset object exposing eval_fn and eval_size.
    :param logger: dllogger-style logger with log()/flush().
    :param tb_logger: Optional namedtuple of TensorBoard summary writers.
    :param ttt_callback: Optional callback notified at test begin/end.
    :param restore_checkpoint: When True, restore the latest checkpoint from
        params.model_dir before evaluating (False during in-training eval).
    """
    if ttt_callback is not None:
        ttt_callback.on_test_begin()
    if params.fold is None:
        print("No fold specified for evaluation. Please use --fold [int] to select a fold.")
    ce_loss = tf.keras.metrics.Mean(name='ce_loss')
    f1_loss = tf.keras.metrics.Mean(name='dice_loss')
    checkpoint = tf.train.Checkpoint(model=model)
    if params.model_dir and restore_checkpoint:
        # expect_partial(): the eval checkpoint has no optimizer slots.
        checkpoint.restore(tf.train.latest_checkpoint(params.model_dir)).expect_partial()

    def validation_step(features, labels):
        """Accumulate both partial losses for one eval batch (no gradients)."""
        output_map = model(features, training=False)
        crossentropy_loss, dice_loss = partial_losses(output_map, labels)
        ce_loss(crossentropy_loss)
        f1_loss(dice_loss)

    for iteration, (images, labels) in enumerate(dataset.eval_fn(count=1)):
        validation_step(images, labels)
        # Stop after one pass over the eval split.
        if iteration >= dataset.eval_size // params.batch_size:
            break

    data = {}
    if dataset.eval_size > 0:
        data = {
            "eval_ce_loss": float(ce_loss.result()),
            "eval_dice_loss": float(f1_loss.result()),
            "eval_total_loss": float(f1_loss.result() + ce_loss.result()),
            # Dice score is 1 - dice loss.
            "eval_dice_score": 1.0 - float(f1_loss.result()),
            "loss": float(f1_loss.result() + ce_loss.result()),
            "accuracy": 1.0 - float(f1_loss.result()),  # for consistency
        }
        logger.log(step=(), data=data)
        if tb_logger is not None:
            with tb_logger.eval_writer.as_default():
                # `iteration` is the last loop index from the eval pass above.
                for name, value in data.items():
                    tf.summary.scalar(name, value, step=iteration)

    logger.flush()
    if ttt_callback is not None:
        ttt_callback.on_test_end()
|
200 |
+
|
201 |
+
|
202 |
+
def predict(params, model, dataset, logger):
    """Run inference: benchmark throughput or write predicted masks as a multipage TIFF.

    :param params: Parsed runtime arguments.
    :param model: Keras model producing segmentation logits.
    :param dataset: Dataset object exposing test_fn.
    :param logger: dllogger-style logger with log()/flush().
    """
    checkpoint = tf.train.Checkpoint(model=model)
    if params.model_dir:
        checkpoint.restore(tf.train.latest_checkpoint(params.model_dir)).expect_partial()

    @tf.function
    def prediction_step(features):
        # Class probabilities over the last (class) axis.
        return tf.nn.softmax(model(features, training=False), axis=-1)

    if params.benchmark:
        # Benchmark mode: measure inference step times past the warmup window.
        assert params.max_steps > params.warmup_steps, \
            "max_steps value has to be greater than warmup_steps"
        timestamps = []
        for iteration, images in enumerate(dataset.test_fn(count=None, drop_remainder=True)):
            prediction_step(images)
            if iteration > params.warmup_steps:
                timestamps.append(time())
            if iteration >= params.max_steps:
                break

        deltas = np.array([timestamps[i + 1] - timestamps[i] for i in range(len(timestamps) - 1)])
        stats = process_performance_stats(deltas, params.batch_size, mode="test")
        logger.log(step=(), data=stats)
    else:
        # One pass over the test split; gather all probability maps on host.
        predictions = np.concatenate([prediction_step(images).numpy()
                                      for images in dataset.test_fn(count=1)], axis=0)
        # Argmax over classes -> 0/255 uint8 masks.
        binary_masks = [np.argmax(p, axis=-1).astype(np.uint8) * 255 for p in predictions]
        multipage_tif = [Image.fromarray(mask).resize(size=(512, 512), resample=Image.BILINEAR)
                         for mask in binary_masks]

        output_dir = os.path.join(params.model_dir, 'predictions')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # All masks are appended into one multipage TIFF file.
        multipage_tif[0].save(os.path.join(output_dir, 'test-masks.tif'),
                              compression="tiff_deflate",
                              save_all=True,
                              append_images=multipage_tif[1:])

        print("Predictions saved at {}".format(output_dir))
    logger.flush()
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/runtime/setup.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2020-2023 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - wrapped horovod import in a try-catch block so that the user is not required to install this library
|
19 |
+
# when the model is being run on a single card
|
20 |
+
# - removed GPU specific flags
|
21 |
+
# - set TF_BF16_CONVERSION flag to default unet2d config for bfloat16 precision
|
22 |
+
# - changed GPU specific logic for configuration with HPU
|
23 |
+
# - fixed JSON logger output directory
|
24 |
+
# - read type of Gaudi device if available
|
25 |
+
|
26 |
+
import os
|
27 |
+
import multiprocessing
|
28 |
+
|
29 |
+
import numpy as np
|
30 |
+
import tensorflow as tf
|
31 |
+
import dllogger as logger
|
32 |
+
from dllogger import StdOutBackend, Verbosity, JSONStreamBackend
|
33 |
+
|
34 |
+
try:
|
35 |
+
import horovod.tensorflow as hvd
|
36 |
+
except ImportError:
|
37 |
+
hvd = None
|
38 |
+
|
39 |
+
|
40 |
+
def set_flags(params):
    """Configure the process environment for a run: HPU module, precision,
    seeds, XLA, and TF threading.

    Mutates ``os.environ`` and ``params`` (sets ``params.gaudi_type``);
    must be called before any TF graph work.

    :param params: Parsed runtime arguments.
    """
    if params.tf_verbosity:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(params.tf_verbosity)

    if not params.no_hpu:
        # Imported lazily so CPU-only runs do not require the Habana stack.
        from habana_frameworks.tensorflow import load_habana_module
        load_habana_module()

        from habana_frameworks.tensorflow.habana_device import get_type
        params.gaudi_type = get_type()

    if params.dtype == 'bf16':
        # Points TF's bf16 auto-conversion at the model-specific JSON config.
        os.environ['TF_BF16_CONVERSION'] = params.bf16_config_path

    np.random.seed(params.seed)
    tf.random.set_seed(params.seed)

    if params.use_xla:
        tf.config.optimizer.set_jit(True)

    # Reserve one CPU thread per HPU; give the rest to TF inter-op parallelism.
    per_hpu_thread_count = 1
    num_hpus = hvd.size() if hvd is not None and hvd.is_initialized() else 1
    cpu_count = multiprocessing.cpu_count()
    total_hpu_thread_count = per_hpu_thread_count * num_hpus

    tf.config.threading.set_intra_op_parallelism_threads(0)
    tf.config.threading.set_inter_op_parallelism_threads(cpu_count - total_hpu_thread_count)
|
67 |
+
|
68 |
+
|
69 |
+
def prepare_model_dir(params):
    """Resolve and prepare the checkpoint directory for this worker.

    Returns None in benchmark mode and on non-logging workers. Otherwise
    creates ``<model_dir>/model_checkpoint`` (optionally per-worker) and,
    when starting a fresh training run, clears any stale contents.

    :param params: Parsed runtime arguments.
    :return: Path to the checkpoint directory, or None when this worker
        should not write checkpoints.
    """
    worker_id = hvd.rank() if hvd is not None and hvd.is_initialized() else 0
    if params.benchmark or (not params.log_all_workers and worker_id != 0):
        return None

    model_dir = os.path.join(params.model_dir, "model_checkpoint")
    if params.log_all_workers and hvd is not None and hvd.is_initialized():
        model_dir = os.path.join(model_dir, f'worker_{worker_id}')

    os.makedirs(model_dir, exist_ok=True)
    if ('train' in params.exec_mode) and (not params.resume_training):
        # Clear stale checkpoints with shutil instead of
        # os.system('rm -rf {}/*') — the shell form breaks on paths with
        # spaces/metacharacters and silently ignores failures.
        import shutil
        for entry in os.listdir(model_dir):
            entry_path = os.path.join(model_dir, entry)
            if os.path.isdir(entry_path) and not os.path.islink(entry_path):
                shutil.rmtree(entry_path)
            else:
                os.remove(entry_path)
    return model_dir
|
82 |
+
|
83 |
+
|
84 |
+
def get_logger(params):
    """Initialize and return the dllogger module-level logger.

    Only worker 0 gets backends: stdout always, plus a JSON stream backend
    when ``params.log_dir`` is set. Other workers get an empty backend list,
    which silences their logging.

    :param params: Parsed runtime arguments.
    :return: The initialized ``dllogger`` module.
    """
    worker_id = hvd.rank() if hvd is not None and hvd.is_initialized() else 0
    backends = []
    if worker_id == 0:
        backends.append(StdOutBackend(Verbosity.VERBOSE))
        if params.log_dir:
            os.makedirs(params.log_dir, exist_ok=True)
            backends.append(JSONStreamBackend(Verbosity.VERBOSE, f"{params.log_dir}/log.json"))
    logger.init(backends=backends)
    return logger
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/train_and_evaluate.sh
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
###############################################################################
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
###############################################################################
#
# This script runs 5-fold cross-validation of UNet2D topology for 6400 iterations
# Usage:
# bash train_and_evaluate.sh <path/to/dataset> <path/for/results> <batch size> <data type> <number of HPUs>
#
# Positional parameters:
#   $1 - dataset directory
#   $2 - results directory (created if missing; logs written per fold)
#   $3 - batch size
#   $4 - data type (e.g. fp32 or bf16)
#   $5 - number of HPUs (1 selects single-card mode, otherwise mpirun is used)

if [ ! -d $2 ] # If the results path doesn't exist, create
then
    mkdir $2
fi

if [ $5 == 1 ] # Single card training
then
    # One run per fold; stdout is captured to <dtype>_<n>hpu_fold<k>.log so
    # runtime/parse_results.py can aggregate the final eval scores.
    $PYTHON unet2d.py --data_dir $1 --model_dir $2 --log_dir $2/fold_0 --max_steps 6400 --batch_size $3 --dtype $4 --exec_mode train_and_evaluate --fold 0 --tensorboard_logging > $2/${4}_${5}hpu_fold0.log
    $PYTHON unet2d.py --data_dir $1 --model_dir $2 --log_dir $2/fold_1 --max_steps 6400 --batch_size $3 --dtype $4 --exec_mode train_and_evaluate --fold 1 --tensorboard_logging > $2/${4}_${5}hpu_fold1.log
    $PYTHON unet2d.py --data_dir $1 --model_dir $2 --log_dir $2/fold_2 --max_steps 6400 --batch_size $3 --dtype $4 --exec_mode train_and_evaluate --fold 2 --tensorboard_logging > $2/${4}_${5}hpu_fold2.log
    $PYTHON unet2d.py --data_dir $1 --model_dir $2 --log_dir $2/fold_3 --max_steps 6400 --batch_size $3 --dtype $4 --exec_mode train_and_evaluate --fold 3 --tensorboard_logging > $2/${4}_${5}hpu_fold3.log
    $PYTHON unet2d.py --data_dir $1 --model_dir $2 --log_dir $2/fold_4 --max_steps 6400 --batch_size $3 --dtype $4 --exec_mode train_and_evaluate --fold 4 --tensorboard_logging > $2/${4}_${5}hpu_fold4.log
else # Multi card training
    # Same five folds launched through MPI with Horovod; one process per HPU,
    # pinned by socket with 6 cores per process.
    mpirun --allow-run-as-root --tag-output --merge-stderr-to-stdout --bind-to core --map-by socket:PE=6 -np $5 $PYTHON unet2d.py \
        --data_dir $1 --model_dir $2 --log_dir $2/fold_0 --max_steps 6400 --batch_size $3 --dtype $4 --exec_mode train_and_evaluate \
        --fold 0 --tensorboard_logging --log_all_workers --use_horovod > $2/${4}_${5}hpu_fold0.log
    mpirun --allow-run-as-root --tag-output --merge-stderr-to-stdout --bind-to core --map-by socket:PE=6 -np $5 $PYTHON unet2d.py \
        --data_dir $1 --model_dir $2 --log_dir $2/fold_1 --max_steps 6400 --batch_size $3 --dtype $4 --exec_mode train_and_evaluate \
        --fold 1 --tensorboard_logging --log_all_workers --use_horovod > $2/${4}_${5}hpu_fold1.log
    mpirun --allow-run-as-root --tag-output --merge-stderr-to-stdout --bind-to core --map-by socket:PE=6 -np $5 $PYTHON unet2d.py \
        --data_dir $1 --model_dir $2 --log_dir $2/fold_2 --max_steps 6400 --batch_size $3 --dtype $4 --exec_mode train_and_evaluate \
        --fold 2 --tensorboard_logging --log_all_workers --use_horovod > $2/${4}_${5}hpu_fold2.log
    mpirun --allow-run-as-root --tag-output --merge-stderr-to-stdout --bind-to core --map-by socket:PE=6 -np $5 $PYTHON unet2d.py \
        --data_dir $1 --model_dir $2 --log_dir $2/fold_3 --max_steps 6400 --batch_size $3 --dtype $4 --exec_mode train_and_evaluate \
        --fold 3 --tensorboard_logging --log_all_workers --use_horovod > $2/${4}_${5}hpu_fold3.log
    mpirun --allow-run-as-root --tag-output --merge-stderr-to-stdout --bind-to core --map-by socket:PE=6 -np $5 $PYTHON unet2d.py \
        --data_dir $1 --model_dir $2 --log_dir $2/fold_4 --max_steps 6400 --batch_size $3 --dtype $4 --exec_mode train_and_evaluate \
        --fold 4 --tensorboard_logging --log_all_workers --use_horovod > $2/${4}_${5}hpu_fold4.log
fi
# Aggregate the per-fold eval scores from the captured logs.
$PYTHON runtime/parse_results.py --model_dir $2 --env ${4}_${5}hpu
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/Unet2D/unet2d.py
ADDED
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
###############################################################################
|
15 |
+
# Copyright (C) 2020-2022 Habana Labs, Ltd. an Intel Company
|
16 |
+
###############################################################################
|
17 |
+
# Changes:
|
18 |
+
# - renamed script from main.py to unet2d.py
|
19 |
+
# - wrapped horovod import in a try-catch block so that the user is not required to install this library
|
20 |
+
# when the model is being run on a single card
|
21 |
+
# - added tensorboard logging functionality
|
22 |
+
# - added TimeToTrain callback for dumping evaluation timestamps
|
23 |
+
|
24 |
+
import os
|
25 |
+
from collections import namedtuple
|
26 |
+
|
27 |
+
import tensorflow as tf
|
28 |
+
|
29 |
+
from model.unet import Unet
|
30 |
+
from runtime.run import train, evaluate, predict
|
31 |
+
from runtime.setup import get_logger, set_flags, prepare_model_dir
|
32 |
+
from runtime.arguments import parse_args
|
33 |
+
from data_loading.data_loader import Dataset
|
34 |
+
from TensorFlow.common.debug import dump_callback
|
35 |
+
from TensorFlow.common.tb_utils import TimeToTrainKerasHook
|
36 |
+
|
37 |
+
try:
|
38 |
+
import horovod.tensorflow as hvd
|
39 |
+
except ImportError:
|
40 |
+
hvd = None
|
41 |
+
|
42 |
+
|
43 |
+
def main():
    """
    Starting point of the application

    Parses CLI arguments, optionally initializes Horovod, configures the HPU
    environment, builds the model and dataset, then dispatches to
    train/evaluate/predict according to ``params.exec_mode``.
    """
    params = parse_args(description="UNet-medical")
    if params.use_horovod:
        if hvd is None:
            raise RuntimeError(
                "Problem encountered during Horovod import. Please make sure that habana-horovod package is installed.")
        hvd.init()
    set_flags(params)

    # prepare_model_dir returns None on workers that must not write checkpoints.
    model_dir = prepare_model_dir(params)
    params.model_dir = model_dir
    logger = get_logger(params)

    tb_logger = None
    ttt_callback = None
    if params.tensorboard_logging:
        log_dir = params.log_dir
        # Per-worker TensorBoard directories when all workers log.
        if hvd is not None and hvd.is_initialized() and params.log_all_workers:
            log_dir = os.path.join(log_dir, f'worker_{hvd.rank()}')
        tb_logger = namedtuple('TBSummaryWriters', 'train_writer eval_writer')(
            tf.summary.create_file_writer(log_dir),
            tf.summary.create_file_writer(os.path.join(log_dir, 'eval')))
        ttt_callback = TimeToTrainKerasHook(os.path.join(log_dir, 'eval'))

    model = Unet(seed=params.seed)

    dataset = Dataset(data_dir=params.data_dir,
                      batch_size=params.batch_size,
                      fold=params.fold,
                      augment=params.augment,
                      hpu_id=hvd.rank() if hvd is not None and hvd.is_initialized() else 0,
                      num_hpus=hvd.size() if hvd is not None and hvd.is_initialized() else 1,
                      seed=params.seed,
                      gaudi_type=params.gaudi_type)

    # exec_mode may combine phases (e.g. 'train_and_evaluate'), hence the
    # substring checks rather than equality.
    if 'train' in params.exec_mode:
        with dump_callback(params.dump_config):
            train(params, model, dataset, logger, tb_logger, ttt_callback)

    if 'evaluate' in params.exec_mode:
        evaluate(params, model, dataset, logger, tb_logger, ttt_callback)

    if 'predict' in params.exec_mode:
        predict(params, model, dataset, logger)
|
90 |
+
|
91 |
+
|
92 |
+
# Script entry point.
if __name__ == '__main__':
    main()
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/LICENSE
ADDED
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright (c) 2022 Habana Labs, Ltd. an Intel Company
|
2 |
+
|
3 |
+
Apache License
|
4 |
+
Version 2.0, January 2004
|
5 |
+
https://www.apache.org/licenses/
|
6 |
+
|
7 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
8 |
+
|
9 |
+
1. Definitions.
|
10 |
+
|
11 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
12 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
13 |
+
|
14 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
15 |
+
the copyright owner that is granting the License.
|
16 |
+
|
17 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
18 |
+
other entities that control, are controlled by, or are under common
|
19 |
+
control with that entity. For the purposes of this definition,
|
20 |
+
"control" means (i) the power, direct or indirect, to cause the
|
21 |
+
direction or management of such entity, whether by contract or
|
22 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
23 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
24 |
+
|
25 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
26 |
+
exercising permissions granted by this License.
|
27 |
+
|
28 |
+
"Source" form shall mean the preferred form for making modifications,
|
29 |
+
including but not limited to software source code, documentation
|
30 |
+
source, and configuration files.
|
31 |
+
|
32 |
+
"Object" form shall mean any form resulting from mechanical
|
33 |
+
transformation or translation of a Source form, including but
|
34 |
+
not limited to compiled object code, generated documentation,
|
35 |
+
and conversions to other media types.
|
36 |
+
|
37 |
+
"Work" shall mean the work of authorship, whether in Source or
|
38 |
+
Object form, made available under the License, as indicated by a
|
39 |
+
copyright notice that is included in or attached to the work
|
40 |
+
(an example is provided in the Appendix below).
|
41 |
+
|
42 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
43 |
+
form, that is based on (or derived from) the Work and for which the
|
44 |
+
editorial revisions, annotations, elaborations, or other modifications
|
45 |
+
represent, as a whole, an original work of authorship. For the purposes
|
46 |
+
of this License, Derivative Works shall not include works that remain
|
47 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
48 |
+
the Work and Derivative Works thereof.
|
49 |
+
|
50 |
+
"Contribution" shall mean any work of authorship, including
|
51 |
+
the original version of the Work and any modifications or additions
|
52 |
+
to that Work or Derivative Works thereof, that is intentionally
|
53 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
54 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
55 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
56 |
+
means any form of electronic, verbal, or written communication sent
|
57 |
+
to the Licensor or its representatives, including but not limited to
|
58 |
+
communication on electronic mailing lists, source code control systems,
|
59 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
60 |
+
Licensor for the purpose of discussing and improving the Work, but
|
61 |
+
excluding communication that is conspicuously marked or otherwise
|
62 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
63 |
+
|
64 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
65 |
+
on behalf of whom a Contribution has been received by Licensor and
|
66 |
+
subsequently incorporated within the Work.
|
67 |
+
|
68 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
69 |
+
this License, each Contributor hereby grants to You a perpetual,
|
70 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
71 |
+
copyright license to reproduce, prepare Derivative Works of,
|
72 |
+
publicly display, publicly perform, sublicense, and distribute the
|
73 |
+
Work and such Derivative Works in Source or Object form.
|
74 |
+
|
75 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
76 |
+
this License, each Contributor hereby grants to You a perpetual,
|
77 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
78 |
+
(except as stated in this section) patent license to make, have made,
|
79 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
80 |
+
where such license applies only to those patent claims licensable
|
81 |
+
by such Contributor that are necessarily infringed by their
|
82 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
83 |
+
with the Work to which such Contribution(s) was submitted. If You
|
84 |
+
institute patent litigation against any entity (including a
|
85 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
86 |
+
or a Contribution incorporated within the Work constitutes direct
|
87 |
+
or contributory patent infringement, then any patent licenses
|
88 |
+
granted to You under this License for that Work shall terminate
|
89 |
+
as of the date such litigation is filed.
|
90 |
+
|
91 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
92 |
+
Work or Derivative Works thereof in any medium, with or without
|
93 |
+
modifications, and in Source or Object form, provided that You
|
94 |
+
meet the following conditions:
|
95 |
+
|
96 |
+
(a) You must give any other recipients of the Work or
|
97 |
+
Derivative Works a copy of this License; and
|
98 |
+
|
99 |
+
(b) You must cause any modified files to carry prominent notices
|
100 |
+
stating that You changed the files; and
|
101 |
+
|
102 |
+
(c) You must retain, in the Source form of any Derivative Works
|
103 |
+
that You distribute, all copyright, patent, trademark, and
|
104 |
+
attribution notices from the Source form of the Work,
|
105 |
+
excluding those notices that do not pertain to any part of
|
106 |
+
the Derivative Works; and
|
107 |
+
|
108 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
109 |
+
distribution, then any Derivative Works that You distribute must
|
110 |
+
include a readable copy of the attribution notices contained
|
111 |
+
within such NOTICE file, excluding those notices that do not
|
112 |
+
pertain to any part of the Derivative Works, in at least one
|
113 |
+
of the following places: within a NOTICE text file distributed
|
114 |
+
as part of the Derivative Works; within the Source form or
|
115 |
+
documentation, if provided along with the Derivative Works; or,
|
116 |
+
within a display generated by the Derivative Works, if and
|
117 |
+
wherever such third-party notices normally appear. The contents
|
118 |
+
of the NOTICE file are for informational purposes only and
|
119 |
+
do not modify the License. You may add Your own attribution
|
120 |
+
notices within Derivative Works that You distribute, alongside
|
121 |
+
or as an addendum to the NOTICE text from the Work, provided
|
122 |
+
that such additional attribution notices cannot be construed
|
123 |
+
as modifying the License.
|
124 |
+
|
125 |
+
You may add Your own copyright statement to Your modifications and
|
126 |
+
may provide additional or different license terms and conditions
|
127 |
+
for use, reproduction, or distribution of Your modifications, or
|
128 |
+
for any such Derivative Works as a whole, provided Your use,
|
129 |
+
reproduction, and distribution of the Work otherwise complies with
|
130 |
+
the conditions stated in this License.
|
131 |
+
|
132 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
133 |
+
any Contribution intentionally submitted for inclusion in the Work
|
134 |
+
by You to the Licensor shall be under the terms and conditions of
|
135 |
+
this License, without any additional terms or conditions.
|
136 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
137 |
+
the terms of any separate license agreement you may have executed
|
138 |
+
with Licensor regarding such Contributions.
|
139 |
+
|
140 |
+
6. Trademarks. This License does not grant permission to use the trade
|
141 |
+
names, trademarks, service marks, or product names of the Licensor,
|
142 |
+
except as required for reasonable and customary use in describing the
|
143 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
144 |
+
|
145 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
146 |
+
agreed to in writing, Licensor provides the Work (and each
|
147 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
148 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
149 |
+
implied, including, without limitation, any warranties or conditions
|
150 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
151 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
152 |
+
appropriateness of using or redistributing the Work and assume any
|
153 |
+
risks associated with Your exercise of permissions under this License.
|
154 |
+
|
155 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
156 |
+
whether in tort (including negligence), contract, or otherwise,
|
157 |
+
unless required by applicable law (such as deliberate and grossly
|
158 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
159 |
+
liable to You for damages, including any direct, indirect, special,
|
160 |
+
incidental, or consequential damages of any character arising as a
|
161 |
+
result of this License or out of the use or inability to use the
|
162 |
+
Work (including but not limited to damages for loss of goodwill,
|
163 |
+
work stoppage, computer failure or malfunction, or any and all
|
164 |
+
other commercial damages or losses), even if such Contributor
|
165 |
+
has been advised of the possibility of such damages.
|
166 |
+
|
167 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
168 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
169 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
170 |
+
or other liability obligations and/or rights consistent with this
|
171 |
+
License. However, in accepting such obligations, You may act only
|
172 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
173 |
+
of any other Contributor, and only if You agree to indemnify,
|
174 |
+
defend, and hold each Contributor harmless for any liability
|
175 |
+
incurred by, or claims asserted against, such Contributor by reason
|
176 |
+
of your accepting any such warranty or additional liability.
|
177 |
+
|
178 |
+
END OF TERMS AND CONDITIONS
|
179 |
+
|
180 |
+
Copyright 2020 Fausto Morales
|
181 |
+
|
182 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
183 |
+
you may not use this file except in compliance with the License.
|
184 |
+
You may obtain a copy of the License at
|
185 |
+
|
186 |
+
https://www.apache.org/licenses/LICENSE-2.0
|
187 |
+
|
188 |
+
Unless required by applicable law or agreed to in writing, software
|
189 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
190 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
191 |
+
See the License for the specific language governing permissions and
|
192 |
+
limitations under the License.
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/README.md
ADDED
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Vision Transformer for TensorFlow
|
2 |
+
|
3 |
+
This directory provides a script and recipe to train a Vision Transformer model to achieve state of the art accuracy, and is tested and maintained by Habana.
|
4 |
+
For further information on performance, refer to [Habana Model Performance Data page](https://developer.habana.ai/resources/habana-training-models/#performance).
|
5 |
+
|
6 |
+
For further information on training deep learning models using Gaudi, refer to [developer.habana.ai](https://developer.habana.ai/resources/).
|
7 |
+
|
8 |
+
## Table of Contents
|
9 |
+
|
10 |
+
* [Model-References](../../../README.md)
|
11 |
+
* [Model overview](#model-overview)
|
12 |
+
* [Setup](#setup)
|
13 |
+
* [Training and Examples](#training-the-model)
|
14 |
+
* [Profiling Example](#profiling-example)
|
15 |
+
* [Supported Configuration](#supported-configuration)
|
16 |
+
* [Changelog](#changelog)
|
17 |
+
* [Known Issues](#known-issues)
|
18 |
+
|
19 |
+
## Model Overview
|
20 |
+
|
21 |
+
Original model was taken from [vit-keras](https://github.com/faustomorales/vit-keras/) repository.
|
22 |
+
This is a Keras implementation of the models described in [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/pdf/2010.11929.pdf).
|
23 |
+
It is based on an earlier implementation from [tuvovan](https://github.com/tuvovan/Vision_Transformer_Keras), modified to match the Flax implementation in the [official repository](https://github.com/google-research/vision_transformer).
|
24 |
+
|
25 |
+
The weights here are ported over from the weights provided in the official repository. For more details on implementation, refer to `utils.load_weights_numpy`.
|
26 |
+
|
27 |
+
## Setup
|
28 |
+
|
29 |
+
Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/GAUDI_Installation_Guide.html) to set up the environment including the `$PYTHON` environment variable. To achieve the best performance, please follow the methods outlined in the [Optimizing Training Platform guide](https://docs.habana.ai/en/latest/TensorFlow/Model_Optimization_TensorFlow/Optimization_Training_Platform.html).
|
30 |
+
The guides will walk you through the process of setting up your system to run the model on Gaudi.
|
31 |
+
|
32 |
+
### Clone Habana Model-References
|
33 |
+
|
34 |
+
In the docker container, clone this repository and switch to the branch that matches your SynapseAI version. You can run the [`hl-smi`](https://docs.habana.ai/en/latest/Management_and_Monitoring/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options) utility to determine the SynapseAI version.
|
35 |
+
|
36 |
+
```bash
|
37 |
+
git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References /root/Model-References
|
38 |
+
```
|
39 |
+
|
40 |
+
**Note:** If Model-References repository path is not in the PYTHONPATH, make sure you update it:
|
41 |
+
```bash
|
42 |
+
export PYTHONPATH=$PYTHONPATH:/root/Model-References
|
43 |
+
```
|
44 |
+
|
45 |
+
### Install Model Requirements
|
46 |
+
|
47 |
+
1. In the docker container, go to the Vision Transformer directory:
|
48 |
+
|
49 |
+
```bash
|
50 |
+
cd /root/Model-References/TensorFlow/computer_vision/VisionTransformer
|
51 |
+
```
|
52 |
+
|
53 |
+
2. Install the required packages using pip:
|
54 |
+
|
55 |
+
```bash
|
56 |
+
$PYTHON -m pip install -r requirements.txt
|
57 |
+
```
|
58 |
+
|
59 |
+
### Training Data
|
60 |
+
|
61 |
+
The Vision Transformer script operates on ImageNet 1k, a widely popular image classification dataset from the ILSVRC challenge.
|
62 |
+
Post downloading the dataset, the pre-processing script will be located in ResNet folder: [preprocess_imagenet.py](../Resnets/preprocess_imagenet.py)
|
63 |
+
To obtain the dataset, perform the following steps:
|
64 |
+
1. Sign up with http://image-net.org/download-images and acquire the rights to download original images.
|
65 |
+
2. Follow the link to the 2012 ILSVRC and download `ILSVRC2012_img_val.tar` and `ILSVRC2012_img_train.tar`.
|
66 |
+
3. Use the below commands to prepare the dataset under `/data/tensorflow/imagenet/tf_records`. This is the default data directory for the training script.
|
67 |
+
|
68 |
+
```
|
69 |
+
export IMAGENET_HOME=/data/tensorflow
|
70 |
+
mkdir -p $IMAGENET_HOME/validation
|
71 |
+
mkdir -p $IMAGENET_HOME/train
|
72 |
+
tar xf ILSVRC2012_img_val.tar -C $IMAGENET_HOME/validation
|
73 |
+
tar xf ILSVRC2012_img_train.tar -C $IMAGENET_HOME/train
|
74 |
+
cd $IMAGENET_HOME/train
|
75 |
+
for f in *.tar; do
|
76 |
+
d=`basename $f .tar`
|
77 |
+
mkdir $d
|
78 |
+
tar xf $f -C $d
|
79 |
+
done
|
80 |
+
cd $IMAGENET_HOME
|
81 |
+
rm $IMAGENET_HOME/train/*.tar # optional
|
82 |
+
wget -O synset_labels.txt https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_2012_validation_synset_labels.txt
|
83 |
+
cd /root/Model-References/TensorFlow/computer_vision/Resnets
|
84 |
+
$PYTHON preprocess_imagenet.py \
|
85 |
+
--raw_data_dir=$IMAGENET_HOME \
|
86 |
+
--local_scratch_dir=$IMAGENET_HOME/imagenet/tf_records
|
87 |
+
```
|
88 |
+
|
89 |
+
## Training and Examples
|
90 |
+
|
91 |
+
### Single Card and Multi-Card Training Examples
|
92 |
+
|
93 |
+
**Run training on 1 HPU:**
|
94 |
+
|
95 |
+
Run training on 1 HPU with BF16 precision, batch size 32 and gradient accumulation every 8 steps:
|
96 |
+
|
97 |
+
```bash
|
98 |
+
$PYTHON train.py --dtype=bf16 --batch_size=32 --grad_accum_steps=8
|
99 |
+
```
|
100 |
+
|
101 |
+
**Run training on 8 HPUs:**
|
102 |
+
|
103 |
+
**NOTE:** mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/TensorFlow/Tensorflow_Scaling_Guide/Horovod_Scaling/index.html#mpirun-configuration).
|
104 |
+
|
105 |
+
Vision Transformer relies on mpi4py and tf.distribute to enable distributed training.
|
106 |
+
Since `batch_size` parameter is global, it must be scaled (BS of a single card times number of cards).
|
107 |
+
`distributed` flag must be used to ensure proper strategy is in use.
|
108 |
+
|
109 |
+
Run training on 8 HPUs with BF16 precision, batch size 256, 1000 warmup steps and gradient accumulation every 8 steps:
|
110 |
+
|
111 |
+
```bash
|
112 |
+
mpirun -np 8 $PYTHON train.py -d=bf16 --batch_size=256 --warmup_steps=1000 --grad_accum_steps=8 --distributed
|
113 |
+
```
|
114 |
+
|
115 |
+
## Profiling Example
|
116 |
+
|
117 |
+
### Single Card Profiling Training Example
|
118 |
+
|
119 |
+
**Run training on 1 HPU:**
|
120 |
+
|
121 |
+
Run training on 1 HPU with batch size 32, 1 epoch, 20 steps per epoch, and profiling of iterations 12-15:
|
122 |
+
|
123 |
+
```bash
|
124 |
+
$PYTHON train.py --epochs 1 --steps_per_epoch 20 --profile 12,15
|
125 |
+
```
|
126 |
+
|
127 |
+
### Parameters
|
128 |
+
|
129 |
+
You can modify the training behavior through various flags in `train.py` script.
|
130 |
+
|
131 |
+
- `dataset`, `dataset_dir`: Dataset directory.
|
132 |
+
- `optimizer`: Optimizer.
|
133 |
+
- `dtype`, `d`: Data type (FP32 or BF16).
|
134 |
+
- `batch_size`: Global batch size.
|
135 |
+
- `lr_sched`: Learning rate scheduler (linear, exp, steps, constant, WarmupCosine).
|
136 |
+
- `initial_lr`: Initial learning rate.
|
137 |
+
- `final_lr`: Final learning rate.
|
138 |
+
- `warmup_steps`: Warmup steps.
|
139 |
+
- `epochs`: Total number of epochs for training.
|
140 |
+
- `steps_per_epoch`: Number of steps for training per epoch, overrides default value.
|
141 |
+
- `validation_steps`: Number of steps for validation, overrides default value.
|
142 |
+
- `model`: Model (ViT-B_16, ViT-L_16, ViT-B_32, ViT-L_32).
|
143 |
+
- `train_subset`: Pattern to detect train subset in dataset directory.
|
144 |
+
- `val_subset`: Pattern to detect validation subset in dataset directory.
|
145 |
+
- `grad_accum_steps`: Gradient accumulation steps.
|
146 |
+
- `resume_from_checkpoint_path`: Path to checkpoint to start from.
|
147 |
+
- `resume_from_epoch`: Initial epoch index.
|
148 |
+
- `evaluate_checkpoint_path`: Checkpoint path for evaluating the model on --val_subset.
|
149 |
+
- `weights_path`: Path to weights cache directory. ~/.keras is used if not set.
|
150 |
+
- `deterministic`: Enable deterministic behavior, this will also disable data augmentation. --seed must be set.
|
151 |
+
- `seed`: Seed to be used by random functions.
|
152 |
+
- `device`: Device type (CPU or HPU).
|
153 |
+
- `distributed`: Enable distributed training.
|
154 |
+
- `base_tf_server_port`: Rank 0 port used by tf.distribute.
|
155 |
+
- `save_summary_steps`: Steps between saving summaries to TensorBoard.
|
156 |
+
- `recipe_cache`: Path to recipe cache directory. Set to empty to disable recipe cache. Externally set 'TF_RECIPE_CACHE_PATH' will override this setting.
|
157 |
+
- `dump_config`: Side-by-side config file. Internal, do not use.
|
158 |
+
|
159 |
+
## Supported Configuration
|
160 |
+
|
161 |
+
| Validated on | SynapseAI Version | TensorFlow Version(s) | Mode |
|
162 |
+
|:------:|:-----------------:|:-----:|:----------:|
|
163 |
+
| Gaudi | 1.10.0 | 2.12.0 | Training |
|
164 |
+
|
165 |
+
|
166 |
+
## Changelog
|
167 |
+
|
168 |
+
### 1.4.0
|
169 |
+
|
170 |
+
- Updated dataset paths.
|
171 |
+
- Added '--profile' parameter.
|
172 |
+
- Implementation override clean-up.
|
173 |
+
|
174 |
+
### 1.3.0
|
175 |
+
|
176 |
+
- Updated 'tensorflow_addons' and 'mpi4py' in requirements.txt.
|
177 |
+
- Added implementation override of Gelu.
|
178 |
+
- Improved robustness in multi-card scenarios.
|
179 |
+
|
180 |
+
## Known Issues
|
181 |
+
|
182 |
+
### Profiling in Multi-card Scenario
|
183 |
+
|
184 |
+
To profile in multi-card scenario, habanalabs driver must be loaded with increased `timeout_locked` parameter (eg. `timeout_locked=300`).
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/config.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
###############################################################################
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
###############################################################################

from types import SimpleNamespace

# Central bag of tweakable training defaults. Attributes are plain values;
# callers read (and may overwrite) them directly, e.g. `config.SEED = 42`.
config = SimpleNamespace(
    # Subdirectory name for saving trained weights and models.
    SAVE_DIR='saves',
    # Subdirectory name for saving TensorBoard log files.
    LOG_DIR='logs',
    # Default path to the ImageNet TFRecords dataset files.
    DEFAULT_DATASET_DIR='/data/tensorflow/imagenet/tf_records',
    # Path to weights cache directory. ~/.keras is used if None.
    WEIGHTS_DIR=None,
    # Number of parallel workers for generating training/validation data.
    NUM_DATA_WORKERS=128,
    # Do image data augmentation or not.
    DATA_AUGMENTATION=True,
    # Enable deterministic behavior.
    DETERMINISTIC=False,
    # Seed to be used by random functions.
    SEED=None,
)
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/models/__init__.py
ADDED
File without changes
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/models/models.py
ADDED
@@ -0,0 +1,381 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
###############################################################################
|
2 |
+
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
|
3 |
+
###############################################################################
|
4 |
+
|
5 |
+
import math
|
6 |
+
|
7 |
+
import tensorflow as tf
|
8 |
+
from tensorflow.keras import backend
|
9 |
+
from typeguard import typechecked
|
10 |
+
from typing import Union
|
11 |
+
|
12 |
+
|
13 |
+
class GradientAccumulator(tf.keras.optimizers.legacy.Optimizer):
|
14 |
+
"""Optimizer wrapper for gradient accumulation."""
|
15 |
+
|
16 |
+
@typechecked
|
17 |
+
def __init__(
|
18 |
+
self,
|
19 |
+
optimizer: Union[tf.keras.optimizers.legacy.Optimizer, str],
|
20 |
+
accum_steps: tf.types.experimental.TensorLike = 4,
|
21 |
+
name: str = "GradientAccumulator",
|
22 |
+
**kwargs,
|
23 |
+
):
|
24 |
+
r"""Construct a new GradientAccumulator optimizer.
|
25 |
+
Args:
|
26 |
+
optimizer: str or `tf.keras.optimizers.Optimizer` that will be
|
27 |
+
used to compute and apply gradients.
|
28 |
+
accum_steps: int > 0. Update gradient in every accumulation steps.
|
29 |
+
name: Optional name for the operations created when applying
|
30 |
+
gradients. Defaults to "GradientAccumulator".
|
31 |
+
**kwargs: keyword arguments. Allowed to be {`clipnorm`,
|
32 |
+
`clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by
|
33 |
+
norm; `clipvalue` is clip gradients by value, `decay` is
|
34 |
+
included for backward compatibility to allow time inverse
|
35 |
+
decay of learning rate. `lr` is included for backward
|
36 |
+
compatibility, recommended to use `learning_rate` instead.
|
37 |
+
"""
|
38 |
+
super().__init__(name, **kwargs)
|
39 |
+
self._optimizer = tf.keras.optimizers.get(optimizer)
|
40 |
+
self._gradients = []
|
41 |
+
self._accum_steps = accum_steps
|
42 |
+
|
43 |
+
def _create_slots(self, var_list):
|
44 |
+
self._optimizer._create_slots(var_list=var_list)
|
45 |
+
for var in var_list:
|
46 |
+
self.add_slot(var, "ga")
|
47 |
+
|
48 |
+
self._gradients = [self.get_slot(var, "ga") for var in var_list]
|
49 |
+
|
50 |
+
@property
|
51 |
+
def gradients(self):
|
52 |
+
"""The accumulated gradients on the current replica."""
|
53 |
+
if not self._gradients:
|
54 |
+
raise ValueError(
|
55 |
+
"The accumulator should be called first to initialize the gradients"
|
56 |
+
)
|
57 |
+
return list(
|
58 |
+
gradient.read_value() if gradient is not None else gradient
|
59 |
+
for gradient in self._gradients
|
60 |
+
)
|
61 |
+
|
62 |
+
def apply_gradients(self, grads_and_vars, name=None, **kwargs):
|
63 |
+
self._optimizer._iterations = self.iterations
|
64 |
+
return super().apply_gradients(grads_and_vars, name, **kwargs)
|
65 |
+
|
66 |
+
def _resource_apply_dense(self, grad, var, apply_state=None):
|
67 |
+
accum_gradient = self.get_slot(var, "ga")
|
68 |
+
if accum_gradient is not None and grad is not None:
|
69 |
+
accum_gradient.assign_add(
|
70 |
+
grad / self._accum_steps, use_locking=self._use_locking, read_value=False
|
71 |
+
)
|
72 |
+
|
73 |
+
def _apply():
|
74 |
+
if "apply_state" in self._optimizer._dense_apply_args:
|
75 |
+
train_op = self._optimizer._resource_apply_dense(
|
76 |
+
accum_gradient.read_value(), var, apply_state=apply_state
|
77 |
+
)
|
78 |
+
else:
|
79 |
+
train_op = self._optimizer._resource_apply_dense(
|
80 |
+
accum_gradient.read_value(), var
|
81 |
+
)
|
82 |
+
reset_op = accum_gradient.assign(
|
83 |
+
tf.zeros_like(accum_gradient),
|
84 |
+
use_locking=self._use_locking,
|
85 |
+
read_value=False,
|
86 |
+
)
|
87 |
+
return tf.group(train_op, reset_op)
|
88 |
+
|
89 |
+
apply_op = tf.cond(
|
90 |
+
(self.iterations + 1) % self._accum_steps == 0, _apply, lambda: tf.no_op()
|
91 |
+
)
|
92 |
+
return apply_op
|
93 |
+
|
94 |
+
def _resource_apply_sparse(self, grad: tf.types.experimental.TensorLike, var, indices, apply_state):
|
95 |
+
accum_gradient = self.get_slot(var, "ga")
|
96 |
+
if accum_gradient is not None and grad is not None:
|
97 |
+
self._resource_scatter_add(accum_gradient, indices, grad)
|
98 |
+
|
99 |
+
def _apply():
|
100 |
+
if "apply_state" in self._optimizer._sparse_apply_args:
|
101 |
+
train_op = self._optimizer._resource_apply_sparse(
|
102 |
+
accum_gradient.sparse_read(indices),
|
103 |
+
var,
|
104 |
+
indices,
|
105 |
+
apply_state=apply_state,
|
106 |
+
)
|
107 |
+
else:
|
108 |
+
train_op = self._optimizer._resource_apply_sparse(
|
109 |
+
accum_gradient.sparse_read(indices), var, indices
|
110 |
+
)
|
111 |
+
reset_op = accum_gradient.assign(
|
112 |
+
tf.zeros_like(accum_gradient),
|
113 |
+
use_locking=self._use_locking,
|
114 |
+
read_value=False,
|
115 |
+
)
|
116 |
+
return tf.group(train_op, reset_op)
|
117 |
+
|
118 |
+
apply_op = tf.cond(
|
119 |
+
(self.iterations + 1) % self._accum_steps == 0, _apply, lambda: tf.no_op()
|
120 |
+
)
|
121 |
+
return apply_op
|
122 |
+
|
123 |
+
def reset(self):
|
124 |
+
"""Resets the accumulated gradients on the current replica."""
|
125 |
+
assign_ops = []
|
126 |
+
if not self._gradients:
|
127 |
+
return assign_ops
|
128 |
+
|
129 |
+
for gradient in self._gradients:
|
130 |
+
if gradient is not None:
|
131 |
+
assign_ops.append(
|
132 |
+
gradient.assign(
|
133 |
+
tf.zeros_like(gradient),
|
134 |
+
use_locking=self._use_locking,
|
135 |
+
read_value=False,
|
136 |
+
)
|
137 |
+
)
|
138 |
+
|
139 |
+
return tf.group(assign_ops)
|
140 |
+
|
141 |
+
@property
|
142 |
+
def lr(self):
|
143 |
+
return self._optimizer._get_hyper("learning_rate")
|
144 |
+
|
145 |
+
@lr.setter
|
146 |
+
def lr(self, lr):
|
147 |
+
self._optimizer._set_hyper("learning_rate", lr) #
|
148 |
+
|
149 |
+
@property
def learning_rate(self):
    """Learning rate of the wrapped optimizer."""
    return self._optimizer._get_hyper("learning_rate")

@learning_rate.setter
def learning_rate(self, learning_rate):
    # Forward to the inner optimizer so the wrapper holds no LR state itself.
    self._optimizer._set_hyper("learning_rate", learning_rate)
|
156 |
+
|
157 |
+
def get_config(self):
    """Serialize wrapper state: accumulation steps plus the inner optimizer."""
    merged = dict(super().get_config())
    merged["accum_steps"] = self._accum_steps
    merged["optimizer"] = tf.keras.optimizers.serialize(self._optimizer)
    return merged
|
164 |
+
|
165 |
+
@classmethod
def from_config(cls, config, custom_objects=None):
    """Rebuild the accumulator wrapper from a get_config() dictionary."""
    inner_spec = config.pop("optimizer")
    inner = tf.keras.optimizers.deserialize(inner_spec, custom_objects=custom_objects)
    return cls(inner, **config)
|
171 |
+
|
172 |
+
|
173 |
+
# Fixed ImageNet geometry used by the helpers in this module.
IN_SHAPE = (224, 224, 3)  # shape of input image tensor (H, W, C)
NUM_CLASSES = 1000  # number of output classes (1000 for ImageNet)
|
175 |
+
|
176 |
+
|
177 |
+
def _set_l2(model, weight_decay):
    """Attach L2 weight-decay losses to the regularizable layers of *model*.

    Reference: https://jricheimer.github.io/keras/2019/02/06/keras-hack-1/

    Args:
        model: a built tf.keras.Model; its layers are mutated in place.
        weight_decay: L2 coefficient applied to each regularized weight.
    """
    def _register(layer, weight):
        # Bind `weight` as a default argument: the original bare
        # `lambda: l2(...)(layer.kernel)` late-bound the loop variable, so
        # every registered loss pointed at the weights of the *last* layer.
        layer.add_loss(lambda w=weight: tf.keras.regularizers.l2(weight_decay)(w))
        print('added wd to layer %s' % layer.name)

    for layer in model.layers:
        # NOTE: DepthwiseConv2D subclasses Conv2D, so it must be tested first.
        if isinstance(layer, tf.keras.layers.DepthwiseConv2D):
            _register(layer, layer.kernel)
        elif isinstance(layer, tf.keras.layers.Conv2D):
            # Regularization for plain Conv2D is intentionally disabled
            # (kept from the original implementation); only the log remains.
            #layer.add_loss(lambda: keras.regularizers.l2(weight_decay)(layer.kernel))
            print('added wd to layer %s' % layer.name)
        elif isinstance(layer, tf.keras.layers.Dense):
            _register(layer, layer.kernel)
        elif isinstance(layer, tf.keras.layers.BatchNormalization):
            # BatchNormalization has no `.kernel`; the original code failed
            # lazily with AttributeError when the loss was evaluated.
            # Regularize `gamma` (the scale weight) instead.
            _register(layer, layer.gamma)
|
197 |
+
|
198 |
+
|
199 |
+
def get_batch_size(model_name, value):
    """Return the training batch size for *model_name*.

    These default batch_size values were chosen based on available
    GPU RAM (11GB) on GeForce GTX-2080Ti.

    Args:
        model_name: model identifier string.
        value: user-supplied override; used verbatim when > 0.

    Returns:
        The batch size to use.

    Raises:
        ValueError: if no override is given and the model has no default.
    """
    if value > 0:
        return value
    if 'densenet121' in model_name:
        return 16
    # Original raised a bare ValueError; a message makes the failure actionable.
    raise ValueError('no default batch_size known for model %r' % model_name)
|
211 |
+
|
212 |
+
|
213 |
+
def get_iter_size(model_name, value):
    """Return the gradient-accumulation iteration count for *model_name*.

    These default iter_size values were chosen to make the 'effective'
    batch_size equal 256.

    Args:
        model_name: model identifier string.
        value: user-supplied override; used verbatim when > 0.

    Returns:
        The iter_size to use.

    Raises:
        ValueError: if no override is given and the model has no default.
    """
    if value > 0:
        return value
    if 'densenet121' in model_name:
        return 16
    # Original raised a bare ValueError; a message makes the failure actionable.
    raise ValueError('no default iter_size known for model %r' % model_name)
|
225 |
+
|
226 |
+
|
227 |
+
def get_initial_lr(model_name, value):
    """Return the user-supplied initial LR when positive, else the 3e-4 default."""
    if value > 0.0:
        return value
    return 3e-4
|
229 |
+
|
230 |
+
|
231 |
+
def get_final_lr(model_name, value):
    """Return the user-supplied final LR when positive, else the 3e-4 default."""
    if value > 0.0:
        return value
    return 3e-4
|
233 |
+
|
234 |
+
|
235 |
+
class CosineLearningRateScheduleWithWarmup(tf.keras.callbacks.Callback):
    """Per-batch LR schedule: linear warmup followed by cosine decay.

    Unlike `tf.keras.callbacks.LearningRateScheduler` (which updates once per
    epoch), this callback sets the optimizer LR after every training batch.
    """

    def __init__(self, schedule, initial_lr, warmup_steps, resume_step, total_steps, cycles=0.5, verbose=0):
        # `schedule` is stored but not used to compute the LR; the cosine
        # formula in on_train_batch_end drives the value directly.
        super(CosineLearningRateScheduleWithWarmup, self).__init__()

        self.schedule = schedule
        self.verbose = verbose
        self.warmup_steps = warmup_steps
        self.initial_lr = initial_lr
        self.resume_step = resume_step  # global step to resume counting from
        self.total_steps = total_steps  # total steps over the whole run
        self.cycles = cycles  # 0.5 => decay from initial_lr down to ~0

    def on_train_begin(self, logs=None):
        # Start the step counter at the resume point so a resumed run
        # continues the schedule where it left off.
        self.iter = 0 + self.resume_step

    def on_train_batch_end(self, batch, logs=None):
        """Advance the step counter and set the LR for the next batch."""
        self.iter += 1
        if self.iter < self.warmup_steps:
            # Linear ramp: 0 -> initial_lr over warmup_steps.
            warmup_multiplier = float(self.iter) / float(self.warmup_steps)
            lr = self.initial_lr * warmup_multiplier
            backend.set_value(self.model.optimizer.lr, lr)
        else:
            # Cosine decay over the remaining (total - warmup) steps.
            progress = float(self.iter - self.warmup_steps) / \
                float(max(1, self.total_steps - self.warmup_steps))
            lr = max(0.0, 0.5 * (1. + tf.math.cos(math.pi *
                     float(self.cycles) * 2.0 * progress)))*self.initial_lr
            backend.set_value(self.model.optimizer.lr, lr)

    def on_epoch_begin(self, epoch, logs=None):
        # NOTE(review): the check is for 'learning_rate' but the message says
        # "lr"; also, if the TypeError branch fires, `lr` is unbound and the
        # set_value below would raise NameError — confirm intended behavior.
        if not hasattr(self.model.optimizer, 'learning_rate'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        try:  # new API
            lr = float(backend.get_value(self.model.optimizer.learning_rate))
        except TypeError:  # Support for old API for backward compatibility
            print("An exception occurred")

        backend.set_value(self.model.optimizer.learning_rate,
                          backend.get_value(lr))
        if self.verbose > 0:
            print('\nEpoch %05d: LearningRateScheduler reducing learning '
                  'rate to %s.' % (epoch + 1, lr))

    def on_epoch_end(self, epoch, logs=None):
        # Record the current LR in the logs so it shows up in history/TB.
        logs = logs or {}
        logs['lr'] = backend.get_value(self.model.optimizer.learning_rate)
|
280 |
+
|
281 |
+
|
282 |
+
def get_lr_func(total_epochs, lr_sched='linear',
                initial_lr=6e-2, final_lr=1e-5, warmup_steps=0, resume_step=0, total_steps=5004):
    """Return a Keras callback implementing the requested LR schedule.

    Five values of `lr_sched` are supported: 'linear', 'exp', 'steps',
    'constant' (all per-epoch LearningRateScheduler callbacks) and
    'WarmupCosine' (per-batch linear warmup followed by cosine decay).

    Raises:
        ValueError: if `total_epochs` < 1 or `lr_sched` is unknown.
    """
    def linear_decay(epoch):
        """Decay LR linearly from initial_lr to final_lr across epochs."""
        if total_epochs == 1:
            return initial_lr
        else:
            ratio = max((total_epochs - epoch - 1.) / (total_epochs - 1.), 0.)
            lr = final_lr + (initial_lr - final_lr) * ratio
            print('Epoch %d, lr = %f' % (epoch+1, lr))
            return lr

    def exp_decay(epoch):
        """Decay LR exponentially from initial_lr to final_lr across epochs."""
        if total_epochs == 1:
            return initial_lr
        else:
            lr_decay = (final_lr / initial_lr) ** (1. / (total_epochs - 1))
            lr = initial_lr * (lr_decay ** epoch)
            print('Epoch %d, lr = %f' % (epoch+1, lr))
            return lr

    def steps_decay(epoch):
        """Step decay: LR is reduced x10 at epochs 30, 60, 80, 110, 140, ..."""
        if total_epochs == 1:
            return initial_lr
        else:
            if (epoch < 80):
                lr_decay = pow(0.1, epoch // 30)
            else:
                # Shift by 10 so the third drop lands at epoch 80, not 90.
                lr_decay = pow(0.1, (epoch+10) // 30)
            print(epoch)
            print(lr_decay)

            lr = initial_lr * lr_decay
            print('Epoch %d, lr = %f' % (epoch+1, lr))
            return lr

    def constant(epoch):
        """Keep the LR fixed at initial_lr for every epoch."""
        if total_epochs == 1:
            return initial_lr
        else:
            lr = initial_lr
            print('Epoch %d, lr = %f' % (epoch+1, lr))
            return lr

    if total_epochs < 1:
        raise ValueError('bad total_epochs (%d)' % total_epochs)
    if lr_sched == 'linear':
        return tf.keras.callbacks.LearningRateScheduler(linear_decay)
    elif lr_sched == 'exp':
        return tf.keras.callbacks.LearningRateScheduler(exp_decay)
    elif lr_sched == 'steps':
        return tf.keras.callbacks.LearningRateScheduler(steps_decay)
    elif lr_sched == 'constant':
        return tf.keras.callbacks.LearningRateScheduler(constant)
    elif lr_sched == 'WarmupCosine':
        # `constant` is passed only to satisfy the `schedule` parameter; the
        # callback computes the LR itself on every batch.
        return CosineLearningRateScheduleWithWarmup(constant, initial_lr, warmup_steps, resume_step, total_steps)

    else:
        raise ValueError('bad lr_sched')
|
348 |
+
|
349 |
+
|
350 |
+
def get_weight_decay(model_name, value):
    """Return *value* when non-negative, otherwise the 1e-5 default."""
    return 1e-5 if value < 0.0 else value
|
352 |
+
|
353 |
+
|
354 |
+
def get_optimizer(optim_name, initial_lr, accumulation_steps=1, epsilon=1e-2):
    """Build a (optionally gradient-accumulating) Keras optimizer.

    Note:
        1. Learning rate decay is implemented as a callback in model.fit(),
           so I do not specify 'decay' in the optimizers here.
        2. Refer to the following for information about 'epsilon' in Adam:
           https://github.com/tensorflow/tensorflow/blob/v1.14.0/tensorflow/python/keras/optimizer_v2/adam.py#L93

    Args:
        optim_name: one of 'sgd', 'adam', 'rmsprop'.
        initial_lr: learning rate the optimizer is constructed with.
        accumulation_steps: when > 1, wrap the optimizer in GradientAccumulator.
        epsilon: numerical-stability epsilon for Adam/RMSprop.

    Returns:
        A tf.keras legacy optimizer, possibly wrapped in GradientAccumulator.

    Raises:
        ValueError: if `optim_name` is not a supported optimizer.
    """
    from functools import partial
    if optim_name == 'sgd':
        optimizer = partial(tf.keras.optimizers.legacy.SGD,
                            momentum=0.9, nesterov=False, global_clipnorm=1.0)
    elif optim_name == 'adam':
        optimizer = partial(tf.keras.optimizers.legacy.Adam, epsilon=epsilon)
    elif optim_name == 'rmsprop':
        optimizer = partial(tf.keras.optimizers.legacy.RMSprop,
                            rho=0.9, epsilon=epsilon)
    else:
        # implementation of 'AdamW' is removed temporarily
        # Original raised a bare ValueError; give the caller the bad name.
        raise ValueError('unsupported optimizer: %r' % optim_name)

    optimizer = optimizer(learning_rate=initial_lr)
    if accumulation_steps > 1:
        optimizer = GradientAccumulator(
            optimizer, accum_steps=accumulation_steps)

    return optimizer
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
vit-keras==0.1.0
|
2 |
+
opencv-python==4.7.0.68
|
3 |
+
mpi4py==3.1.3
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/train.py
ADDED
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
###############################################################################
|
2 |
+
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
|
3 |
+
###############################################################################
|
4 |
+
|
5 |
+
import argparse
|
6 |
+
import os
|
7 |
+
import random
|
8 |
+
|
9 |
+
import numpy as np
|
10 |
+
import tensorflow as tf
|
11 |
+
from TensorFlow.common.debug import dump_callback
|
12 |
+
from TensorFlow.common.tb_utils import (ExamplesPerSecondKerasHookV2, TensorBoardWithHParamsV2,
|
13 |
+
TimeToTrainKerasHook)
|
14 |
+
from config import config
|
15 |
+
from models.models import get_lr_func, get_optimizer
|
16 |
+
from utils.distribution_utils import configure_cluster, comm_size, comm_rank
|
17 |
+
from utils.dataset import get_dataset
|
18 |
+
from vit_keras import vit
|
19 |
+
|
20 |
+
|
21 |
+
def tf_distribute_config(base_tf_server_port: int):
    """
    Generates a TensorFlow cluster information and sets it to TF_CONFIG environment variable.
    TF_CONFIG won't be altered if it was externally set.
    """
    # Comma-separated host IPs; defaults to localhost when MULTI_HLS_IPS unset.
    hls_addresses = str(os.environ.get(
        'MULTI_HLS_IPS', '127.0.0.1')).split(',')
    rank = comm_rank()
    size = comm_size()

    # One TF server endpoint per local rank: ports start at
    # base_tf_server_port and increase per rank, replicated for each host.
    worker_hosts = ",".join([",".join([address + ':' + str(base_tf_server_port + r)
                                       for r in range(size//len(hls_addresses))])
                             for address in hls_addresses])

    configure_cluster(worker_hosts, rank)
    # NOTE(review): assumes TF_CONFIG is now set (either externally or by
    # configure_cluster above) — KeyError otherwise.
    print(os.environ['TF_CONFIG'])
|
37 |
+
|
38 |
+
|
39 |
+
# Shown in --help output of the argparse parser built in main().
DESCRIPTION = 'VisionTransformer training script.'
|
40 |
+
|
41 |
+
|
42 |
+
def main():
    """Parse CLI arguments, set up the device/strategy, and train a ViT model."""
    # --- Command-line interface -------------------------------------------
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('--dataset', '--dataset_dir', metavar='PATH',
                        default=config.DEFAULT_DATASET_DIR, help='Dataset directory.')
    parser.add_argument('--optimizer', default='sgd',
                        choices=['sgd', 'adam', 'rmsprop'], help='Optimizer.')
    parser.add_argument('-d', '--dtype', default='fp32',
                        choices=['fp32', 'bf16'], help='Data type.')
    parser.add_argument('--batch_size', type=int,
                        default=32, help='Global batch size.')
    parser.add_argument('--lr_sched', default='WarmupCosine', choices=[
                        'linear', 'exp', 'steps', 'constant', 'WarmupCosine'], help='Learning rate scheduler.')
    parser.add_argument('--initial_lr', type=float,
                        default=6e-2, help='Initial learning rate.')
    parser.add_argument('--final_lr', type=float,
                        default=1e-5, help='Final learning rate.')
    parser.add_argument('--warmup_steps', type=int,
                        default=4000, help='Warmup steps.')
    parser.add_argument('--epochs', type=int, default=10,
                        help='Total number of epochs for training.')
    parser.add_argument('--steps_per_epoch', type=int,
                        help='Number of steps for training per epoch, overrides default value.')
    parser.add_argument('--validation_steps', type=int,
                        help='Number of steps for validation, overrides default value.')
    parser.add_argument('--profile', type=str, default='0',
                        help='Profile the batch(es) to sample compute characteristics.'
                        'Must be an integer or a pair of comma-separated integers. For example: --profile 4,6')
    parser.add_argument('--model', default='ViT-B_16',
                        choices=['ViT-B_16', 'ViT-L_16', 'ViT-B_32', 'ViT-L_32'], help='Model.')
    parser.add_argument('--train_subset', default='train/train',
                        help='Pattern to detect train subset in dataset directory.')
    parser.add_argument('--val_subset', default='validation/validation',
                        help='Pattern to detect validation subset in dataset directory.')
    parser.add_argument('--grad_accum_steps', type=int,
                        default=8, help='Gradient accumulation steps.')
    parser.add_argument('--resume_from_checkpoint_path',
                        metavar='PATH', help='Path to checkpoint to start from.')
    parser.add_argument('--resume_from_epoch', metavar='EPOCH_INDEX',
                        type=int, default=0, help='Initial epoch index.')
    parser.add_argument('--evaluate_checkpoint_path', metavar='PATH',
                        help='Checkpoint path for evaluating the model on --val_subset')
    parser.add_argument('--weights_path', metavar='PATH',
                        help='Path to weights cache directory. ~/.keras is used if not set.')
    parser.add_argument('--deterministic', action='store_true', default=False,
                        help='Enable deterministic behavior, this will also disable data augmentation. --seed must be set.')
    parser.add_argument('--seed', type=int,
                        help='Seed to be used by random functions.')
    parser.add_argument('--device', default='HPU',
                        choices=['CPU', 'HPU'], help='Device type.')
    parser.add_argument('--distributed', action='store_true',
                        default=False, help='Enable distributed training.')
    parser.add_argument('--base_tf_server_port', type=int,
                        default=7850, help='Rank 0 port used by tf.distribute.')
    parser.add_argument('--save_summary_steps', type=int, default=0,
                        help='Steps between saving summaries to TensorBoard.')
    parser.add_argument('--recipe_cache', default='/tmp/vit_recipe_cache',
                        help='Path to recipe cache directory. Set to empty to disable recipe cache. Externally set \'TF_RECIPE_CACHE_PATH\' will override this setting.')
    parser.add_argument(
        '--dump_config', help='Side-by-side config file. Internal, do not use.')
    args = parser.parse_args()

    # --- Global configuration / precision ---------------------------------
    if args.weights_path is not None:
        config.WEIGHTS_DIR = args.weights_path

    if args.dtype == 'bf16':
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

    # --- Habana (HPU) device setup and recipe-cache management ------------
    if args.device == 'HPU':
        if args.distributed:
            os.environ['TF_HCCL_MEMORY_ALLOWANCE_MB'] = '1000'
        from habana_frameworks.tensorflow import load_habana_module
        load_habana_module()

        # Handle recipe caching.
        recipe_cache = args.recipe_cache
        if 'TF_RECIPE_CACHE_PATH' not in os.environ.keys() and recipe_cache:
            os.environ['TF_RECIPE_CACHE_PATH'] = recipe_cache

        # Clear previous recipe cache (rank 0 only in distributed runs).
        if not args.distributed or comm_rank() == 0:
            if os.path.exists(recipe_cache) and os.path.isdir(recipe_cache):
                import shutil
                shutil.rmtree(recipe_cache)
        # Wait for rank 0 to remove cache.
        if args.distributed:
            from mpi4py import MPI
            MPI.COMM_WORLD.Barrier()

    # Create separate log dir directory per worker.
    if args.distributed:
        config.LOG_DIR = os.path.join(
            config.LOG_DIR, f'worker_{comm_rank()}')

    # --- Determinism / seeding --------------------------------------------
    config.DETERMINISTIC = args.deterministic
    config.SEED = args.seed
    if args.deterministic:
        assert args.seed is not None, "Deterministic behavior require seed to be set."
        tf.config.threading.set_inter_op_parallelism_threads(1)
        tf.config.threading.set_intra_op_parallelism_threads(1)
        os.environ['TF_DETERMINISTIC_OPS'] = '1'
        config.DATA_AUGMENTATION = False
    if args.seed is not None:
        random.seed(args.seed)
        np.random.seed(args.seed)
        tf.random.set_seed(args.seed)

    # --- Distribution strategy --------------------------------------------
    if args.distributed:
        tf_distribute_config(args.base_tf_server_port)
        if args.device == 'HPU':
            from habana_frameworks.tensorflow.distribute import HPUStrategy
            strategy = HPUStrategy()
        else:
            strategy = tf.distribute.MultiWorkerMirroredStrategy()
    else:
        strategy = tf.distribute.OneDeviceStrategy(f'device:{args.device}:0')

    if not args.distributed or comm_rank() == 0:
        print('Number of devices: {}'.format(strategy.num_replicas_in_sync))

    # --- Hyperparameters / data pipelines ---------------------------------
    num_classes = 1000
    batch_size = args.batch_size
    nb_epoch = args.epochs
    dataset = args.dataset
    resume_from_checkpoint_path = args.resume_from_checkpoint_path
    resume_from_epoch = args.resume_from_epoch
    optim_name = args.optimizer
    initial_lr = args.initial_lr
    final_lr = args.final_lr
    lr_sched = args.lr_sched
    warmup_steps = args.warmup_steps
    model_name = args.model
    grad_accum_steps = args.grad_accum_steps

    ds_train = get_dataset(dataset, args.train_subset, batch_size,
                           is_training=True, distributed=args.distributed)
    ds_valid = get_dataset(dataset, args.val_subset,
                           batch_size, False, distributed=args.distributed)

    # Side-by-side dumps require deterministic graphs, so disable dropout.
    if args.dump_config is not None:
        vit.CONFIG_B['dropout'] = 0.0
        vit.CONFIG_L['dropout'] = 0.0

    # --- Model construction (inside the strategy scope) -------------------
    with strategy.scope():
        image_size = 384
        if model_name == 'ViT-B_16':
            model = vit.vit_b16(
                image_size=image_size,
                activation='softmax',
                pretrained=True,
                include_top=True,
                pretrained_top=False,
                classes=num_classes,
                weights="imagenet21k")
        elif model_name == 'ViT-L_16':
            model = vit.vit_l16(
                image_size=image_size,
                activation='softmax',
                pretrained=True,
                include_top=True,
                pretrained_top=False,
                classes=num_classes,
                weights="imagenet21k")
        elif model_name == 'ViT-B_32':
            model = vit.vit_b32(
                image_size=image_size,
                activation='softmax',
                pretrained=True,
                include_top=True,
                pretrained_top=False,
                classes=num_classes,
                weights="imagenet21k")
        elif model_name == 'ViT-L_32':
            model = vit.vit_l32(
                image_size=image_size,
                activation='softmax',
                pretrained=True,
                include_top=True,
                pretrained_top=False,
                classes=num_classes,
                weights="imagenet21k")
        else:
            print(
                "Model is not supported, please use either ViT-B_16 or ViT-L_16 or ViT-B_32 or ViT-L_32")
            exit(0)

        optimizer = get_optimizer(
            optim_name, initial_lr, accumulation_steps=grad_accum_steps, epsilon=1e-2)
        model.compile(optimizer=optimizer, loss='categorical_crossentropy',
                      metrics=['accuracy'], run_eagerly=False)

    # --- Training setup ----------------------------------------------------
    # Defaults derive from ImageNet sizes: 1,281,167 train / 50,000 val images.
    steps_per_epoch = 1281167 // batch_size
    if args.steps_per_epoch is not None:
        steps_per_epoch = args.steps_per_epoch
    validation_steps = 50000 // batch_size
    if args.validation_steps is not None:
        validation_steps = args.validation_steps

    total_steps = nb_epoch * steps_per_epoch
    resume_step = resume_from_epoch * steps_per_epoch

    lrate = get_lr_func(nb_epoch, lr_sched, initial_lr,
                        final_lr, warmup_steps, resume_step, total_steps)

    save_name = model_name if not model_name.endswith('.h5') else \
        os.path.split(model_name)[-1].split('.')[0].split('-')[0]
    model_ckpt = tf.keras.callbacks.ModelCheckpoint(
        os.path.join(config.SAVE_DIR, save_name) + '-ckpt-{epoch:03d}.h5',
        monitor='train_loss')

    callbacks = [lrate, model_ckpt]

    # TensorBoard / profiling hooks only on rank 0.
    profile_batch = 0
    if not args.distributed or comm_rank() == 0:
        profile_batch = tuple(int(i) for i in args.profile.split(','))
        if len(profile_batch) == 1:
            profile_batch = profile_batch[0]
        callbacks += [TensorBoardWithHParamsV2(
            vars(args), log_dir=config.LOG_DIR, update_freq=args.save_summary_steps, profile_batch=profile_batch)]
        callbacks += [TimeToTrainKerasHook(output_dir=config.LOG_DIR)]

    if args.save_summary_steps > 0:
        callbacks += [ExamplesPerSecondKerasHookV2(
            output_dir=config.LOG_DIR, every_n_steps=args.save_summary_steps, batch_size=args.batch_size)]

    # Evaluation-only mode: load the checkpoint, evaluate, and exit.
    if (args.evaluate_checkpoint_path is not None):
        model.load_weights(args.evaluate_checkpoint_path)
        results = model.evaluate(x=ds_valid, steps=validation_steps)
        print("Test loss, Test acc:", results)
        exit()

    if ((resume_from_epoch is not None) and (resume_from_checkpoint_path is not None)):
        model.load_weights(resume_from_checkpoint_path)

    # --- Train -------------------------------------------------------------
    with dump_callback(args.dump_config):
        model.fit(x=ds_train, y=None,
                  steps_per_epoch=steps_per_epoch,
                  callbacks=callbacks,
                  initial_epoch=resume_from_epoch,
                  epochs=nb_epoch,
                  shuffle=not args.deterministic,
                  verbose=1 if not args.distributed else comm_rank() == 0,
                  validation_data=(ds_valid, None),
                  validation_steps=validation_steps,
                  )

    # Only rank 0 writes the final model file.
    if not args.distributed or comm_rank() == 0:
        model.save(f'{config.SAVE_DIR}/{save_name}-model-final.h5')
|
294 |
+
|
295 |
+
|
296 |
+
# Script entry point.
if __name__ == '__main__':
    main()
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/utils/__init__.py
ADDED
File without changes
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/utils/dataset.py
ADDED
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
###############################################################################
|
2 |
+
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
|
3 |
+
###############################################################################
|
4 |
+
"""dataset.py
|
5 |
+
|
6 |
+
This module implements functions for reading ImageNet (ILSVRC2012)
|
7 |
+
dataset in TFRecords format.
|
8 |
+
"""
|
9 |
+
|
10 |
+
import os
|
11 |
+
from functools import partial
|
12 |
+
|
13 |
+
import tensorflow as tf
|
14 |
+
|
15 |
+
from config import config
|
16 |
+
from utils.image_processing import preprocess_image, resize_and_rescale_image
|
17 |
+
|
18 |
+
|
19 |
+
def decode_jpeg(image_buffer, scope=None):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
        image_buffer: scalar string Tensor.
        scope: Optional scope for name_scope.
    Returns:
        3-D float Tensor with values ranging from [0, 1).
    """
    with tf.compat.v1.name_scope(values=[image_buffer], name=scope,
                                 default_name='decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height
        # and width that is set dynamically by decode_jpeg. In other
        # words, the height and width of image is unknown at compile
        # time.
        image = tf.image.decode_jpeg(image_buffer, channels=3)

        # After this point, all image pixels reside in [0,1)
        # until the very end, when they're rescaled to (-1, 1).
        # The various adjust_* ops all require this range for dtype
        # float.
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        return image
|
43 |
+
|
44 |
+
|
45 |
+
def _parse_fn(example_serialized, is_training):
    """Parse one serialized ImageNet TFRecord Example into (image, label).

    Each Example proto (TFRecord) contains the following fields:

        image/height: 462
        image/width: 581
        image/colorspace: 'RGB'
        image/channels: 3
        image/class/label: 615
        image/class/synset: 'n03623198'
        image/class/text: 'knee pad'
        image/format: 'JPEG'
        image/filename: 'ILSVRC2012_val_00041207.JPEG'
        image/encoded: <JPEG encoded string>

    Args:
        example_serialized: scalar Tensor tf.string containing a
            serialized Example protocol buffer.
        is_training: training (True) or validation (False); forwarded to
            preprocess_image when data augmentation is enabled.

    Returns:
        A (image, label) tuple: the decoded, preprocessed 384x384 image and
        a one-hot tf.float32 label over 1000 classes.
    """
    feature_map = {
        'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,
                                               default_value=''),
        'image/class/label': tf.io.FixedLenFeature([], dtype=tf.int64,
                                                   default_value=-1),
        'image/class/text': tf.io.FixedLenFeature([], dtype=tf.string,
                                                  default_value=''),
    }
    parsed = tf.io.parse_single_example(
        serialized=example_serialized, features=feature_map)
    image = decode_jpeg(parsed['image/encoded'])
    if config.DATA_AUGMENTATION:
        image = preprocess_image(image, 384, 384, is_training=is_training)
    else:
        image = resize_and_rescale_image(image, 384, 384)
    # The label in the tfrecords is 1~1000 (0 not used),
    # so subtract 1 to get a 0-based class index before one-hot encoding.
    label = tf.one_hot(parsed['image/class/label'] - 1, 1000, dtype=tf.float32)
    return (image, label)
|
91 |
+
|
92 |
+
|
93 |
+
def get_dataset(tfrecords_dir, subset, batch_size, is_training, distributed):
    """Read TFRecords files and turn them into a TFRecordDataset.

    Args:
        tfrecords_dir: dataset directory
        subset: pattern to detect subset in dataset directory
        batch_size: Global batch size
        is_training (bool): use True if dataset will be used for training
        distributed (bool): use True if used in distributed environment

    Returns:
        TFRecordDataset: Dataset.
    """
    filenames = tf.io.matching_files(
        os.path.join(tfrecords_dir, '%s-*' % subset))
    ds = tf.data.Dataset.from_tensor_slices(filenames)

    # Sharding should be used only for training and in distributed environments.
    if distributed and is_training:
        from utils.distribution_utils import comm_size, comm_rank
        ds = ds.shard(comm_size(), comm_rank())

    if is_training:
        # Shuffle file order before interleaving so epochs differ.
        num_files = tf.cast(tf.shape(input=filenames)[0], tf.int64)
        ds = ds.shuffle(buffer_size=num_files)

    # Read up to 10 TFRecord files in parallel.
    ds = ds.interleave(tf.data.TFRecordDataset, cycle_length=10)

    if is_training:
        # Record-level shuffle; repeat indefinitely (fit() bounds by steps).
        ds = ds.shuffle(buffer_size=10000)
        ds = ds.repeat()

    parser = partial(_parse_fn, is_training=is_training)
    ds = ds.map(map_func=parser,
                num_parallel_calls=config.NUM_DATA_WORKERS, deterministic=False)
    ds = ds.batch(batch_size=batch_size, drop_remainder=True)

    # Sharding is already done, so disable autosharding.
    options = tf.data.Options()
    options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF

    return ds.with_options(options)
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/utils/distribution_utils.py
ADDED
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
###############################################################################
|
16 |
+
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
|
17 |
+
###############################################################################
|
18 |
+
"""Helper functions for running models in a distributed setting."""
|
19 |
+
|
20 |
+
|
21 |
+
import json
|
22 |
+
import os
|
23 |
+
|
24 |
+
|
25 |
+
def comm_size():
    """Return the MPI world size (1 when not launched under mpirun)."""
    size = os.environ.get("OMPI_COMM_WORLD_SIZE")
    return int(size) if size is not None else 1
|
27 |
+
|
28 |
+
|
29 |
+
def comm_rank():
    """Return this process' MPI world rank (0 when not launched under mpirun)."""
    rank = os.environ.get("OMPI_COMM_WORLD_RANK")
    return int(rank) if rank is not None else 0
|
31 |
+
|
32 |
+
|
33 |
+
def configure_cluster(worker_hosts=None, task_index=-1):
    """Set a multi-worker cluster spec in the TF_CONFIG environment variable.

    An already-present TF_CONFIG takes precedence; otherwise a spec is built
    from `worker_hosts` and exported for this process.

    Args:
        worker_hosts: comma-separated list of worker ip:port pairs.
        task_index: index of this task in the worker list; must be
            non-negative whenever more than one worker is configured.

    Returns:
        Number of workers in the cluster.

    Raises:
        ValueError: if several workers are given but `task_index` is negative.
    """
    existing = json.loads(os.environ.get('TF_CONFIG', '{}'))
    if existing:
        # Respect an externally provided spec: count chief + worker entries.
        cluster = existing['cluster']
        return len(cluster.get('chief', [])) + len(cluster.get('worker', []))

    if not worker_hosts:
        # Single-process fallback: no spec to export.
        return 1

    hosts = worker_hosts.split(',')
    if len(hosts) > 1 and task_index < 0:
        raise ValueError(
            'Must specify task_index when number of workers > 1')
    index = 0 if len(hosts) == 1 else task_index
    os.environ['TF_CONFIG'] = json.dumps({
        'cluster': {
            'worker': hosts
        },
        'task': {'type': 'worker', 'index': index}
    })
    return len(hosts)
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/utils/image_processing.py
ADDED
@@ -0,0 +1,468 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
# ==============================================================================
|
15 |
+
|
16 |
+
# The code was taken from:
|
17 |
+
# https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py
|
18 |
+
#
|
19 |
+
# I've renamed the file and modify the code to suit my own need.
|
20 |
+
# JK Jung, <[email protected]>
|
21 |
+
#
|
22 |
+
# Changed usage of tensorflow-addons rotate to use keras implementation instead
|
23 |
+
|
24 |
+
"""Provides utilities to preprocess images for the Inception networks."""
|
25 |
+
|
26 |
+
from __future__ import absolute_import
|
27 |
+
from __future__ import division
|
28 |
+
from __future__ import print_function
|
29 |
+
|
30 |
+
import math
|
31 |
+
import random
|
32 |
+
|
33 |
+
import tensorflow as tf
|
34 |
+
|
35 |
+
from tensorflow.python.ops import control_flow_ops
|
36 |
+
from keras.layers import RandomRotation
|
37 |
+
|
38 |
+
def _smallest_size_at_least(height, width, smallest_side):
    """Compute a resized shape whose shorter edge equals `smallest_side`.

    The original aspect ratio is preserved.

    Args:
        height: an int32 scalar tensor with the current height.
        width: an int32 scalar tensor with the current width.
        smallest_side: python int or scalar `Tensor` giving the target size
            of the shorter edge after the resize.

    Returns:
        A (new_height, new_width) pair of int32 scalar tensors.
    """
    smallest_side = tf.convert_to_tensor(value=smallest_side, dtype=tf.int32)

    h = tf.cast(height, dtype=tf.float32)
    w = tf.cast(width, dtype=tf.float32)
    target = tf.cast(smallest_side, dtype=tf.float32)

    # Scale so the smaller dimension lands exactly on `target`.
    scale = tf.cond(pred=tf.greater(h, w),
                    true_fn=lambda: target / w,
                    false_fn=lambda: target / h)
    new_height = tf.cast(tf.math.rint(h * scale), dtype=tf.int32)
    new_width = tf.cast(tf.math.rint(w * scale), dtype=tf.int32)
    return new_height, new_width
|
66 |
+
|
67 |
+
|
68 |
+
def _aspect_preserving_resize(image, smallest_side):
    """Resize `image` so its shorter edge equals `smallest_side`.

    Args:
        image: a 3-D image `Tensor`.
        smallest_side: python int or scalar `Tensor` giving the size of the
            shorter edge after the resize.

    Returns:
        A 3-D tensor containing the resized image.
    """
    smallest_side = tf.convert_to_tensor(value=smallest_side, dtype=tf.int32)

    input_shape = tf.shape(input=image)
    new_height, new_width = _smallest_size_at_least(
        input_shape[0], input_shape[1], smallest_side)
    # tf.image.resize expects a batch dimension; add one and strip it again.
    batched = tf.expand_dims(image, 0)
    resized = tf.image.resize(batched, [new_height, new_width],
                              method=tf.image.ResizeMethod.BILINEAR)
    resized = tf.squeeze(resized)
    resized.set_shape([None, None, 3])
    return resized
|
91 |
+
|
92 |
+
|
93 |
+
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

    Note that the method doesn't assume we know the input image size but it does
    assume we know the input image rank.

    Args:
        image: an image of shape [height, width, channels].
        offset_height: a scalar tensor indicating the height offset.
        offset_width: a scalar tensor indicating the width offset.
        crop_height: the height of the cropped image.
        crop_width: the width of the cropped image.

    Returns:
        the cropped (and resized) image.

    Raises:
        InvalidArgumentError: if the rank is not 3 or if the image dimensions are
            less than the crop size.
    """
    original_shape = tf.shape(input=image)

    # tf.Assert only fires at graph execution time, so it must be chained in
    # via control_dependencies to guarantee it runs before the crop.
    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

    # Guard against a crop window that does not fit inside the image.
    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])

    offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), dtype=tf.int32)

    # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(image, offsets, cropped_shape)
    # The dynamic slice loses static shape information; reshape restores it.
    return tf.reshape(image, cropped_shape)
|
134 |
+
|
135 |
+
|
136 |
+
def _central_crop(image_list, crop_height, crop_width):
    """Centrally crop each image in `image_list` to the requested size.

    Args:
        image_list: list of image tensors of the same spatial dimensions but
            possibly varying channel counts.
        crop_height: height of each image after the crop.
        crop_width: width of each image after the crop.

    Returns:
        The list of cropped images.
    """
    cropped = []
    for img in image_list:
        shape = tf.shape(input=img)
        # Center the crop window inside the image.
        top = (shape[0] - crop_height) / 2
        left = (shape[1] - crop_width) / 2
        cropped.append(_crop(img, top, left, crop_height, crop_width))
    return cropped
|
159 |
+
|
160 |
+
|
161 |
+
def apply_with_random_selector(x, func, num_cases):
    """Computes func(x, sel), with sel sampled from [0...num_cases-1].

    Args:
        x: input Tensor.
        func: Python function to apply.
        num_cases: Python int32, number of cases to sample sel from.

    Returns:
        The result of func(x, sel), where func receives the value of the
        selector as a python integer, but sel is sampled dynamically.
    """
    sel = tf.random.uniform([], maxval=num_cases, dtype=tf.int32)
    # Pass the real x only to one of the func calls.
    # switch() forwards x to branch `case` only when sel == case (the other
    # branches receive a dead tensor), and merge() returns the output of the
    # single branch that actually ran. All num_cases subgraphs are built, but
    # only the selected one executes.
    return control_flow_ops.merge([
        func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
        for case in range(num_cases)])[0]
|
178 |
+
|
179 |
+
|
180 |
+
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
    """Randomly distort the colors of a Tensor image.

    Each color distortion is non-commutative, so the order of the ops matters.
    Rather than permuting the order at random, `color_ordering` selects one
    fixed ordering per preprocessing thread.

    Args:
        image: 3-D Tensor containing a single image in [0, 1].
        color_ordering: Python int, which distortion ordering to use (0-3).
        fast_mode: when True only brightness and saturation are applied
            (skipping the slower hue/contrast ops); any non-zero ordering
            then falls back to the saturation-first sequence.
        scope: Optional scope for name_scope.

    Returns:
        3-D Tensor, the color-distorted image on range [0, 1].

    Raises:
        ValueError: if color_ordering is not in [0, 3] (slow mode only).
    """
    with tf.compat.v1.name_scope(scope, 'distort_color', [image]):
        # Elementary distortions; each maps an image tensor to a new one.
        brightness = lambda img: tf.image.random_brightness(img, max_delta=32. / 255.)
        saturation = lambda img: tf.image.random_saturation(img, lower=0.5, upper=1.5)
        hue = lambda img: tf.image.random_hue(img, max_delta=0.2)
        contrast = lambda img: tf.image.random_contrast(img, lower=0.5, upper=1.5)

        if fast_mode:
            if color_ordering == 0:
                sequence = (brightness, saturation)
            else:
                sequence = (saturation, brightness)
        else:
            orderings = {
                0: (brightness, saturation, hue, contrast),
                1: (saturation, brightness, contrast, hue),
                2: (contrast, hue, brightness, saturation),
                3: (hue, saturation, contrast, brightness),
            }
            if color_ordering not in orderings:
                raise ValueError('color_ordering must be in [0, 3]')
            sequence = orderings[color_ordering]

        for op in sequence:
            image = op(image)

        # The random_* ops do not necessarily clamp.
        return tf.clip_by_value(image, 0.0, 1.0)
|
232 |
+
|
233 |
+
|
234 |
+
def distorted_bounding_box_crop(image,
                                bbox,
                                min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0),
                                max_attempts=100,
                                scope=None):
    """Generate a random crop of `image` constrained by `bbox`.

    A new bounding box is sampled as a randomly distorted version of the
    supplied (human-annotated) one, subject to the aspect-ratio, area and
    coverage constraints; the image is then cropped to that box. When `bbox`
    holds no boxes the whole image is used. See
    `tf.image.sample_distorted_bounding_box` for full documentation.

    Args:
        image: 3-D Tensor of an image (converted to floats in [0, 1]).
        bbox: 3-D float Tensor of boxes shaped [1, num_boxes, coords], each
            coordinate in [0, 1) ordered [ymin, xmin, ymax, xmax]; an empty
            set of boxes means the whole image is the box.
        min_object_covered: optional float; minimum fraction of any supplied
            box that the crop must contain.
        aspect_ratio_range: optional floats; allowed width/height ratio of
            the cropped area.
        area_range: optional floats; allowed fraction of the image area the
            crop may cover.
        max_attempts: optional int; sampling attempts before falling back to
            the entire image.
        scope: Optional scope for name_scope.

    Returns:
        A (cropped_image, distorted_bbox) tuple.
    """
    with tf.compat.v1.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
        begin, size, distorted_bbox = tf.image.sample_distorted_bounding_box(
            image_size=tf.shape(input=image),
            bounding_boxes=bbox,
            min_object_covered=min_object_covered,
            aspect_ratio_range=aspect_ratio_range,
            area_range=area_range,
            max_attempts=max_attempts,
            use_image_if_no_bounding_boxes=True)

        # Crop the image to the sampled window.
        cropped = tf.slice(image, begin, size)
        return cropped, distorted_bbox
|
289 |
+
|
290 |
+
|
291 |
+
def resize_and_rescale_image(image, height, width,
                             do_mean_subtraction=True, scope=None):
    """Resize an image and optionally rescale it from [0, 1] to [-1, 1].

    Args:
        image: 3-D float Tensor.
        height: integer, target height.
        width: integer, target width.
        do_mean_subtraction: bool; when True, map pixel values from [0, 1]
            to [-1, 1] after resizing.
        scope: Optional scope for name_scope.

    Returns:
        3-D float Tensor of the prepared image.
    """
    with tf.compat.v1.name_scope(values=[image, height, width], name=scope,
                                 default_name='resize_image'):
        # Bilinear resize requires a batch dimension; add and remove it.
        batched = tf.expand_dims(image, 0)
        batched = tf.image.resize(batched, [height, width],
                                  method=tf.image.ResizeMethod.BILINEAR)
        image = tf.squeeze(batched, [0])
        if do_mean_subtraction:
            # Rescale [0, 1] -> [-1, 1].
            image = tf.multiply(tf.subtract(image, 0.5), 2.0)
        return image
|
314 |
+
|
315 |
+
def rotate_image(image, angle):
    """Rotate `image` by `angle` degrees using Keras' RandomRotation layer.

    Bug fixed: `RandomRotation.factor` is expressed as a fraction of a full
    turn (2*pi), and a *scalar* factor means "sample a rotation uniformly
    from [-factor, factor] turns". The previous code passed
    `math.radians(angle)` as a scalar, which produced a random rotation of
    up to ~2*pi*radians(angle) instead of a fixed rotation of `angle`
    degrees. Passing an (x, x) tuple with x = angle/360 pins the rotation to
    exactly `angle` degrees, and `training=True` ensures the augmentation is
    actually applied when the layer is invoked outside a training context.

    Args:
        image: image tensor (HWC, or NHWC batch) to rotate.
        angle: rotation angle in degrees.

    Returns:
        The rotated image as a float tensor.
    """
    fraction = angle / 360.0  # degrees -> fraction of a full turn
    return RandomRotation(factor=(fraction, fraction))(image, training=True)
|
317 |
+
|
318 |
+
def preprocess_for_train(image,
                         height,
                         width,
                         bbox,
                         max_angle=15.,
                         fast_mode=True,
                         scope=None,
                         add_image_summaries=False):
    """Distort one image for training a network.

    Distorting images provides a useful technique for augmenting the data
    set during training in order to make the network invariant to aspects
    of the image that do not effect the label.

    Pipeline: random rotation -> random bbox-constrained crop -> resize to
    (height, width) -> random horizontal flip -> random color distortion ->
    rescale from [0, 1] to [-1, 1].

    Args:
        image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
            [0, 1], otherwise it would converted to tf.float32 assuming that the range
            is [0, MAX], where MAX is largest positive representable number for
            int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
        height: integer
        width: integer
        bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
            where each coordinate is [0, 1) and the coordinates are arranged
            as [ymin, xmin, ymax, xmax].
        max_angle: float, maximum rotation magnitude in degrees for the random
            rotation step.
        fast_mode: Optional boolean, if True avoids slower transformations (i.e.
            bi-cubic resizing, random_hue or random_contrast).
        scope: Optional scope for name_scope.
        add_image_summaries: Enable image summaries (currently unused; the
            summary calls below are commented out).
    Returns:
        3-D float Tensor of distorted image used for training with range [-1, 1].
    """
    with tf.compat.v1.name_scope(scope, 'distort_image', [image, height, width, bbox]):
        assert image.dtype == tf.float32
        # Random rotation between -max_angle and +max_angle degrees, applied
        # with probability 0.75 (angle drawn on the Python side, per call).
        angle = random.uniform(-max_angle, max_angle) \
            if random.random() < 0.75 else 0.

        # Original tf1.x code used tf.contrib; replaced for tf2.x.
        # rotated_image = tf.contrib.image.rotate(image, math.radians(angle),
        #                                         interpolation='BILINEAR')
        # NOTE(review): tf.py_function returns a tensor with no static shape —
        # the set_shape after the crop below re-establishes it.
        rotated_image = tf.py_function(rotate_image, [image, angle], tf.float32)

        # Random crop constrained to cover most of the annotated bbox.
        distorted_image, distorted_bbox = distorted_bounding_box_crop(
            rotated_image,
            bbox,
            min_object_covered=0.6,
            area_range=(0.6, 1.0))
        # Restore the shape since the dynamic slice based upon the bbox_size loses
        # the third dimension.
        distorted_image.set_shape([None, None, 3])

        # This resizing operation may distort the images because the aspect
        # ratio is not respected. We select a resize method in a round robin
        # fashion based on the thread number.
        # Note that ResizeMethod contains 4 enumerated resizing methods.

        # We select only 1 case for fast_mode bilinear.

        # In tf2.x only bilinear interpolation is used here, so the selector
        # always resolves to the same resize op regardless of `method`.
        num_resize_cases = 1 if fast_mode else 4
        distorted_image = apply_with_random_selector(
            distorted_image,
            # lambda x, method: tf.image.resize(x, [height, width], method),
            lambda x, method: tf.image.resize(x, [height, width], tf.image.ResizeMethod.BILINEAR),
            num_cases=num_resize_cases)

        # if add_image_summaries:
        #     tf.summary.image('training_image',
        #                      tf.expand_dims(distorted_image, 0))

        # Randomly flip the image horizontally.
        distorted_image = tf.image.random_flip_left_right(distorted_image)

        # Randomly distort the colors. There are 1 or 4 ways to do it.
        num_distort_cases = 1 if fast_mode else 4
        distorted_image = apply_with_random_selector(
            distorted_image,
            lambda x, ordering: distort_color(x, ordering, fast_mode),
            num_cases=num_distort_cases)

        # if add_image_summaries:
        #     tf.summary.image('final_distorted_image',
        #                      tf.expand_dims(distorted_image, 0))

        # Rescale [0, 1] -> [-1, 1].
        distorted_image = tf.subtract(distorted_image, 0.5)
        distorted_image = tf.multiply(distorted_image, 2.0)
        return distorted_image
|
409 |
+
|
410 |
+
|
411 |
+
def preprocess_for_eval(image,
                        height,
                        width,
                        scope=None,
                        add_image_summaries=False):
    """Prepare one image for evaluation.

    The image is resized so its shorter edge equals max(height, width),
    centrally cropped to (height, width), and rescaled from [0, 1] to [-1, 1].

    Args:
        image: 3-D float Tensor with values in [0, 1].
        height: integer, output height.
        width: integer, output width.
        scope: Optional scope for name_scope.
        add_image_summaries: unused; kept for interface compatibility.

    Returns:
        3-D float Tensor of the prepared image.
    """
    with tf.compat.v1.name_scope(scope, 'eval_image', [image, height, width]):
        assert image.dtype == tf.float32
        resized = _aspect_preserving_resize(image, max(height, width))
        cropped = _central_crop([resized], height, width)[0]
        cropped.set_shape([height, width, 3])

        # Rescale [0, 1] -> [-1, 1].
        cropped = tf.subtract(cropped, 0.5)
        cropped = tf.multiply(cropped, 2.0)
        return cropped
|
443 |
+
|
444 |
+
|
445 |
+
def preprocess_image(image,
                     height,
                     width,
                     is_training=False):
    """Pre-process one image for training or evaluation.

    Args:
        image: 3-D Tensor [height, width, channels] with the image.
        height: integer, image expected height.
        width: integer, image expected width.
        is_training: Boolean; selects the training-time augmentation pipeline
            when True, the deterministic evaluation pipeline otherwise.

    Returns:
        3-D float Tensor containing an appropriately scaled image.
    """
    if not is_training:
        return preprocess_for_eval(image, height, width)
    # No annotation available: treat the whole image as the bounding box.
    whole_image_bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                                   dtype=tf.float32,
                                   shape=[1, 1, 4])
    return preprocess_for_train(image, height, width, whole_image_bbox,
                                fast_mode=True)
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/vit_keras/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
__version__ = "0.0.0"
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/vit_keras/layers.py
ADDED
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# pylint: disable=arguments-differ,missing-function-docstring,missing-class-docstring,unexpected-keyword-arg,no-value-for-parameter
|
2 |
+
import tensorflow as tf
|
3 |
+
|
4 |
+
|
5 |
+
@tf.keras.utils.register_keras_serializable()
class ClassToken(tf.keras.layers.Layer):
    """Prepend a learnable classification token to the input sequence."""

    def build(self, input_shape):
        # One learnable token whose width matches the input embeddings.
        self.hidden_size = input_shape[-1]
        self.cls = tf.Variable(
            name="cls",
            initial_value=tf.zeros_initializer()(
                shape=(1, 1, self.hidden_size), dtype="float32"
            ),
            trainable=True,
        )

    def call(self, inputs):
        n = tf.shape(inputs)[0]
        # Tile the single token across the batch and cast to the input dtype
        # (stored in float32, e.g. for mixed-precision runs).
        token = tf.cast(
            tf.broadcast_to(self.cls, [n, 1, self.hidden_size]),
            dtype=inputs.dtype,
        )
        return tf.concat([token, inputs], 1)

    def get_config(self):
        return super().get_config()

    @classmethod
    def from_config(cls, config):
        return cls(**config)
|
33 |
+
|
34 |
+
|
35 |
+
@tf.keras.utils.register_keras_serializable()
class AddPositionEmbs(tf.keras.layers.Layer):
    """Adds learned positional embeddings to a (batch, seq, dim) input."""

    def build(self, input_shape):
        assert (
            len(input_shape) == 3
        ), f"Number of dimensions should be 3, got {len(input_shape)}"
        # One embedding vector per sequence position, shared across the batch.
        initializer = tf.random_normal_initializer(stddev=0.06)
        self.pe = tf.Variable(
            name="pos_embedding",
            initial_value=initializer(shape=(1, input_shape[1], input_shape[2])),
            dtype="float32",
            trainable=True,
        )

    def call(self, inputs):
        # Stored in float32; cast to the input dtype (mixed-precision safe).
        return inputs + tf.cast(self.pe, dtype=inputs.dtype)

    def get_config(self):
        return super().get_config()

    @classmethod
    def from_config(cls, config):
        return cls(**config)
|
62 |
+
|
63 |
+
|
64 |
+
@tf.keras.utils.register_keras_serializable()
class MultiHeadSelfAttention(tf.keras.layers.Layer):
    """Multi-head scaled dot-product self-attention."""

    def __init__(self, *args, num_heads, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_heads = num_heads

    def build(self, input_shape):
        hidden_size = input_shape[-1]
        num_heads = self.num_heads
        if hidden_size % num_heads != 0:
            raise ValueError(
                f"embedding dimension = {hidden_size} should be divisible by number of heads = {num_heads}"
            )
        self.hidden_size = hidden_size
        self.projection_dim = hidden_size // num_heads
        # Sub-layer names ("query"/"key"/"value"/"out") are part of the
        # saved-weight naming; keep them as-is.
        self.query_dense = tf.keras.layers.Dense(hidden_size, name="query")
        self.key_dense = tf.keras.layers.Dense(hidden_size, name="key")
        self.value_dense = tf.keras.layers.Dense(hidden_size, name="value")
        self.combine_heads = tf.keras.layers.Dense(hidden_size, name="out")

    # pylint: disable=no-self-use
    def attention(self, query, key, value):
        # Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V.
        logits = tf.matmul(query, key, transpose_b=True)
        d_k = tf.cast(tf.shape(key)[-1], logits.dtype)
        weights = tf.nn.softmax(logits / tf.math.sqrt(d_k), axis=-1)
        return tf.matmul(weights, value), weights

    def separate_heads(self, x, batch_size):
        # (B, T, H*D) -> (B, H, T, D)
        split = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim))
        return tf.transpose(split, perm=[0, 2, 1, 3])

    def call(self, inputs):
        n = tf.shape(inputs)[0]
        q = self.separate_heads(self.query_dense(inputs), n)
        k = self.separate_heads(self.key_dense(inputs), n)
        v = self.separate_heads(self.value_dense(inputs), n)

        attended, weights = self.attention(q, k, v)
        # (B, H, T, D) -> (B, T, H*D) before the output projection.
        attended = tf.transpose(attended, perm=[0, 2, 1, 3])
        merged = tf.reshape(attended, (n, -1, self.hidden_size))
        return self.combine_heads(merged), weights

    def get_config(self):
        config = super().get_config()
        config.update({"num_heads": self.num_heads})
        return config

    @classmethod
    def from_config(cls, config):
        return cls(**config)
|
120 |
+
|
121 |
+
|
122 |
+
# pylint: disable=too-many-instance-attributes
|
123 |
+
@tf.keras.utils.register_keras_serializable()
class TransformerBlock(tf.keras.layers.Layer):
    """Implements a Transformer block (pre-norm attention + MLP sublayers)."""

    def __init__(self, *args, num_heads, mlp_dim, dropout, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_heads = num_heads  # attention heads in the MHSA sub-layer
        self.mlp_dim = mlp_dim      # hidden width of the MLP sub-layer
        self.dropout = dropout      # rate shared by all dropout layers below

    def build(self, input_shape):
        # NOTE(review): the explicit sub-layer names below
        # ("MultiHeadDotProductAttention_1", "MlpBlock_3", "LayerNorm_0", ...)
        # presumably match upstream ViT checkpoint weight names — confirm
        # before renaming any of them.
        self.att = MultiHeadSelfAttention(
            num_heads=self.num_heads,
            name="MultiHeadDotProductAttention_1",
        )
        self.mlpblock = tf.keras.Sequential(
            [
                tf.keras.layers.Dense(
                    self.mlp_dim,
                    activation="linear",
                    name=f"{self.name}/Dense_0",
                ),
                # Exact (non-approximate) GELU activation.
                tf.keras.layers.Lambda(
                    lambda x: tf.keras.activations.gelu(x, approximate=False)
                ),
                tf.keras.layers.Dropout(self.dropout),
                # Project back to the input embedding width.
                tf.keras.layers.Dense(input_shape[-1], name=f"{self.name}/Dense_1"),
                tf.keras.layers.Dropout(self.dropout),
            ],
            name="MlpBlock_3",
        )
        self.layernorm1 = tf.keras.layers.LayerNormalization(
            epsilon=1e-6, name="LayerNorm_0"
        )
        self.layernorm2 = tf.keras.layers.LayerNormalization(
            epsilon=1e-6, name="LayerNorm_2"
        )
        self.dropout_layer = tf.keras.layers.Dropout(self.dropout)

    def call(self, inputs, training):
        # Attention sub-layer: LayerNorm -> MHSA -> dropout -> residual add.
        x = self.layernorm1(inputs)
        x, weights = self.att(x)
        x = self.dropout_layer(x, training=training)
        x = x + inputs
        # MLP sub-layer: LayerNorm -> MLP block -> residual add.
        y = self.layernorm2(x)
        y = self.mlpblock(y)
        # Also return the attention weights for visualization/attention maps.
        return x + y, weights

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "num_heads": self.num_heads,
                "mlp_dim": self.mlp_dim,
                "dropout": self.dropout,
            }
        )
        return config

    @classmethod
    def from_config(cls, config):
        return cls(**config)
|
docker/intel_code/llama13b/Model-References/TensorFlow/computer_vision/VisionTransformer/vit_keras/utils.py
ADDED
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import io
import os
import typing
import warnings
from http import client
from urllib import parse, request

import cv2
import numpy as np
import pkg_resources
import scipy as sp

#import validators

try:
    import PIL
    import PIL.Image
except ImportError:  # pragma: no cover
    PIL = None

ImageInputType = typing.Union[str, np.ndarray, "PIL.Image.Image", io.BytesIO]
|
20 |
+
|
21 |
+
|
22 |
+
def get_imagenet_classes() -> typing.List[str]:
    """Return the ImageNet 2012 class labels, one per list entry.

    Labels are read from the ``imagenet2012.txt`` resource bundled
    with the ``vit_keras`` package.
    """
    path = pkg_resources.resource_filename("vit_keras", "imagenet2012.txt")
    with open(path) as handle:
        return [line.strip() for line in handle]
|
28 |
+
|
29 |
+
|
30 |
+
def read(filepath_or_buffer: ImageInputType, size, timeout=None):
    """Read a file into an RGB image array.

    Args:
        filepath_or_buffer: The path to the file, an http(s) URL, a PIL
            image, or any object with a `read` method (such as
            `io.BytesIO`).
        size: The side length to resize the image to; the result is
            always square, ``(size, size)``.
        timeout: If filepath_or_buffer is a URL, the timeout to
            use for making the HTTP request.

    Returns:
        The decoded image as an RGB array resized to ``(size, size)``.

    Raises:
        FileNotFoundError: If a string path does not point to a file.
        ValueError: If the file exists but cannot be decoded as an image.
    """
    if PIL is not None and isinstance(filepath_or_buffer, PIL.Image.Image):
        return np.array(filepath_or_buffer.convert("RGB"))
    if isinstance(filepath_or_buffer, (io.BytesIO, client.HTTPResponse)):
        image = np.asarray(bytearray(filepath_or_buffer.read()), dtype=np.uint8)
        image = cv2.imdecode(image, cv2.IMREAD_UNCHANGED)
    elif isinstance(filepath_or_buffer, str) and parse.urlparse(
        filepath_or_buffer
    ).scheme in ("http", "https"):
        # BUGFIX: this previously called validators.url(), but the
        # `validators` import is commented out at the top of the file,
        # so every call with a string argument raised NameError.
        # urllib.parse.urlparse is an equivalent stdlib URL check.
        return read(request.urlopen(filepath_or_buffer, timeout=timeout), size=size)
    else:
        if not os.path.isfile(filepath_or_buffer):
            raise FileNotFoundError(
                "Could not find image at path: " + filepath_or_buffer
            )
        image = cv2.imread(filepath_or_buffer)
    if image is None:
        raise ValueError(f"An error occurred reading {filepath_or_buffer}.")
    # We use cvtColor here instead of just ret[..., ::-1]
    # in order to ensure that we provide a contiguous
    # array for later processing. Some hashers use ctypes
    # to pass the array and non-contiguous arrays can lead
    # to erroneous results.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return cv2.resize(image, (size, size))
|
61 |
+
|
62 |
+
|
63 |
+
def apply_embedding_weights(target_layer, source_weights):
    """Copy position-embedding weights into *target_layer*.

    When the source grid does not match the layer's expected shape, the
    grid portion is resized with linear interpolation while the leading
    class token is kept unchanged, and a UserWarning is emitted.

    Args:
        target_layer: The target layer to which weights will
            be applied.
        source_weights: The source weights, which will be
            resized as necessary.
    """
    expected = target_layer.weights[0].shape
    if source_weights.shape != expected:
        cls_token = source_weights[0, :1]
        grid_flat = source_weights[0, 1:]
        # Grid sizes are assumed square: tokens = side * side (+1 class token).
        grid_in = int(np.sqrt(grid_flat.shape[0]))
        grid_out = int(np.sqrt(expected[1] - 1))
        warnings.warn(
            f"Resizing position embeddings from {grid_in} to {grid_out}",
            UserWarning,
        )
        scale = grid_out / grid_in
        resized = sp.ndimage.zoom(
            grid_flat.reshape(grid_in, grid_in, -1), (scale, scale, 1), order=1
        )
        grid_flat = resized.reshape(grid_out * grid_out, -1)
        source_weights = np.concatenate([cls_token, grid_flat], axis=0)[np.newaxis]
    target_layer.set_weights([source_weights])
|
87 |
+
|
88 |
+
|
89 |
+
def load_weights_numpy(model, params_path, pretrained_top):
    """Load weights saved using Flax as a numpy array.

    Archive keys follow the Flax ViT naming scheme (e.g.
    "Transformer/encoderblock_0/LayerNorm_0/scale"); they are mapped
    onto Keras layers looked up by name.  Mismatches are reported with
    UserWarnings rather than errors, except for a transformer-count
    mismatch, which raises AssertionError.

    Args:
        model: A Keras model to load the weights into.
        params_path: Filepath to a numpy archive.
        pretrained_top: Whether to load the top layer weights.
    """
    params_dict = np.load(
        params_path, allow_pickle=False
    )  # pylint: disable=unexpected-keyword-arg
    source_keys = list(params_dict.keys())
    # Whether the model has the optional pre-logits layer (determines
    # below whether its weights should be loaded).
    pre_logits = any(l.name == "pre_logits" for l in model.layers)
    source_keys_used = []
    # Count transformer blocks in the archive by the distinct
    # "Transformer/encoderblock_<i>" prefixes among the keys.
    n_transformers = len(
        set(
            "/".join(k.split("/")[:2])
            for k in source_keys
            if k.startswith("Transformer/encoderblock_")
        )
    )
    n_transformers_out = sum(
        l.name.startswith("Transformer/encoderblock_") for l in model.layers
    )
    assert n_transformers == n_transformers_out, (
        f"Wrong number of transformers ("
        f"{n_transformers_out} in model vs. {n_transformers} in weights)."
    )

    # Build a list of {layer, keys[, reshape]} records pairing each Keras
    # (sub-)layer with the archive keys holding its weights, in the order
    # the layer's set_weights() expects them.
    matches = []
    for tidx in range(n_transformers):
        encoder = model.get_layer(f"Transformer/encoderblock_{tidx}")
        source_prefix = f"Transformer/encoderblock_{tidx}"
        matches.extend(
            # LayerNorms: Flax calls the pair (scale, bias).
            [
                {
                    "layer": layer,
                    "keys": [
                        f"{source_prefix}/{norm}/{name}" for name in ["scale", "bias"]
                    ],
                }
                for norm, layer in [
                    ("LayerNorm_0", encoder.layernorm1),
                    ("LayerNorm_2", encoder.layernorm2),
                ]
            ]
            # The two Dense layers inside the encoder's MLP block.
            + [
                {
                    "layer": encoder.mlpblock.get_layer(
                        f"{source_prefix}/Dense_{mlpdense}"
                    ),
                    "keys": [
                        f"{source_prefix}/MlpBlock_3/Dense_{mlpdense}/{name}"
                        for name in ["kernel", "bias"]
                    ],
                }
                for mlpdense in [0, 1]
            ]
            # Attention projections; "reshape" because Flax stores these
            # split per-head while the Keras Dense layers are flat.
            + [
                {
                    "layer": layer,
                    "keys": [
                        f"{source_prefix}/MultiHeadDotProductAttention_1/{attvar}/{name}"
                        for name in ["kernel", "bias"]
                    ],
                    "reshape": True,
                }
                for attvar, layer in [
                    ("query", encoder.att.query_dense),
                    ("key", encoder.att.key_dense),
                    ("value", encoder.att.value_dense),
                    ("out", encoder.att.combine_heads),
                ]
            ]
        )
    for layer_name in ["embedding", "head", "pre_logits"]:
        if layer_name == "head" and not pretrained_top:
            # Classification head deliberately skipped; mark its keys as
            # used so they are not reported in the "unused" warning.
            source_keys_used.extend(["head/kernel", "head/bias"])
            continue
        if layer_name == "pre_logits" and not pre_logits:
            continue
        matches.append(
            {
                "layer": model.get_layer(layer_name),
                "keys": [f"{layer_name}/{name}" for name in ["kernel", "bias"]],
            }
        )
    matches.append({"layer": model.get_layer("class_token"), "keys": ["cls"]})
    matches.append(
        {
            "layer": model.get_layer("Transformer/encoder_norm"),
            "keys": [f"Transformer/encoder_norm/{name}" for name in ["scale", "bias"]],
        }
    )
    # Position embeddings go through apply_embedding_weights, which
    # resizes the grid when the model's token count differs.
    apply_embedding_weights(
        target_layer=model.get_layer("Transformer/posembed_input"),
        source_weights=params_dict["Transformer/posembed_input/pos_embedding"],
    )
    source_keys_used.append("Transformer/posembed_input/pos_embedding")
    for match in matches:
        source_keys_used.extend(match["keys"])
        source_weights = [params_dict[k] for k in match["keys"]]
        if match.get("reshape", False):
            # Reshape each source array to the target weight's shape
            # (same element count, different head/feature layout).
            source_weights = [
                source.reshape(expected.shape)
                for source, expected in zip(
                    source_weights, match["layer"].get_weights()
                )
            ]
        match["layer"].set_weights(source_weights)
    # Accounting: warn about archive keys never consumed, and about model
    # weights that were never assigned (counts are comparable because each
    # key maps to exactly one weight array).
    unused = set(source_keys).difference(source_keys_used)
    if unused:
        warnings.warn(f"Did not use the following weights: {unused}", UserWarning)
    target_keys_set = len(source_keys_used)
    target_keys_all = len(model.weights)
    if target_keys_set < target_keys_all:
        warnings.warn(
            f"Only set {target_keys_set} of {target_keys_all} weights.", UserWarning
        )
|