applied-ai-018 commited on
Commit
5c93e5c
·
verified ·
1 Parent(s): c032b9d

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/README.md +19 -0
  2. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/.gitignore +165 -0
  3. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/LICENSE +22 -0
  4. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/LICENSE-MODEL +85 -0
  5. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/README.md +105 -0
  6. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/environment.yaml +19 -0
  7. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/data/__init__.py +0 -0
  8. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/autoencoder.py +219 -0
  9. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/__init__.py +0 -0
  10. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddim.py +158 -0
  11. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddpm.py +1795 -0
  12. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/__init__.py +1 -0
  13. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/dpm_solver.py +1154 -0
  14. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/sampler.py +87 -0
  15. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpmpp_2m.py +78 -0
  16. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/plms.py +244 -0
  17. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/sampler.py +19 -0
  18. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/sampling_util.py +22 -0
  19. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/diffusionmodules/openaimodel.py +786 -0
  20. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/distributions/__init__.py +0 -0
  21. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/distributions/distributions.py +92 -0
  22. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/base_model.py +16 -0
  23. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/blocks.py +342 -0
  24. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/dpt_depth.py +109 -0
  25. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/midas_net.py +76 -0
  26. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/midas_net_custom.py +128 -0
  27. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/transforms.py +234 -0
  28. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/vit.py +491 -0
  29. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/util.py +197 -0
  30. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ops_bf16.txt +27 -0
  31. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ops_fp32.txt +44 -0
  32. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/requirements.txt +9 -0
  33. docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/setup.py +13 -0
  34. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/LICENSE +202 -0
  35. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/README.md +145 -0
  36. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/bin/simple-viewer +14 -0
  37. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/data_download.sh +7 -0
  38. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/train.py +520 -0
  39. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/utils.py +36 -0
  40. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/hb_utils.py +238 -0
  41. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/ops_bf16_bart.txt +15 -0
  42. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/ops_fp32_bart.txt +7 -0
  43. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/requirements.txt +7 -0
  44. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/setup.cfg +18 -0
  45. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/setup.py +41 -0
  46. docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/simpletransformers/seq2seq/seq2seq_utils.py +554 -0
  47. docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/LICENSE +204 -0
  48. docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/README.md +898 -0
  49. docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/bert_config.json +13 -0
  50. docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/bert_config_1.2B.json +13 -0
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/README.md ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Diffusion Model Details
2
+
3
 + This directory contains three stable diffusion projects. Each folder contains detailed instructions on how to use them. The stable-diffusion-v-2-1 project is specifically designed for inference, the stable-diffusion project is suitable for both inference and training, and stable-diffusion-finetuning is for training only.
4
+
5
 + Since stable-diffusion-v-2-1 is the newer version, it is recommended as the option for running `inference`. To run `training`, you can use the stable-diffusion project.
6
+
7
+ ### Overview:
8
+
9
+ * stable-diffusion-v-2-1: is the latest Habana-optimized version of stable diffusion (v2.1) and is based on https://github.com/Stability-AI/stablediffusion/tree/d55bcd4d31d0316fcbdf552f2fd2628fdc812500.
10
+ * stable-diffusion: is designed for both inference and training, based on the first version of stable diffusion https://github.com/pesser/stable-diffusion/tree/a166aa7fbf578f41f855efeab2e14001d6732563.
11
+ * stable-diffusion-finetuning: is designed for training on stable diffusion (v2.1) and is based on https://github.com/cloneofsimo/lora/tree/bdd51b04c49fa90a88919a19850ec3b4cf3c5ecd
12
+
13
+ ### Supported Configuration
14
+ | Project | SynapseAI Version | Mode |
15
+ |:---------|-------------------|-------|
16
+ | stable-diffusion-v-2-1 | 1.14.0 | Inference |
17
+ | stable-diffusion | 1.14.0 | Training |
18
+ | stable-diffusion | 1.7.1 | Inference |
19
+ | stable-diffusion-finetuning | 1.14.0 | Training |
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/.gitignore ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Generated by project
2
+ outputs/
3
+
4
+ # Byte-compiled / optimized / DLL files
5
+ __pycache__/
6
+ *.py[cod]
7
+ *$py.class
8
+
9
+ # C extensions
10
+ *.so
11
+
12
+ # General MacOS
13
+ .DS_Store
14
+ .AppleDouble
15
+ .LSOverride
16
+
17
+ # Distribution / packaging
18
+ .Python
19
+ build/
20
+ develop-eggs/
21
+ dist/
22
+ downloads/
23
+ eggs/
24
+ .eggs/
25
+ lib/
26
+ lib64/
27
+ parts/
28
+ sdist/
29
+ var/
30
+ wheels/
31
+ share/python-wheels/
32
+ *.egg-info/
33
+ .installed.cfg
34
+ *.egg
35
+ MANIFEST
36
+
37
+ # PyInstaller
38
+ # Usually these files are written by a python script from a template
39
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
40
+ *.manifest
41
+ *.spec
42
+
43
+ # Installer logs
44
+ pip-log.txt
45
+ pip-delete-this-directory.txt
46
+
47
+ # Unit test / coverage reports
48
+ htmlcov/
49
+ .tox/
50
+ .nox/
51
+ .coverage
52
+ .coverage.*
53
+ .cache
54
+ nosetests.xml
55
+ coverage.xml
56
+ *.cover
57
+ *.py,cover
58
+ .hypothesis/
59
+ .pytest_cache/
60
+ cover/
61
+
62
+ # Translations
63
+ *.mo
64
+ *.pot
65
+
66
+ # Django stuff:
67
+ *.log
68
+ local_settings.py
69
+ db.sqlite3
70
+ db.sqlite3-journal
71
+
72
+ # Flask stuff:
73
+ instance/
74
+ .webassets-cache
75
+
76
+ # Scrapy stuff:
77
+ .scrapy
78
+
79
+ # Sphinx documentation
80
+ docs/_build/
81
+
82
+ # PyBuilder
83
+ .pybuilder/
84
+ target/
85
+
86
+ # Jupyter Notebook
87
+ .ipynb_checkpoints
88
+
89
+ # IPython
90
+ profile_default/
91
+ ipython_config.py
92
+
93
+ # pyenv
94
+ # For a library or package, you might want to ignore these files since the code is
95
+ # intended to run in multiple environments; otherwise, check them in:
96
+ # .python-version
97
+
98
+ # pipenv
99
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
100
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
101
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
102
+ # install all needed dependencies.
103
+ #Pipfile.lock
104
+
105
+ # poetry
106
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
107
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
108
+ # commonly ignored for libraries.
109
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
110
+ #poetry.lock
111
+
112
+ # pdm
113
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
114
+ #pdm.lock
115
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
116
+ # in version control.
117
+ # https://pdm.fming.dev/#use-with-ide
118
+ .pdm.toml
119
+
120
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121
+ __pypackages__/
122
+
123
+ # Celery stuff
124
+ celerybeat-schedule
125
+ celerybeat.pid
126
+
127
+ # SageMath parsed files
128
+ *.sage.py
129
+
130
+ # Environments
131
+ .env
132
+ .venv
133
+ env/
134
+ venv/
135
+ ENV/
136
+ env.bak/
137
+ venv.bak/
138
+
139
+ # Spyder project settings
140
+ .spyderproject
141
+ .spyproject
142
+
143
+ # Rope project settings
144
+ .ropeproject
145
+
146
+ # mkdocs documentation
147
+ /site
148
+
149
+ # mypy
150
+ .mypy_cache/
151
+ .dmypy.json
152
+ dmypy.json
153
+
154
+ # Pyre type checker
155
+ .pyre/
156
+
157
+ # pytype static type analyzer
158
+ .pytype/
159
+
160
+ # Cython debug symbols
161
+ cython_debug/
162
+
163
+ # IDEs
164
+ .idea/
165
+ .vscode/
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/LICENSE ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (C) 2022 Habana Labs, Ltd. an Intel Company
4
+ Copyright (c) 2022 Stability AI
5
+
6
+ Permission is hereby granted, free of charge, to any person obtaining a copy
7
+ of this software and associated documentation files (the "Software"), to deal
8
+ in the Software without restriction, including without limitation the rights
9
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
+ copies of the Software, and to permit persons to whom the Software is
11
+ furnished to do so, subject to the following conditions:
12
+
13
+ The above copyright notice and this permission notice shall be included in all
14
+ copies or substantial portions of the Software.
15
+
16
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22
+ SOFTWARE.
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/LICENSE-MODEL ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (C) 2022 Habana Labs, Ltd. an Intel Company
2
+ Copyright (c) 2022 Stability AI and contributors
3
+
4
+ CreativeML Open RAIL++-M License
5
+ dated November 24, 2022
6
+
7
+ Section I: PREAMBLE
8
+
9
+ Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.
10
+
11
+ Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
12
+
13
+ In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.
14
+
15
+ Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.
16
+
17
+ This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
18
+
19
+ NOW THEREFORE, You and Licensor agree as follows:
20
+
21
+ 1. Definitions
22
+
23
+ - "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
24
+ - "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
25
+ - "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
26
+ - "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
27
+ - "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
28
+ - "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
29
+ - "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
30
+ - "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
31
+ - "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
32
+ - "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
33
+ - "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
34
+ - "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
35
+
36
+ Section II: INTELLECTUAL PROPERTY RIGHTS
37
+
38
+ Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
39
+
40
+ 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
41
+ 3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
42
+
43
+ Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
44
+
45
+ 4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
46
+ Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
47
+ You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
48
+ You must cause any modified files to carry prominent notices stating that You changed the files;
49
+ You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
50
+ You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
51
+ 5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
52
+ 6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
53
+
54
+ Section IV: OTHER PROVISIONS
55
+
56
+ 7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License.
57
+ 8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
58
+ 9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
59
+ 10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
60
+ 11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
61
+ 12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
62
+
63
+ END OF TERMS AND CONDITIONS
64
+
65
+
66
+
67
+
68
+ Attachment A
69
+
70
+ Use Restrictions
71
+
72
+ You agree not to use the Model or Derivatives of the Model:
73
+
74
+ - In any way that violates any applicable national, federal, state, local or international law or regulation;
75
+ - For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
76
+ - To generate or disseminate verifiably false information and/or content with the purpose of harming others;
77
+ - To generate or disseminate personal identifiable information that can be used to harm an individual;
78
+ - To defame, disparage or otherwise harass others;
79
+ - For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
80
+ - For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
81
+ - To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
82
+ - For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
83
+ - To provide medical advice and medical results interpretation;
84
+ - To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
85
+
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/README.md ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Stable Diffusion 2.1 for PyTorch
2
+
3
+ This directory provides scripts to perform text-to-image inference on a stable diffusion 2.1 model and is tested and maintained by Habana.
4
+
5
+ For more information on training and inference of deep learning models using Gaudi, refer to [developer.habana.ai](https://developer.habana.ai/resources/).
6
+
7
+ ## Table of Contents
8
+
9
+ * [Model-References](../../../README.md)
10
+ * [Model Overview](#model-overview)
11
+ * [Setup](#setup)
12
+ * [Model Checkpoint](#model-checkpoint)
13
+ * [Inference and Examples](#inference-and-examples)
14
+ * [Supported Configuration](#supported-configuration)
15
+ * [Changelog](#changelog)
16
+ * [Known Issues](#known-issues)
17
+
18
+ ## Model Overview
19
+ This implementation is based on the following paper - [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752).
20
+
21
+ ### How to use
22
+ Users acknowledge and understand that the models referenced by Habana are mere examples for models that can be run on Gaudi.
23
+ Users bear sole liability and responsibility to follow and comply with any third party licenses pertaining to such models,
24
 + and Habana Labs disclaims, and will not bear, any warranty or liability with respect to users' use or compliance with such third party licenses.
25
+
26
+ ## Setup
27
+ Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html)
28
+ to set up the environment including the `$PYTHON` environment variable. To achieve the best performance, please follow the methods outlined in the [Optimizing Training Platform guide](https://docs.habana.ai/en/latest/PyTorch/Model_Optimization_PyTorch/Optimization_in_Training_Platform.html).
29
+ The guides will walk you through the process of setting up your system to run the model on Gaudi.
30
+
31
+ ### Clone Habana Model-References
32
+ In the docker container, clone this repository and switch to the branch that matches your SynapseAI version.
33
+ You can run the [`hl-smi`](https://docs.habana.ai/en/latest/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options) utility to determine the SynapseAI version.
34
+ ```bash
35
+ git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References
36
+ ```
37
+
38
+ ### Install Model Requirements
39
+ 1. In the docker container, go to the model directory:
40
+ ```bash
41
+ cd Model-References/PyTorch/generative_models/stable-diffusion-v-2-1
42
+ ```
43
+
44
+ 2. Install the required packages using pip.
45
+ ```bash
46
+ pip install -r requirements.txt --user
47
+ ```
48
+
49
+ ## Model Checkpoint
50
+ ### Text-to-Image
51
+ Download the pre-trained weights for 768x768 images (4.9GB)
52
+ ```bash
53
+ wget https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt
54
+ ```
55
+ and/or 512x512 images (4.9GB).
56
+ ```bash
57
+ wget https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-ema-pruned.ckpt
58
+ ```
59
+
60
+ ## Inference and Examples
61
+ The following command generates a total of 3 images of size 768x768 and saves each sample individually as well as a grid of size `n_iter` x `n_samples` at the specified output location (default: `outputs/txt2img-samples`).
62
+
63
+ ```bash
64
+ $PYTHON scripts/txt2img.py --prompt "a professional photograph of an astronaut riding a horse" --ckpt v2-1_768-ema-pruned.ckpt --config configs/stable-diffusion/v2-inference-v.yaml --H 768 --W 768 --n_samples 1 --n_iter 3 --use_hpu_graph
65
+ ```
66
+ To generate 3 images of a 512x512 size using a k-diffusion dpmpp_2m sampler with 35 steps, use the command:
67
+ ```bash
68
+ $PYTHON scripts/txt2img.py --prompt "a professional photograph of an astronaut riding a horse" --ckpt v2-1_512-ema-pruned.ckpt --config configs/stable-diffusion/v2-inference.yaml --H 512 --W 512 --n_samples 1 --n_iter 3 --steps 35 --k_sampler dpmpp_2m --use_hpu_graph
69
+ ```
70
+
71
+ For a more detailed description of parameters, please use the following command to see a help message:
72
+ ```bash
73
+ $PYTHON scripts/txt2img.py -h
74
+ ```
75
+
76
+ ## Performance
77
 + The first two batches of images incur a performance penalty.
78
+ All subsequent batches will be generated much faster.
79
+
80
+ ## Supported Configuration
81
+ | Validated on | SynapseAI Version | PyTorch Version | Mode |
82
+ |---------|-------------------|-----------------|----------------|
83
+ | Gaudi | 1.14.0 | 2.1.1 | Inference |
84
+ | Gaudi2 | 1.14.0 | 2.1.1 | Inference |
85
+
86
+ ## Changelog
87
+ ### 1.8.0
88
+ Initial release.
89
+
90
+ ### 1.10.0
91
+ Decreased host overhead to minimum by rewriting samplers and the main sampling loop.
92
+
93
+ ### Script Modifications
94
+ Major changes done to the original model from [Stability-AI/stablediffusion](https://github.com/Stability-AI/stablediffusion/tree/d55bcd4d31d0316fcbdf552f2fd2628fdc812500) repository:
95
+ * Changed README.
96
+ * Added HPU support.
97
+ * Modified configs/stable-diffusion/v2-inference-v.yaml and configs/stable-diffusion/v2-inference.yaml
98
+ * Changed code around einsum operation in ldm/modules/attention.py
99
+ * randn moved to cpu in scripts/txt2img.py and ldm/models/diffusion/ddim.py
100
+ * sampling is rewritten in an accelerator-friendly way
101
+
102
+ ## Known Issues
103
+ * Initial random noise generation has been moved to CPU.
104
+ Contrary to when noise is generated on Gaudi, CPU-generated random noise produces consistent output regardless of whether HPU Graph API is used or not.
105
+ * The model supports batch sizes up to 16 on Gaudi and up to 8 on Gaudi2 for output images 512x512px, and batch size 1 for images 768x768px on Gaudi and Gaudi2.
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/environment.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: ldm
2
+ channels:
3
+ - pytorch
4
+ - defaults
5
+ dependencies:
6
+ - python=3.8.5
7
+ - pip=20.3
8
+ - cudatoolkit=11.3
9
+ - pytorch=1.12.1
10
+ - torchvision=0.13.1
11
+ - numpy=1.23.1
12
+ - pip:
13
+ - pytorch-lightning==1.7.7
14
+ - omegaconf==2.1.1
15
+ - test-tube>=0.7.5
16
+ - einops==0.3.0
17
+ - transformers==4.19.2
18
+ - open_clip_torch==2.7.0
19
+ - -e .
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/data/__init__.py ADDED
File without changes
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/autoencoder.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import pytorch_lightning as pl
3
+ import torch.nn.functional as F
4
+ from contextlib import contextmanager
5
+
6
+ from ldm.modules.diffusionmodules.model import Encoder, Decoder
7
+ from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
8
+
9
+ from ldm.util import instantiate_from_config
10
+ from ldm.modules.ema import LitEma
11
+
12
+
13
class AutoencoderKL(pl.LightningModule):
    """KL-regularized image autoencoder (the VAE "first stage" of latent diffusion).

    Encodes images into a DiagonalGaussianDistribution over latents and
    decodes latents back to image space.  Optionally keeps an EMA copy of
    the weights (``ema_decay``) and can be restored from a checkpoint.
    """

    def __init__(self,
                 ddconfig,
                 lossconfig,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=None,
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 ema_decay=None,
                 learn_logvar=False
                 ):
        """
        Args:
            ddconfig: encoder/decoder architecture config; must have double_z=True.
            lossconfig: config instantiated into the training loss module.
            embed_dim: dimensionality of the latent embedding.
            ckpt_path: optional checkpoint path to restore from.
            ignore_keys: state-dict key prefixes to skip when restoring.
                (Previously a mutable ``[]`` default; normalized to None.)
            image_key: batch dictionary key holding the input images.
            colorize_nlabels: if set, number of segmentation labels for the
                random RGB projection used when logging images.
            monitor: metric name used for checkpoint monitoring.
            ema_decay: if set (must be in (0, 1)), keep an EMA copy of weights.
            learn_logvar: if True, the loss log-variance is trained.
        """
        super().__init__()
        ignore_keys = [] if ignore_keys is None else ignore_keys  # avoid shared mutable default
        self.learn_logvar = learn_logvar
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        assert ddconfig["double_z"]
        self.quant_conv = torch.nn.Conv2d(2 * ddconfig["z_channels"], 2 * embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        self.embed_dim = embed_dim
        if colorize_nlabels is not None:
            assert type(colorize_nlabels) == int
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor

        self.use_ema = ema_decay is not None
        if self.use_ema:
            self.ema_decay = ema_decay
            assert 0. < ema_decay < 1.
            self.model_ema = LitEma(self, decay=ema_decay)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=None):
        """Load a state_dict from ``path``, skipping keys that match any prefix in ``ignore_keys``.

        Uses a single membership test per key so a key matching several
        prefixes is deleted only once (the original could ``del`` twice and
        raise KeyError).
        """
        ignore_keys = ignore_keys or []
        sd = torch.load(path, map_location="cpu")["state_dict"]
        for k in list(sd.keys()):
            if any(k.startswith(ik) for ik in ignore_keys):
                print("Deleting key {} from state_dict.".format(k))
                del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    @contextmanager
    def ema_scope(self, context=None):
        """Temporarily swap in the EMA weights; restores training weights on exit."""
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def on_train_batch_end(self, *args, **kwargs):
        # Update the EMA copy after every training batch.
        if self.use_ema:
            self.model_ema(self)

    def encode(self, x):
        """Encode images to a DiagonalGaussianDistribution over latents."""
        h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        return posterior

    def decode(self, z):
        """Decode latents back to image space."""
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        return dec

    def forward(self, input, sample_posterior=True):
        """Encode, sample (or take the mode of) the posterior, then decode.

        Returns:
            (decoded, posterior) tuple.
        """
        posterior = self.encode(input)
        z = posterior.sample() if sample_posterior else posterior.mode()
        dec = self.decode(z)
        return dec, posterior

    def get_input(self, batch, k):
        """Fetch images under key ``k`` from the batch and convert to NCHW float."""
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        """Alternating GAN-style step: optimizer 0 trains the autoencoder, 1 the discriminator."""
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)

        if optimizer_idx == 0:
            # train encoder+decoder+logvar
            aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return aeloss

        if optimizer_idx == 1:
            # train the discriminator
            discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")
            self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return discloss

    def validation_step(self, batch, batch_idx):
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            # run again under EMA weights purely for its metric-logging side effects
            self._validation_step(batch, batch_idx, postfix="_ema")
        return log_dict

    def _validation_step(self, batch, batch_idx, postfix=""):
        """Shared validation body; ``postfix`` distinguishes raw vs. EMA metrics."""
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)
        aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
                                        last_layer=self.get_last_layer(), split="val"+postfix)

        discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val"+postfix)

        self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        # NOTE(review): returns the bound log_dict method, as in the original;
        # kept for backward compatibility with any caller relying on it.
        return self.log_dict

    def configure_optimizers(self):
        """Two Adam optimizers: one for the autoencoder (+logvar), one for the discriminator."""
        lr = self.learning_rate
        ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(
            self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())
        if self.learn_logvar:
            print(f"{self.__class__.__name__}: Learning logvar")
            ae_params_list.append(self.loss.logvar)
        opt_ae = torch.optim.Adam(ae_params_list,
                                  lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        # Used by the loss for adaptive weighting of the adversarial term.
        return self.decoder.conv_out.weight

    @torch.no_grad()
    def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):
        """Build a dict of input / reconstruction / sample images for logging."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if not only_inputs:
            xrec, posterior = self(x)
            if x.shape[1] > 3:
                # colorize with random projection
                assert xrec.shape[1] > 3
                x = self.to_rgb(x)
                xrec = self.to_rgb(xrec)
            log["samples"] = self.decode(torch.randn_like(posterior.sample()))
            log["reconstructions"] = xrec
            if log_ema or self.use_ema:
                with self.ema_scope():
                    xrec_ema, posterior_ema = self(x)
                    if x.shape[1] > 3:
                        # colorize with random projection
                        assert xrec_ema.shape[1] > 3
                        xrec_ema = self.to_rgb(xrec_ema)
                    log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample()))
                    log["reconstructions_ema"] = xrec_ema
        log["inputs"] = x
        return log

    def to_rgb(self, x):
        """Project a multi-channel segmentation tensor to RGB via a fixed random projection."""
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x
199
+
200
+
201
class IdentityFirstStage(torch.nn.Module):
    """Pass-through "first stage": encode/decode/quantize all return the input unchanged.

    Used when the diffusion model should operate directly on the input space
    without a learned autoencoder.  With ``vq_interface=True``, ``quantize``
    mimics a VQ model's ``(codes, loss, info)`` return signature.
    """

    def __init__(self, *args, vq_interface=False, **kwargs):
        # Fix: call nn.Module.__init__ before setting attributes.  The
        # original assigned vq_interface first, relying on
        # nn.Module.__setattr__'s fall-through before initialization.
        super().__init__()
        self.vq_interface = vq_interface

    def encode(self, x, *args, **kwargs):
        """Identity encode."""
        return x

    def decode(self, x, *args, **kwargs):
        """Identity decode."""
        return x

    def quantize(self, x, *args, **kwargs):
        """Identity quantize; with vq_interface, match a VQ model's return shape."""
        if self.vq_interface:
            return x, None, [None, None, None]
        return x

    def forward(self, x, *args, **kwargs):
        """Identity forward."""
        return x
219
+
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/__init__.py ADDED
File without changes
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddim.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ import torch
5
+ import numpy as np
6
+
7
+ from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
8
+ from ldm.models.diffusion.sampler import Sampler
9
+
10
+
11
class DDIMSampler(Sampler):
    """DDIM sampler restructured for accelerator-friendly execution.

    All schedule quantities are pre-stacked into a single parameter matrix in
    :meth:`compile`, so each :meth:`sampler_step` consumes column 0 of fixed-
    shape buffers and rolls them left — the per-step graph never changes.
    """

    def __init__(self, model, schedule="linear", **kwargs):
        super().__init__()
        self.model = model
        self.model_wrap = model.apply_model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule
        self.rand_scale = 1.0

    def register_buffer(self, name, attr):
        """Store ``attr`` as a plain attribute, moving tensors to CUDA only when the model runs there."""
        if self.model.device == "cuda":
            if type(attr) == torch.Tensor:
                if attr.device != torch.device("cuda"):
                    attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        """Precompute the DDIM timestep subsequence and all derived schedule tensors."""
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'

        def to_torch(x): return x.clone().detach().to(
            torch.float32).to(self.model.device)

        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(
            self.model.alphas_cumprod_prev))

        # quantities for the forward diffusion q(x_t | x_{t-1}) and inverses
        self.register_buffer('sqrt_alphas_cumprod',
                             to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(
            np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod',
                             to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod',
                             to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(
            np.sqrt(1. / alphas_cumprod.cpu() - 1)))

        # DDIM-specific sampling parameters on the reduced timestep grid
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas',
                             np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps',
                             sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def compile(self,
                S,
                shape,
                batch_size=1,
                eta=0.,
                temperature=1.,
                verbose=False,
                unconditional_guidance_scale=1.,
                use_original_steps=False,
                **kwargs
                ):
        """Freeze all sampling hyper-parameters and pre-stack schedule columns.

        After this call, the per-step state is (x, cond, ts_list, params) where
        ``params`` holds one row per named schedule quantity and one column per
        step, in reverse (denoising) order.
        """
        self.steps = S
        self.batch_size = batch_size
        self.shape = shape
        self.eta = eta
        self.temperature = temperature
        self.cond_scale = unconditional_guidance_scale
        self.x_shape = (self.batch_size,
                        self.shape[0], self.shape[1], self.shape[2])

        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for DDIM sampling is {size}, eta {eta}')

        # timesteps laid out as a row vector, reversed so column 0 is the first denoise step
        self.ts_list = torch.Tensor(
            np.expand_dims(self.ddim_timesteps, axis=0))
        self.ts_list = self.ts_list.fliplr().to(torch.int32).to(self.model.device)

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
        alphas_prev = torch.Tensor(alphas_prev)

        # (name, per-step values) pairs; the names index into `param` in one_step
        self.params_init = [
            ('alpha', alphas),
            ('alpha_prev', alphas_prev),
            ('rsqrt(alpha)', alphas.rsqrt()),
            ('sqrt(alpha_prev)', alphas_prev.sqrt()),
            ('sqrt(1-alpha)', sqrt_one_minus_alphas),
            ('sigma', torch.Tensor(sigmas)),
            ('dir', torch.sqrt(1. - alphas_prev - sigmas**2))
        ]

        self.params = torch.stack([values for _, values in self.params_init])
        self.params = self.params.fliplr().to(
            self.model.betas.dtype).to(self.model.device)

    def one_step(self, x, c_in, ts_t, param_t):
        """Perform a single DDIM update using the schedule values in column ``param_t``."""
        ts = ts_t[0].broadcast_to((self.batch_size,)).contiguous()

        # expand each scalar schedule value to a broadcastable (B,1,1,1) tensor
        param = {key: param_t[idx].broadcast_to(
                     (self.batch_size, 1, 1, 1)).contiguous()
                 for idx, (key, _) in enumerate(self.params_init)}

        model_output = self.run_model(x, c_in, ts)

        if self.model.parameterization == "v":
            # v-prediction: recover both eps and x_0 from the model output
            e_t = self.model.predict_eps_from_z_and_v(x, ts, model_output)
            pred_x0 = self.model.predict_start_from_z_and_v(
                x, ts, model_output)
        else:
            # eps-prediction: model output is the noise estimate directly
            e_t = model_output
            pred_x0 = (x - param['sqrt(1-alpha)'] *
                       e_t) * param['rsqrt(alpha)']

        # direction pointing to x_t
        dir_xt = param['dir'] * e_t
        noise = param['sigma'] * \
            noise_like(x.shape, self.model.device, False) * self.temperature
        x = param['sqrt(alpha_prev)'] * pred_x0 + dir_xt + noise
        return x

    def sampler_step(self, arg):
        """One loop iteration: consume column 0, then rotate the schedule buffers left."""
        x, c_in, ts, params = arg
        x = self.one_step(x, c_in, ts[:, 0], params[:, 0])
        ts = torch.roll(ts, shifts=-1, dims=1)
        params = torch.roll(params, shifts=-1, dims=1)
        return [x, c_in, ts, params]

    def init_loop(self, x, c_in):
        """Build the initial loop state from noise ``x`` and conditioning ``c_in``."""
        return [x, c_in, self.ts_list.clone(), self.params.clone()]
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddpm.py ADDED
@@ -0,0 +1,1795 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ wild mixture of
3
+ https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
4
+ https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
5
+ https://github.com/CompVis/taming-transformers
6
+ -- merci
7
+ """
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import numpy as np
12
+ import pytorch_lightning as pl
13
+ from torch.optim.lr_scheduler import LambdaLR
14
+ from einops import rearrange, repeat
15
+ from contextlib import contextmanager, nullcontext
16
+ from functools import partial
17
+ import itertools
18
+ from tqdm import tqdm
19
+ from torchvision.utils import make_grid
20
+ from pytorch_lightning.utilities.rank_zero import rank_zero_only
21
+ from omegaconf import ListConfig
22
+
23
+ from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
24
+ from ldm.modules.ema import LitEma
25
+ from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
26
+ from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
27
+ from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
28
+ from ldm.models.diffusion.ddim import DDIMSampler
29
+
30
+
31
+ __conditioning_keys__ = {'concat': 'c_concat',
32
+ 'crossattn': 'c_crossattn',
33
+ 'adm': 'y'}
34
+
35
+
36
def disabled_train(self, mode=True):
    """No-op replacement for ``nn.Module.train``.

    Installed on frozen submodules so later ``.train()`` / ``.eval()`` calls
    can no longer flip their training mode; always returns ``self`` like the
    method it replaces.
    """
    return self
40
+
41
+
42
def uniform_on_device(r1, r2, shape, device):
    """Draw a tensor of the given ``shape`` uniformly from the interval between ``r2`` and ``r1``."""
    u = torch.rand(*shape, device=device)
    return u * (r1 - r2) + r2
44
+
45
+
46
+ class DDPM(pl.LightningModule):
47
+ # classic DDPM with Gaussian diffusion, in image space
48
+ def __init__(self,
49
+ unet_config,
50
+ timesteps=1000,
51
+ beta_schedule="linear",
52
+ loss_type="l2",
53
+ ckpt_path=None,
54
+ ignore_keys=[],
55
+ load_only_unet=False,
56
+ monitor="val/loss",
57
+ use_ema=True,
58
+ first_stage_key="image",
59
+ image_size=256,
60
+ channels=3,
61
+ log_every_t=100,
62
+ clip_denoised=True,
63
+ linear_start=1e-4,
64
+ linear_end=2e-2,
65
+ cosine_s=8e-3,
66
+ given_betas=None,
67
+ original_elbo_weight=0.,
68
+ v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
69
+ l_simple_weight=1.,
70
+ conditioning_key=None,
71
+ parameterization="eps", # all assuming fixed variance schedules
72
+ scheduler_config=None,
73
+ use_positional_encodings=False,
74
+ learn_logvar=False,
75
+ logvar_init=0.,
76
+ make_it_fit=False,
77
+ ucg_training=None,
78
+ reset_ema=False,
79
+ reset_num_ema_updates=False,
80
+ ):
81
+ super().__init__()
82
+ assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
83
+ self.parameterization = parameterization
84
+ print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
85
+ self.cond_stage_model = None
86
+ self.clip_denoised = clip_denoised
87
+ self.log_every_t = log_every_t
88
+ self.first_stage_key = first_stage_key
89
+ self.image_size = image_size # try conv?
90
+ self.channels = channels
91
+ self.use_positional_encodings = use_positional_encodings
92
+ self.model = DiffusionWrapper(unet_config, conditioning_key)
93
+ count_params(self.model, verbose=True)
94
+ self.use_ema = use_ema
95
+ if self.use_ema:
96
+ self.model_ema = LitEma(self.model)
97
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
98
+
99
+ self.use_scheduler = scheduler_config is not None
100
+ if self.use_scheduler:
101
+ self.scheduler_config = scheduler_config
102
+
103
+ self.v_posterior = v_posterior
104
+ self.original_elbo_weight = original_elbo_weight
105
+ self.l_simple_weight = l_simple_weight
106
+
107
+ if monitor is not None:
108
+ self.monitor = monitor
109
+ self.make_it_fit = make_it_fit
110
+ if reset_ema: assert exists(ckpt_path)
111
+ if ckpt_path is not None:
112
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
113
+ if reset_ema:
114
+ assert self.use_ema
115
+ print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
116
+ self.model_ema = LitEma(self.model)
117
+ if reset_num_ema_updates:
118
+ print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
119
+ assert self.use_ema
120
+ self.model_ema.reset_num_updates()
121
+
122
+ self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
123
+ linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
124
+
125
+ self.loss_type = loss_type
126
+
127
+ self.learn_logvar = learn_logvar
128
+ self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
129
+ if self.learn_logvar:
130
+ self.logvar = nn.Parameter(self.logvar, requires_grad=True)
131
+
132
+ self.ucg_training = ucg_training or dict()
133
+ if self.ucg_training:
134
+ self.ucg_prng = np.random.RandomState()
135
+
136
+ def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
137
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
138
+ if exists(given_betas):
139
+ betas = given_betas
140
+ else:
141
+ betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
142
+ cosine_s=cosine_s)
143
+ alphas = 1. - betas
144
+ alphas_cumprod = np.cumprod(alphas, axis=0)
145
+ alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
146
+
147
+ timesteps, = betas.shape
148
+ self.num_timesteps = int(timesteps)
149
+ self.linear_start = linear_start
150
+ self.linear_end = linear_end
151
+ assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
152
+
153
+ to_torch = partial(torch.tensor, dtype=torch.float32)
154
+
155
+ self.register_buffer('betas', to_torch(betas))
156
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
157
+ self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
158
+
159
+ # calculations for diffusion q(x_t | x_{t-1}) and others
160
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
161
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
162
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
163
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
164
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
165
+
166
+ # calculations for posterior q(x_{t-1} | x_t, x_0)
167
+ posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
168
+ 1. - alphas_cumprod) + self.v_posterior * betas
169
+ # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
170
+ self.register_buffer('posterior_variance', to_torch(posterior_variance))
171
+ # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
172
+ self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
173
+ self.register_buffer('posterior_mean_coef1', to_torch(
174
+ betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
175
+ self.register_buffer('posterior_mean_coef2', to_torch(
176
+ (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
177
+
178
+ if self.parameterization == "eps":
179
+ lvlb_weights = self.betas ** 2 / (
180
+ 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
181
+ elif self.parameterization == "x0":
182
+ lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
183
+ elif self.parameterization == "v":
184
+ lvlb_weights = torch.ones_like(self.betas ** 2 / (
185
+ 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
186
+ else:
187
+ raise NotImplementedError("mu not supported")
188
+ lvlb_weights[0] = lvlb_weights[1]
189
+ self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
190
+ assert not torch.isnan(self.lvlb_weights).all()
191
+
192
+ @contextmanager
193
+ def ema_scope(self, context=None):
194
+ if self.use_ema:
195
+ self.model_ema.store(self.model.parameters())
196
+ self.model_ema.copy_to(self.model)
197
+ if context is not None:
198
+ print(f"{context}: Switched to EMA weights")
199
+ try:
200
+ yield None
201
+ finally:
202
+ if self.use_ema:
203
+ self.model_ema.restore(self.model.parameters())
204
+ if context is not None:
205
+ print(f"{context}: Restored training weights")
206
+
207
+ @torch.no_grad()
208
+ def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
209
+ sd = torch.load(path, map_location="cpu")
210
+ if "state_dict" in list(sd.keys()):
211
+ sd = sd["state_dict"]
212
+ keys = list(sd.keys())
213
+ for k in keys:
214
+ for ik in ignore_keys:
215
+ if k.startswith(ik):
216
+ print("Deleting key {} from state_dict.".format(k))
217
+ del sd[k]
218
+ if self.make_it_fit:
219
+ n_params = len([name for name, _ in
220
+ itertools.chain(self.named_parameters(),
221
+ self.named_buffers())])
222
+ for name, param in tqdm(
223
+ itertools.chain(self.named_parameters(),
224
+ self.named_buffers()),
225
+ desc="Fitting old weights to new weights",
226
+ total=n_params
227
+ ):
228
+ if not name in sd:
229
+ continue
230
+ old_shape = sd[name].shape
231
+ new_shape = param.shape
232
+ assert len(old_shape) == len(new_shape)
233
+ if len(new_shape) > 2:
234
+ # we only modify first two axes
235
+ assert new_shape[2:] == old_shape[2:]
236
+ # assumes first axis corresponds to output dim
237
+ if not new_shape == old_shape:
238
+ new_param = param.clone()
239
+ old_param = sd[name]
240
+ if len(new_shape) == 1:
241
+ for i in range(new_param.shape[0]):
242
+ new_param[i] = old_param[i % old_shape[0]]
243
+ elif len(new_shape) >= 2:
244
+ for i in range(new_param.shape[0]):
245
+ for j in range(new_param.shape[1]):
246
+ new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]]
247
+
248
+ n_used_old = torch.ones(old_shape[1])
249
+ for j in range(new_param.shape[1]):
250
+ n_used_old[j % old_shape[1]] += 1
251
+ n_used_new = torch.zeros(new_shape[1])
252
+ for j in range(new_param.shape[1]):
253
+ n_used_new[j] = n_used_old[j % old_shape[1]]
254
+
255
+ n_used_new = n_used_new[None, :]
256
+ while len(n_used_new.shape) < len(new_shape):
257
+ n_used_new = n_used_new.unsqueeze(-1)
258
+ new_param /= n_used_new
259
+
260
+ sd[name] = new_param
261
+
262
+ missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
263
+ sd, strict=False)
264
+ print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
265
+ if len(missing) > 0:
266
+ print(f"Missing Keys:\n {missing}")
267
+ if len(unexpected) > 0:
268
+ print(f"\nUnexpected Keys:\n {unexpected}")
269
+
270
+ def q_mean_variance(self, x_start, t):
271
+ """
272
+ Get the distribution q(x_t | x_0).
273
+ :param x_start: the [N x C x ...] tensor of noiseless inputs.
274
+ :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
275
+ :return: A tuple (mean, variance, log_variance), all of x_start's shape.
276
+ """
277
+ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
278
+ variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
279
+ log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
280
+ return mean, variance, log_variance
281
+
282
+ def predict_start_from_noise(self, x_t, t, noise):
283
+ return (
284
+ extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
285
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
286
+ )
287
+
288
    def predict_start_from_z_and_v(self, x_t, t, v):
        """Recover the x_0 estimate from x_t and a model output in the 'v'
        parameterization: x_0 = sqrt(a_cum) * x_t - sqrt(1 - a_cum) * v."""
        return (
                extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
        )
295
+
296
    def predict_eps_from_z_and_v(self, x_t, t, v):
        """Recover the eps estimate from x_t and a 'v'-parameterized output:
        eps = sqrt(a_cum) * v + sqrt(1 - a_cum) * x_t."""
        return (
                extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
        )
301
+
302
    def q_posterior(self, x_start, x_t, t):
        """Compute mean/variance of the diffusion posterior q(x_{t-1} | x_t, x_0)
        from the precomputed schedule buffers."""
        posterior_mean = (
                extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
                extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
        # log-variance is clipped at registration time to avoid log(0) at t=0
        posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped
310
+
311
+ def p_mean_variance(self, x, t, clip_denoised: bool):
312
+ model_out = self.model(x, t)
313
+ if self.parameterization == "eps":
314
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
315
+ elif self.parameterization == "x0":
316
+ x_recon = model_out
317
+ if clip_denoised:
318
+ x_recon.clamp_(-1., 1.)
319
+
320
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
321
+ return model_mean, posterior_variance, posterior_log_variance
322
+
323
    @torch.no_grad()
    def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
        """Draw one ancestral sample x_{t-1} ~ p(x_{t-1} | x_t)."""
        b, *_, device = *x.shape, x.device
        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
        noise = noise_like(x.shape, device, repeat_noise)
        # no noise when t == 0 (final step returns the mean deterministically)
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
331
+
332
    @torch.no_grad()
    def p_sample_loop(self, shape, return_intermediates=False):
        """Run the full reverse process from pure noise down to t=0.

        :param shape: output tensor shape (b, c, h, w).
        :param return_intermediates: also return snapshots every log_every_t steps.
        """
        device = self.betas.device
        b = shape[0]
        img = torch.randn(shape, device=device)
        intermediates = [img]
        for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
            img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
                                clip_denoised=self.clip_denoised)
            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                intermediates.append(img)
        if return_intermediates:
            return img, intermediates
        return img
346
+
347
+ @torch.no_grad()
348
+ def sample(self, batch_size=16, return_intermediates=False):
349
+ image_size = self.image_size
350
+ channels = self.channels
351
+ return self.p_sample_loop((batch_size, channels, image_size, image_size),
352
+ return_intermediates=return_intermediates)
353
+
354
    def q_sample(self, x_start, t, noise=None):
        """Diffuse x_start to step t in closed form:
        x_t = sqrt(a_cum) * x_0 + sqrt(1 - a_cum) * noise."""
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
358
+
359
    def get_v(self, x, noise, t):
        """Compute the 'v' prediction target:
        v = sqrt(a_cum) * eps - sqrt(1 - a_cum) * x."""
        return (
                extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
        )
364
+
365
+ def get_loss(self, pred, target, mean=True):
366
+ if self.loss_type == 'l1':
367
+ loss = (target - pred).abs()
368
+ if mean:
369
+ loss = loss.mean()
370
+ elif self.loss_type == 'l2':
371
+ if mean:
372
+ loss = torch.nn.functional.mse_loss(target, pred)
373
+ else:
374
+ loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
375
+ else:
376
+ raise NotImplementedError("unknown loss type '{loss_type}'")
377
+
378
+ return loss
379
+
380
    def p_losses(self, x_start, t, noise=None):
        """Diffuse x_start to step t, run the model, and compute the training
        losses (simple loss plus VLB-weighted term), returning (loss, loss_dict)."""
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_out = self.model(x_noisy, t)

        loss_dict = {}
        # Target depends on what quantity the model is trained to predict.
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        elif self.parameterization == "v":
            target = self.get_v(x_start, noise, t)
        else:
            raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")

        # per-sample loss: mean over channel/spatial dims only
        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = 'train' if self.training else 'val'

        loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        # timestep-dependent reweighting of the same loss for the VLB term
        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f'{log_prefix}/loss': loss})

        return loss, loss_dict
410
+
411
    def forward(self, x, *args, **kwargs):
        """Sample a uniform random timestep per batch element and return the
        training losses from p_losses."""
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        return self.p_losses(x, t, *args, **kwargs)
416
+
417
    def get_input(self, batch, k):
        """Fetch batch[k] as a contiguous float BCHW tensor (a missing channel
        dim on HxW input is added before the rearrange)."""
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = rearrange(x, 'b h w c -> b c h w')
        x = x.to(memory_format=torch.contiguous_format).float()
        return x
424
+
425
    def shared_step(self, batch):
        """Common train/val step: fetch the image tensor and compute losses."""
        x = self.get_input(batch, self.first_stage_key)
        loss, loss_dict = self(x)
        return loss, loss_dict
429
+
430
    def training_step(self, batch, batch_idx):
        """Lightning training step: optional unconditional-guidance dropout of
        conditioning entries, then loss computation and logging."""
        # With probability p, replace conditioning entry `k` with a fixed value
        # (empty string when val is None) so the model also learns an
        # unconditional score for classifier-free guidance.
        for k in self.ucg_training:
            p = self.ucg_training[k]["p"]
            val = self.ucg_training[k]["val"]
            if val is None:
                val = ""
            for i in range(len(batch[k])):
                if self.ucg_prng.choice(2, p=[1 - p, p]):
                    batch[k][i] = val

        loss, loss_dict = self.shared_step(batch)

        self.log_dict(loss_dict, prog_bar=True,
                      logger=True, on_step=True, on_epoch=True)

        self.log("global_step", self.global_step,
                 prog_bar=True, logger=True, on_step=True, on_epoch=False)

        if self.use_scheduler:
            # surface the current scheduler-driven learning rate
            lr = self.optimizers().param_groups[0]['lr']
            self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)

        return loss
453
+
454
    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Log validation losses twice: with the raw weights and with the EMA
        weights swapped in (keys suffixed '_ema')."""
        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            _, loss_dict_ema = self.shared_step(batch)
            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
462
+
463
    def on_train_batch_end(self, *args, **kwargs):
        """Update the EMA shadow weights after every optimizer step."""
        if self.use_ema:
            self.model_ema(self.model)
466
+
467
    def _get_rows_from_list(self, samples):
        """Stack a list of image batches into a single grid with one row per
        batch element and one column per list entry."""
        n_imgs_per_row = len(samples)
        denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid
473
+
474
    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        """Assemble a dict of image grids for logging: inputs, a forward
        diffusion row and, optionally, samples with their denoising row.

        :param N: max number of images taken from the batch.
        :param n_row: number of rows in the diffusion grid.
        :param sample: also draw samples under the EMA weights.
        :param return_keys: if given, restrict the returned dict to these keys.
        """
        log = dict()
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log["inputs"] = x

        # get diffusion row: progressively noised versions of the first n_row inputs
        diffusion_row = list()
        x_start = x[:n_row]

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row, sampled under the EMA weights
            with self.ema_scope("Plotting"):
                samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)

            log["samples"] = samples
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            # if none of the requested keys exist, return everything instead
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log
511
+
512
+ def configure_optimizers(self):
513
+ lr = self.learning_rate
514
+ params = list(self.model.parameters())
515
+ if self.learn_logvar:
516
+ params = params + [self.logvar]
517
+ opt = torch.optim.AdamW(params, lr=lr)
518
+ return opt
519
+
520
+
521
+ class LatentDiffusion(DDPM):
522
+ """main class"""
523
+
524
+ def __init__(self,
525
+ first_stage_config,
526
+ cond_stage_config,
527
+ num_timesteps_cond=None,
528
+ cond_stage_key="image",
529
+ cond_stage_trainable=False,
530
+ concat_mode=True,
531
+ cond_stage_forward=None,
532
+ conditioning_key=None,
533
+ scale_factor=1.0,
534
+ scale_by_std=False,
535
+ force_null_conditioning=False,
536
+ *args, **kwargs):
537
+ self.force_null_conditioning = force_null_conditioning
538
+ self.num_timesteps_cond = default(num_timesteps_cond, 1)
539
+ self.scale_by_std = scale_by_std
540
+ assert self.num_timesteps_cond <= kwargs['timesteps']
541
+ # for backwards compatibility after implementation of DiffusionWrapper
542
+ if conditioning_key is None:
543
+ conditioning_key = 'concat' if concat_mode else 'crossattn'
544
+ if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:
545
+ conditioning_key = None
546
+ ckpt_path = kwargs.pop("ckpt_path", None)
547
+ reset_ema = kwargs.pop("reset_ema", False)
548
+ reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False)
549
+ ignore_keys = kwargs.pop("ignore_keys", [])
550
+ super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
551
+ self.concat_mode = concat_mode
552
+ self.cond_stage_trainable = cond_stage_trainable
553
+ self.cond_stage_key = cond_stage_key
554
+ try:
555
+ self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
556
+ except:
557
+ self.num_downs = 0
558
+ if not scale_by_std:
559
+ self.scale_factor = scale_factor
560
+ else:
561
+ self.register_buffer('scale_factor', torch.tensor(scale_factor))
562
+ self.instantiate_first_stage(first_stage_config)
563
+ self.instantiate_cond_stage(cond_stage_config)
564
+ self.cond_stage_forward = cond_stage_forward
565
+ self.clip_denoised = False
566
+ self.bbox_tokenizer = None
567
+
568
+ self.restarted_from_ckpt = False
569
+ if ckpt_path is not None:
570
+ self.init_from_ckpt(ckpt_path, ignore_keys)
571
+ self.restarted_from_ckpt = True
572
+ if reset_ema:
573
+ assert self.use_ema
574
+ print(
575
+ f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
576
+ self.model_ema = LitEma(self.model)
577
+ if reset_num_ema_updates:
578
+ print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
579
+ assert self.use_ema
580
+ self.model_ema.reset_num_updates()
581
+
582
+ def make_cond_schedule(self, ):
583
+ self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
584
+ ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
585
+ self.cond_ids[:self.num_timesteps_cond] = ids
586
+
587
    @rank_zero_only
    @torch.no_grad()
    def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
        """On the very first batch of a fresh run, set the latent rescaling
        factor to 1/std of the encoded batch (std-rescaling)."""
        # only for very first batch
        if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
            assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
            # set rescale weight to 1./std of encodings
            print("### USING STD-RESCALING ###")
            x = super().get_input(batch, self.first_stage_key)
            x = x.to(self.device)
            encoder_posterior = self.encode_first_stage(x)
            z = self.get_first_stage_encoding(encoder_posterior).detach()
            # replace the buffer registered in __init__ with the measured value
            del self.scale_factor
            self.register_buffer('scale_factor', 1. / z.flatten().std())
            print(f"setting self.scale_factor to {self.scale_factor}")
            print("### USING STD-RESCALING ###")
603
+
604
    def register_schedule(self,
                          given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        """Register the noise schedule (delegated to the base class) and build
        the shortened conditioning schedule when num_timesteps_cond > 1."""
        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)

        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()
612
+
613
    def instantiate_first_stage(self, config):
        """Build the first-stage autoencoder, freeze it and force eval mode
        (its .train is overridden so Lightning cannot switch it back)."""
        model = instantiate_from_config(config)
        self.first_stage_model = model.eval()
        self.first_stage_model.train = disabled_train
        for param in self.first_stage_model.parameters():
            param.requires_grad = False
619
+
620
    def instantiate_cond_stage(self, config):
        """Build the conditioning model. Non-trainable cond stages are frozen
        in eval mode; the sentinels '__is_first_stage__' and
        '__is_unconditional__' reuse the autoencoder / disable conditioning."""
        if not self.cond_stage_trainable:
            if config == "__is_first_stage__":
                print("Using first stage also as cond stage.")
                self.cond_stage_model = self.first_stage_model
            elif config == "__is_unconditional__":
                print(f"Training {self.__class__.__name__} as an unconditional model.")
                self.cond_stage_model = None
            else:
                model = instantiate_from_config(config)
                self.cond_stage_model = model.eval()
                self.cond_stage_model.train = disabled_train
                for param in self.cond_stage_model.parameters():
                    param.requires_grad = False
        else:
            # a trainable cond stage must be a real model, not a sentinel
            assert config != '__is_first_stage__'
            assert config != '__is_unconditional__'
            model = instantiate_from_config(config)
            self.cond_stage_model = model
640
+
641
    def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
        """Decode a list of latents through the first stage and arrange the
        decoded images into one grid (one column per list entry)."""
        denoise_row = []
        for zd in tqdm(samples, desc=desc):
            denoise_row.append(self.decode_first_stage(zd.to(self.device),
                                                       force_not_quantize=force_no_decoder_quantization))
        n_imgs_per_row = len(denoise_row)
        denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
        denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid
652
+
653
    def get_first_stage_encoding(self, encoder_posterior):
        """Turn the encoder output into a latent tensor (sampling when it is a
        distribution) and apply the latent scale factor."""
        if isinstance(encoder_posterior, DiagonalGaussianDistribution):
            z = encoder_posterior.sample()
        elif isinstance(encoder_posterior, torch.Tensor):
            z = encoder_posterior
        else:
            raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
        return self.scale_factor * z
661
+
662
    def get_learned_conditioning(self, c):
        """Run raw conditioning input through the cond-stage model, using its
        `encode` method when available (taking the mode of a returned
        distribution), or a configured forward method name instead."""
        if self.cond_stage_forward is None:
            if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
                c = self.cond_stage_model.encode(c)
                if isinstance(c, DiagonalGaussianDistribution):
                    c = c.mode()
            else:
                c = self.cond_stage_model(c)
        else:
            assert hasattr(self.cond_stage_model, self.cond_stage_forward)
            c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
        return c
674
+
675
+ def meshgrid(self, h, w):
676
+ y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
677
+ x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
678
+
679
+ arr = torch.cat([y, x], dim=-1)
680
+ return arr
681
+
682
    def delta_border(self, h, w):
        """
        :param h: height
        :param w: width
        :return: normalized distance to image border,
         with min distance = 0 at border and max dist = 0.5 at image center
        """
        lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
        # normalize coordinates into [0, 1] per axis
        arr = self.meshgrid(h, w) / lower_right_corner
        dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
        dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
        # for each pixel: the smallest of its four border distances
        edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
        return edge_dist
695
+
696
    def get_weighting(self, h, w, Ly, Lx, device):
        """Per-pixel blend weights for overlapping crops: border-distance
        weighting, clipped to the configured range, optionally multiplied by a
        tie-breaker weighting over the crop grid (Ly x Lx)."""
        weighting = self.delta_border(h, w)
        weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
                               self.split_input_params["clip_max_weight"], )
        weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)

        if self.split_input_params["tie_braker"]:
            L_weighting = self.delta_border(Ly, Lx)
            L_weighting = torch.clip(L_weighting,
                                     self.split_input_params["clip_min_tie_weight"],
                                     self.split_input_params["clip_max_tie_weight"])

            L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
            weighting = weighting * L_weighting
        return weighting
711
+
712
    def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
        """
        Build the unfold (image -> overlapping crops) and fold (crops -> image)
        operators plus blend weights for tiled processing.
        :param x: img of size (bs, c, h, w)
        :param uf: upscale factor applied between unfold and fold (decoder path)
        :param df: downscale factor applied between unfold and fold (encoder path)
        :return: (fold, unfold, normalization, weighting); `normalization`
                 divides out the summed overlap weights after folding.
        """
        bs, nc, h, w = x.shape

        # number of crops in image
        Ly = (h - kernel_size[0]) // stride[0] + 1
        Lx = (w - kernel_size[1]) // stride[1] + 1

        if uf == 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

            weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))

        elif uf > 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            # NOTE(review): fold_params2 uses kernel_size[0] for both spatial
            # dims — only correct for square kernels; confirm against callers.
            fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
                                dilation=1, padding=0,
                                stride=(stride[0] * uf, stride[1] * uf))
            fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))

        elif df > 1 and uf == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
                                dilation=1, padding=0,
                                stride=(stride[0] // df, stride[1] // df))
            fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))

        else:
            raise NotImplementedError

        return fold, unfold, normalization, weighting
763
+
764
    @torch.no_grad()
    def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
                  cond_key=None, return_original_cond=False, bs=None, return_x=False):
        """Encode a batch of images to latents and gather their conditioning.

        :param k: batch key of the image tensor.
        :param force_c_encode: encode the conditioning even if the cond stage
            is trainable (used for logging).
        :param bs: optional batch-size cap.
        :return: [z, c] extended per the return_* flags (x, xrec, raw cond).
        """
        x = super().get_input(batch, k)
        if bs is not None:
            x = x[:bs]
        x = x.to(self.device)
        encoder_posterior = self.encode_first_stage(x)
        z = self.get_first_stage_encoding(encoder_posterior).detach()

        if self.model.conditioning_key is not None and not self.force_null_conditioning:
            if cond_key is None:
                cond_key = self.cond_stage_key
            if cond_key != self.first_stage_key:
                # text-like keys are passed raw; class labels pass the whole batch
                if cond_key in ['caption', 'coordinates_bbox', "txt"]:
                    xc = batch[cond_key]
                elif cond_key in ['class_label', 'cls']:
                    xc = batch
                else:
                    xc = super().get_input(batch, cond_key).to(self.device)
            else:
                xc = x
            if not self.cond_stage_trainable or force_c_encode:
                if isinstance(xc, dict) or isinstance(xc, list):
                    c = self.get_learned_conditioning(xc)
                else:
                    c = self.get_learned_conditioning(xc.to(self.device))
            else:
                # trainable cond stage: defer encoding to the forward pass
                c = xc
            if bs is not None:
                c = c[:bs]

            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                ckey = __conditioning_keys__[self.model.conditioning_key]
                c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}

        else:
            c = None
            xc = None
            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                c = {'pos_x': pos_x, 'pos_y': pos_y}
        out = [z, c]
        if return_first_stage_outputs:
            xrec = self.decode_first_stage(z)
            out.extend([x, xrec])
        if return_x:
            out.extend([x])
        if return_original_cond:
            out.append(xc)
        return out
816
+
817
    @torch.no_grad()
    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        """Undo the latent scaling and decode z to image space; with
        predict_cids, z is treated as codebook logits and looked up first."""
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, 'b h w c -> b c h w').contiguous()

        z = 1. / self.scale_factor * z
        return self.first_stage_model.decode(z)
827
+
828
    @torch.no_grad()
    def encode_first_stage(self, x):
        """Encode an image batch with the frozen first-stage autoencoder
        (returns the raw encoder output, e.g. a posterior distribution)."""
        return self.first_stage_model.encode(x)
831
+
832
    def shared_step(self, batch, **kwargs):
        """Common train/val step: get latents + conditioning, compute loss."""
        x, c = self.get_input(batch, self.first_stage_key)
        loss = self(x, c)
        return loss
836
+
837
    def forward(self, x, c, *args, **kwargs):
        """Sample random timesteps, encode trainable conditioning, optionally
        noise the conditioning per the shortened schedule, and return losses."""
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        if self.model.conditioning_key is not None:
            assert c is not None
            if self.cond_stage_trainable:
                c = self.get_learned_conditioning(c)
            if self.shorten_cond_schedule:  # TODO: drop this option
                tc = self.cond_ids[t].to(self.device)
                c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
        return self.p_losses(x, c, t, *args, **kwargs)
847
+
848
    def apply_model(self, x_noisy, t, cond, return_ids=False):
        """Invoke the DiffusionWrapper, normalizing `cond` into the dict form
        ({'c_concat': [...]} or {'c_crossattn': [...]}) it expects."""
        if isinstance(cond, dict):
            # hybrid case, cond is expected to be a dict
            pass
        else:
            if not isinstance(cond, list):
                cond = [cond]
            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
            cond = {key: cond}

        x_recon = self.model(x_noisy, t, **cond)

        # some wrapped models return (prediction, ids); drop ids unless asked for
        if isinstance(x_recon, tuple) and not return_ids:
            return x_recon[0]
        else:
            return x_recon
864
+
865
    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        """Invert the eps-parameterized x_0 prediction: recover eps from x_t
        and an x_0 estimate."""
        return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
               extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
868
+
869
    def _prior_bpd(self, x_start):
        """
        Get the prior KL term for the variational lower-bound, measured in
        bits-per-dim.
        This term can't be optimized, as it only depends on the encoder.
        :param x_start: the [N x C x ...] tensor of inputs.
        :return: a batch of [N] KL values (in bits), one per batch element.
        """
        batch_size = x_start.shape[0]
        t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
        qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
        # KL against the standard-normal prior; /log(2) converts nats to bits
        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
        return mean_flat(kl_prior) / np.log(2.0)
882
+
883
    def p_losses(self, x_start, cond, t, noise=None):
        """Conditional training losses: diffuse the latent to step t, predict
        with the conditioned model, and combine the (optionally learned
        log-variance weighted) simple loss with the VLB term."""
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_output = self.apply_model(x_noisy, t, cond)

        loss_dict = {}
        prefix = 'train' if self.training else 'val'

        if self.parameterization == "x0":
            target = x_start
        elif self.parameterization == "eps":
            target = noise
        elif self.parameterization == "v":
            target = self.get_v(x_start, noise, t)
        else:
            raise NotImplementedError()

        loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
        loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})

        # per-timestep learned (or fixed) log-variance reweighting
        logvar_t = self.logvar[t].to(self.device)
        loss = loss_simple / torch.exp(logvar_t) + logvar_t
        if self.learn_logvar:
            loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
            loss_dict.update({'logvar': self.logvar.data.mean()})

        loss = self.l_simple_weight * loss.mean()

        loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
        loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
        loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
        loss += (self.original_elbo_weight * loss_vlb)
        loss_dict.update({f'{prefix}/loss': loss})

        return loss, loss_dict
919
+
920
    def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
                        return_x0=False, score_corrector=None, corrector_kwargs=None):
        """Conditional p(x_{t-1} | x_t): run the model, optionally apply a
        score corrector and/or codebook quantization of the x_0 estimate, then
        return posterior mean/variance (plus logits or x_0 on request)."""
        t_in = t
        model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)

        if score_corrector is not None:
            # correctors operate on eps predictions only
            assert self.parameterization == "eps"
            model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)

        if return_codebook_ids:
            model_out, logits = model_out

        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        else:
            raise NotImplementedError()

        if clip_denoised:
            x_recon.clamp_(-1., 1.)
        if quantize_denoised:
            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        if return_codebook_ids:
            return model_mean, posterior_variance, posterior_log_variance, logits
        elif return_x0:
            return model_mean, posterior_variance, posterior_log_variance, x_recon
        else:
            return model_mean, posterior_variance, posterior_log_variance
950
+
951
    @torch.no_grad()
    def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
                 return_codebook_ids=False, quantize_denoised=False, return_x0=False,
                 temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
        """Draw one conditional ancestral sample x_{t-1}, with optional noise
        temperature/dropout; can also return the intermediate x_0 estimate."""
        b, *_, device = *x.shape, x.device
        outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
                                       return_codebook_ids=return_codebook_ids,
                                       quantize_denoised=quantize_denoised,
                                       return_x0=return_x0,
                                       score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
        if return_codebook_ids:
            raise DeprecationWarning("Support dropped.")
            model_mean, _, model_log_variance, logits = outputs
        elif return_x0:
            model_mean, _, model_log_variance, x0 = outputs
        else:
            model_mean, _, model_log_variance = outputs

        noise = noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))

        if return_codebook_ids:
            # unreachable: the DeprecationWarning above is raised first
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
        if return_x0:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
        else:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
981
+
982
    @torch.no_grad()
    def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
                              img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
                              score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
                              log_every_t=None):
        """Reverse-diffuse while collecting the intermediate x_0 estimates
        (rather than the noisy images) every log_every_t steps; supports
        per-step temperature, inpainting masks, and a late start_T."""
        if not log_every_t:
            log_every_t = self.log_every_t
        timesteps = self.num_timesteps
        if batch_size is not None:
            # inner conditional is redundant inside this branch but harmless
            b = batch_size if batch_size is not None else shape[0]
            shape = [batch_size] + list(shape)
        else:
            b = batch_size = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=self.device)
        else:
            img = x_T
        intermediates = []
        if cond is not None:
            # truncate all conditioning entries to the batch size
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
                        total=timesteps) if verbose else reversed(
            range(0, timesteps))
        if type(temperature) == float:
            temperature = [temperature] * timesteps

        for i in iterator:
            ts = torch.full((b,), i, device=self.device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img, x0_partial = self.p_sample(img, cond, ts,
                                            clip_denoised=self.clip_denoised,
                                            quantize_denoised=quantize_denoised, return_x0=True,
                                            temperature=temperature[i], noise_dropout=noise_dropout,
                                            score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
            if mask is not None:
                # inpainting: keep masked region pinned to the noised original
                assert x0 is not None
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(x0_partial)
            if callback: callback(i)
            if img_callback: img_callback(img, i)
        return img, intermediates
1037
+
1038
    @torch.no_grad()
    def p_sample_loop(self, cond, shape, return_intermediates=False,
                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, start_T=None,
                      log_every_t=None):
        """Conditional reverse process from x_T (or noise) to x_0, with
        optional inpainting mask, truncated schedule, and callbacks."""

        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
            range(0, timesteps))

        if mask is not None:
            assert x0 is not None
            # NOTE(review): [2:3] compares only the height dim, not full
            # spatial size as the comment suggests — confirm intent.
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img = self.p_sample(img, cond, ts,
                                clip_denoised=self.clip_denoised,
                                quantize_denoised=quantize_denoised)
            if mask is not None:
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback: callback(i)
            if img_callback: img_callback(img, i)

        if return_intermediates:
            return img, intermediates
        return img
1088
+
1089
+ @torch.no_grad()
1090
+ def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
1091
+ verbose=True, timesteps=None, quantize_denoised=False,
1092
+ mask=None, x0=None, shape=None, **kwargs):
1093
+ if shape is None:
1094
+ shape = (batch_size, self.channels, self.image_size, self.image_size)
1095
+ if cond is not None:
1096
+ if isinstance(cond, dict):
1097
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1098
+ list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
1099
+ else:
1100
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1101
+ return self.p_sample_loop(cond,
1102
+ shape,
1103
+ return_intermediates=return_intermediates, x_T=x_T,
1104
+ verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
1105
+ mask=mask, x0=x0)
1106
+
1107
+ @torch.no_grad()
1108
+ def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
1109
+ if ddim:
1110
+ ddim_sampler = DDIMSampler(self)
1111
+ shape = (self.channels, self.image_size, self.image_size)
1112
+ samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
1113
+ shape, cond, verbose=False, **kwargs)
1114
+
1115
+ else:
1116
+ samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
1117
+ return_intermediates=True, **kwargs)
1118
+
1119
+ return samples, intermediates
1120
+
1121
+ @torch.no_grad()
1122
+ def get_unconditional_conditioning(self, batch_size, null_label=None):
1123
+ if null_label is not None:
1124
+ xc = null_label
1125
+ if isinstance(xc, ListConfig):
1126
+ xc = list(xc)
1127
+ if isinstance(xc, dict) or isinstance(xc, list):
1128
+ c = self.get_learned_conditioning(xc)
1129
+ else:
1130
+ if hasattr(xc, "to"):
1131
+ xc = xc.to(self.device)
1132
+ c = self.get_learned_conditioning(xc)
1133
+ else:
1134
+ if self.cond_stage_key in ["class_label", "cls"]:
1135
+ xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
1136
+ return self.get_learned_conditioning(xc)
1137
+ else:
1138
+ raise NotImplementedError("todo")
1139
+ if isinstance(c, list): # in case the encoder gives us a list
1140
+ for i in range(len(c)):
1141
+ c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
1142
+ else:
1143
+ c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
1144
+ return c
1145
+
1146
+ @torch.no_grad()
1147
+ def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,
1148
+ quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1149
+ plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
1150
+ use_ema_scope=True,
1151
+ **kwargs):
1152
+ ema_scope = self.ema_scope if use_ema_scope else nullcontext
1153
+ use_ddim = ddim_steps is not None
1154
+
1155
+ log = dict()
1156
+ z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
1157
+ return_first_stage_outputs=True,
1158
+ force_c_encode=True,
1159
+ return_original_cond=True,
1160
+ bs=N)
1161
+ N = min(x.shape[0], N)
1162
+ n_row = min(x.shape[0], n_row)
1163
+ log["inputs"] = x
1164
+ log["reconstruction"] = xrec
1165
+ if self.model.conditioning_key is not None:
1166
+ if hasattr(self.cond_stage_model, "decode"):
1167
+ xc = self.cond_stage_model.decode(c)
1168
+ log["conditioning"] = xc
1169
+ elif self.cond_stage_key in ["caption", "txt"]:
1170
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
1171
+ log["conditioning"] = xc
1172
+ elif self.cond_stage_key in ['class_label', "cls"]:
1173
+ try:
1174
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
1175
+ log['conditioning'] = xc
1176
+ except KeyError:
1177
+ # probably no "human_label" in batch
1178
+ pass
1179
+ elif isimage(xc):
1180
+ log["conditioning"] = xc
1181
+ if ismap(xc):
1182
+ log["original_conditioning"] = self.to_rgb(xc)
1183
+
1184
+ if plot_diffusion_rows:
1185
+ # get diffusion row
1186
+ diffusion_row = list()
1187
+ z_start = z[:n_row]
1188
+ for t in range(self.num_timesteps):
1189
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1190
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1191
+ t = t.to(self.device).long()
1192
+ noise = torch.randn_like(z_start)
1193
+ z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1194
+ diffusion_row.append(self.decode_first_stage(z_noisy))
1195
+
1196
+ diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1197
+ diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1198
+ diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1199
+ diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1200
+ log["diffusion_row"] = diffusion_grid
1201
+
1202
+ if sample:
1203
+ # get denoise row
1204
+ with ema_scope("Sampling"):
1205
+ samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1206
+ ddim_steps=ddim_steps, eta=ddim_eta)
1207
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1208
+ x_samples = self.decode_first_stage(samples)
1209
+ log["samples"] = x_samples
1210
+ if plot_denoise_rows:
1211
+ denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1212
+ log["denoise_row"] = denoise_grid
1213
+
1214
+ if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
1215
+ self.first_stage_model, IdentityFirstStage):
1216
+ # also display when quantizing x0 while sampling
1217
+ with ema_scope("Plotting Quantized Denoised"):
1218
+ samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1219
+ ddim_steps=ddim_steps, eta=ddim_eta,
1220
+ quantize_denoised=True)
1221
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
1222
+ # quantize_denoised=True)
1223
+ x_samples = self.decode_first_stage(samples.to(self.device))
1224
+ log["samples_x0_quantized"] = x_samples
1225
+
1226
+ if unconditional_guidance_scale > 1.0:
1227
+ uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1228
+ if self.model.conditioning_key == "crossattn-adm":
1229
+ uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]}
1230
+ with ema_scope("Sampling with classifier-free guidance"):
1231
+ samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1232
+ ddim_steps=ddim_steps, eta=ddim_eta,
1233
+ unconditional_guidance_scale=unconditional_guidance_scale,
1234
+ unconditional_conditioning=uc,
1235
+ )
1236
+ x_samples_cfg = self.decode_first_stage(samples_cfg)
1237
+ log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1238
+
1239
+ if inpaint:
1240
+ # make a simple center square
1241
+ b, h, w = z.shape[0], z.shape[2], z.shape[3]
1242
+ mask = torch.ones(N, h, w).to(self.device)
1243
+ # zeros will be filled in
1244
+ mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
1245
+ mask = mask[:, None, ...]
1246
+ with ema_scope("Plotting Inpaint"):
1247
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1248
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1249
+ x_samples = self.decode_first_stage(samples.to(self.device))
1250
+ log["samples_inpainting"] = x_samples
1251
+ log["mask"] = mask
1252
+
1253
+ # outpaint
1254
+ mask = 1. - mask
1255
+ with ema_scope("Plotting Outpaint"):
1256
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1257
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1258
+ x_samples = self.decode_first_stage(samples.to(self.device))
1259
+ log["samples_outpainting"] = x_samples
1260
+
1261
+ if plot_progressive_rows:
1262
+ with ema_scope("Plotting Progressives"):
1263
+ img, progressives = self.progressive_denoising(c,
1264
+ shape=(self.channels, self.image_size, self.image_size),
1265
+ batch_size=N)
1266
+ prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1267
+ log["progressive_row"] = prog_row
1268
+
1269
+ if return_keys:
1270
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
1271
+ return log
1272
+ else:
1273
+ return {key: log[key] for key in return_keys}
1274
+ return log
1275
+
1276
+ def configure_optimizers(self):
1277
+ lr = self.learning_rate
1278
+ params = list(self.model.parameters())
1279
+ if self.cond_stage_trainable:
1280
+ print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1281
+ params = params + list(self.cond_stage_model.parameters())
1282
+ if self.learn_logvar:
1283
+ print('Diffusion model optimizing logvar')
1284
+ params.append(self.logvar)
1285
+ opt = torch.optim.AdamW(params, lr=lr)
1286
+ if self.use_scheduler:
1287
+ assert 'target' in self.scheduler_config
1288
+ scheduler = instantiate_from_config(self.scheduler_config)
1289
+
1290
+ print("Setting up LambdaLR scheduler...")
1291
+ scheduler = [
1292
+ {
1293
+ 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1294
+ 'interval': 'step',
1295
+ 'frequency': 1
1296
+ }]
1297
+ return [opt], scheduler
1298
+ return opt
1299
+
1300
+ @torch.no_grad()
1301
+ def to_rgb(self, x):
1302
+ x = x.float()
1303
+ if not hasattr(self, "colorize"):
1304
+ self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1305
+ x = nn.functional.conv2d(x, weight=self.colorize)
1306
+ x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1307
+ return x
1308
+
1309
+
1310
class DiffusionWrapper(pl.LightningModule):
    """Thin wrapper routing the various conditioning modes (channel concat,
    cross-attention, class/'adm' embedding and their combinations) into the
    underlying diffusion U-Net."""

    def __init__(self, diff_model_config, conditioning_key):
        super().__init__()
        self.sequential_cross_attn = diff_model_config.pop("sequential_crossattn", False)
        self.diffusion_model = instantiate_from_config(diff_model_config)
        self.conditioning_key = conditioning_key
        assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm']

    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None):
        key = self.conditioning_key
        if key is None:
            return self.diffusion_model(x, t)
        if key == 'concat':
            return self.diffusion_model(torch.cat([x] + c_concat, dim=1), t)
        if key == 'crossattn':
            # sequential mode hands the list of contexts straight to attention
            cc = c_crossattn if self.sequential_cross_attn else torch.cat(c_crossattn, 1)
            return self.diffusion_model(x, t, context=cc)
        if key == 'hybrid':
            xc = torch.cat([x] + c_concat, dim=1)
            return self.diffusion_model(xc, t, context=torch.cat(c_crossattn, 1))
        if key == 'hybrid-adm':
            assert c_adm is not None
            xc = torch.cat([x] + c_concat, dim=1)
            return self.diffusion_model(xc, t, context=torch.cat(c_crossattn, 1), y=c_adm)
        if key == 'crossattn-adm':
            assert c_adm is not None
            return self.diffusion_model(x, t, context=torch.cat(c_crossattn, 1), y=c_adm)
        if key == 'adm':
            # class embedding is passed through the c_crossattn slot here
            return self.diffusion_model(x, t, y=c_crossattn[0])
        raise NotImplementedError()
1350
+
1351
+
1352
class LatentUpscaleDiffusion(LatentDiffusion):
    """Latent diffusion conditioned on a low-resolution image that is encoded
    (and noise-augmented) by a frozen low-scale model; the noise level is fed
    to the U-Net through the 'adm' conditioning slot."""

    def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs):
        super().__init__(*args, **kwargs)
        # assumes that neither the cond_stage nor the low_scale_model contain trainable params
        assert not self.cond_stage_trainable
        self.instantiate_low_stage(low_scale_config)
        self.low_scale_key = low_scale_key
        self.noise_level_key = noise_level_key

    def instantiate_low_stage(self, config):
        """Build the low-scale model and freeze it entirely."""
        model = instantiate_from_config(config)
        self.low_scale_model = model.eval()
        self.low_scale_model.train = disabled_train
        for param in self.low_scale_model.parameters():
            param.requires_grad = False

    @torch.no_grad()
    def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False):
        """Fetch latents + text conditioning and augment them with the encoded
        low-res image (concat slot) and its noise level (adm slot)."""
        if not log_mode:
            z, c = super().get_input(batch, k, force_c_encode=True, bs=bs)
        else:
            z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
                                                  force_c_encode=True, return_original_cond=True, bs=bs)
        x_low = batch[self.low_scale_key][:bs]
        x_low = rearrange(x_low, 'b h w c -> b c h w')
        x_low = x_low.to(memory_format=torch.contiguous_format).float()
        zx, noise_level = self.low_scale_model(x_low)
        if self.noise_level_key is not None:
            # get noise level from batch instead, e.g. when extracting a custom noise level for bsr
            raise NotImplementedError('TODO')

        all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level}
        if log_mode:
            # TODO: maybe disable if too expensive
            x_low_rec = self.low_scale_model.decode(zx)
            return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level
        return z, all_conds

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True,
                   unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True,
                   **kwargs):
        """Logging dict for the upscaler: additionally shows the low-res input
        and its reconstruction at the sampled noise level."""
        ema_scope = self.ema_scope if use_ema_scope else nullcontext
        use_ddim = ddim_steps is not None

        log = {}
        z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N,
                                                                          log_mode=True)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        log["x_lr"] = x_low
        log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec
        if self.model.conditioning_key is not None:
            # visualize the conditioning in whatever way its modality allows
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption", "txt"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
                log["conditioning"] = xc
            elif self.cond_stage_key in ['class_label', 'cls']:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
                log['conditioning'] = xc
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if plot_diffusion_rows:
            # visualize the forward process: progressively noised copies of z
            diffusion_row = []
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            with ema_scope("Sampling"):
                samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                         ddim_steps=ddim_steps, eta=ddim_eta)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                log["denoise_row"] = self._get_denoise_row_from_list(z_denoise_row)

        if unconditional_guidance_scale > 1.0:
            uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label)
            # TODO explore better "unconditional" choices for the other keys
            # maybe guide away from empty text label and highest noise level and maximally degraded zx?
            uc = {}
            for k in c:
                if k == "c_crossattn":
                    assert isinstance(c[k], list) and len(c[k]) == 1
                    uc[k] = [uc_tmp]
                elif k == "c_adm":  # todo: only run with text-based guidance?
                    assert isinstance(c[k], torch.Tensor)
                    # uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level
                    uc[k] = c[k]
                elif isinstance(c[k], list):
                    uc[k] = list(c[k])
                else:
                    uc[k] = c[k]

            with ema_scope("Sampling with classifier-free guidance"):
                samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                 ddim_steps=ddim_steps, eta=ddim_eta,
                                                 unconditional_guidance_scale=unconditional_guidance_scale,
                                                 unconditional_conditioning=uc,
                                                 )
                x_samples_cfg = self.decode_first_stage(samples_cfg)
                log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg

        if plot_progressive_rows:
            with ema_scope("Plotting Progressives"):
                img, progressives = self.progressive_denoising(c,
                                                               shape=(self.channels, self.image_size, self.image_size),
                                                               batch_size=N)
            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
            log["progressive_row"] = prog_row

        return log
1488
+
1489
+
1490
class LatentFinetuneDiffusion(LatentDiffusion):
    """
    Basis for different finetunes, such as inpainting or depth2image.

    Loads a pretrained checkpoint and, for the parameters listed in
    ``finetune_keys``, keeps only the first ``keep_finetune_dims`` input
    channels from the checkpoint while zero-initializing the newly added
    concat channels.  To disable finetuning mode, set ``finetune_keys`` to None.
    """

    def __init__(self,
                 concat_keys: tuple,
                 finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
                                # NOTE(review): the EMA key is intentionally dot-free —
                                # presumably EMA buffers store names with '.' stripped; confirm against the EMA impl
                                "model_ema.diffusion_modelinput_blocks00weight"
                                ),
                 keep_finetune_dims=4,
                 # if model was trained without concat mode before and we would like to keep these channels
                 c_concat_log_start=None,  # to log reconstruction of c_concat codes
                 c_concat_log_end=None,
                 *args, **kwargs
                 ):
        ckpt_path = kwargs.pop("ckpt_path", None)
        ignore_keys = kwargs.pop("ignore_keys", list())
        super().__init__(*args, **kwargs)
        self.finetune_keys = finetune_keys
        self.concat_keys = concat_keys
        self.keep_dims = keep_finetune_dims
        self.c_concat_log_start = c_concat_log_start
        self.c_concat_log_end = c_concat_log_end
        if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint'
        if exists(ckpt_path):
            self.init_from_ckpt(ckpt_path, ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
        """Load weights from ``path`` (non-strict).

        Keys whose name starts with any prefix in ``ignore_keys`` are dropped;
        keys listed in ``self.finetune_keys`` are zero-padded along the input
        channel dimension so the checkpoint fits the widened (concat) model.

        Fixes vs. original: ``ignore_keys`` no longer uses a shared mutable
        default list, and a matched key is deleted only once (previously two
        matching prefixes raised KeyError on the second ``del``).
        """
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in (ignore_keys if ignore_keys is not None else ()):
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
                    break  # key is gone; a second matching prefix must not delete again

            # make it explicit, finetune by including extra input channels
            if exists(self.finetune_keys) and k in self.finetune_keys:
                new_entry = None
                for name, param in self.named_parameters():
                    if name in self.finetune_keys:
                        print(
                            f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only")
                        new_entry = torch.zeros_like(param)  # zero init
                assert exists(new_entry), 'did not find matching parameter to modify'
                new_entry[:, :self.keep_dims, ...] = sd[k]
                sd[k] = new_entry

        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys: {missing}")
        if len(unexpected) > 0:
            print(f"Unexpected Keys: {unexpected}")

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
                   use_ema_scope=True,
                   **kwargs):
        """Logging for concat-conditioned finetunes: inputs, reconstruction,
        conditioning, diffusion rows, samples, and CFG samples (the concat
        conditioning is reused unchanged for the 'unconditional' branch)."""
        ema_scope = self.ema_scope if use_ema_scope else nullcontext
        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True)
        c_cat, c = c["c_concat"][0], c["c_crossattn"][0]
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            # visualize the conditioning in whatever way its modality allows
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption", "txt"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
                log["conditioning"] = xc
            elif self.cond_stage_key in ['class_label', 'cls']:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
                log['conditioning'] = xc
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if not (self.c_concat_log_start is None and self.c_concat_log_end is None):
            log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end])

        if plot_diffusion_rows:
            # get diffusion row
            diffusion_row = list()
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            with ema_scope("Sampling"):
                samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
                                                         batch_size=N, ddim=use_ddim,
                                                         ddim_steps=ddim_steps, eta=ddim_eta)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid

        if unconditional_guidance_scale > 1.0:
            uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label)
            uc_cat = c_cat  # the concat conditioning is kept for the unconditional branch
            uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
            with ema_scope("Sampling with classifier-free guidance"):
                samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
                                                 batch_size=N, ddim=use_ddim,
                                                 ddim_steps=ddim_steps, eta=ddim_eta,
                                                 unconditional_guidance_scale=unconditional_guidance_scale,
                                                 unconditional_conditioning=uc_full,
                                                 )
                x_samples_cfg = self.decode_first_stage(samples_cfg)
                log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg

        return log
1630
+
1631
+
1632
class LatentInpaintDiffusion(LatentFinetuneDiffusion):
    """
    can either run as pure inpainting model (only concat mode) or with mixed conditionings,
    e.g. mask as concat and text via cross-attn.
    To disable finetuning mode, set finetune_keys to None
    """

    def __init__(self,
                 concat_keys=("mask", "masked_image"),
                 masked_image_key="masked_image",
                 *args, **kwargs
                 ):
        super().__init__(concat_keys, *args, **kwargs)
        self.masked_image_key = masked_image_key
        assert self.masked_image_key in concat_keys

    @torch.no_grad()
    def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
        """Fetch latents + conditioning and append the latent-space mask and
        encoded masked image as channel-concat conditioning."""
        # note: restricted to non-trainable encoders currently
        assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting'
        z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
                                              force_c_encode=True, return_original_cond=True, bs=bs)

        assert exists(self.concat_keys)
        concat_parts = []
        for ck in self.concat_keys:
            cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
            if bs is not None:
                cc = cc[:bs]
            cc = cc.to(self.device)
            if ck == self.masked_image_key:
                # encode the masked image into latent space
                cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
            else:
                # resize the mask to the latent resolution
                cc = torch.nn.functional.interpolate(cc, size=z.shape[-2:])
            concat_parts.append(cc)
        c_cat = torch.cat(concat_parts, dim=1)
        all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
        if return_first_stage_outputs:
            return z, all_conds, x, xrec, xc
        return z, all_conds

    @torch.no_grad()
    def log_images(self, *args, **kwargs):
        """Add the raw masked image to the parent's log dict."""
        log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs)
        log["masked_image"] = rearrange(args[0]["masked_image"],
                                        'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
        return log
1680
+
1681
+
1682
class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion):
    """
    condition on monocular depth estimation
    """

    def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs):
        super().__init__(concat_keys=concat_keys, *args, **kwargs)
        self.depth_model = instantiate_from_config(depth_stage_config)
        self.depth_stage_key = concat_keys[0]

    @torch.no_grad()
    def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
        """Fetch latents + conditioning and append the predicted depth map,
        resized to the latent resolution and normalized to [-1, 1], as
        channel-concat conditioning."""
        # note: restricted to non-trainable encoders currently
        assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for depth2img'
        z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
                                              force_c_encode=True, return_original_cond=True, bs=bs)

        assert exists(self.concat_keys)
        assert len(self.concat_keys) == 1
        c_cat = list()
        for ck in self.concat_keys:
            cc = batch[ck]
            if bs is not None:
                cc = cc[:bs]
            cc = cc.to(self.device)
            cc = self.depth_model(cc)
            cc = torch.nn.functional.interpolate(
                cc,
                size=z.shape[2:],
                mode="bicubic",
                align_corners=False,
            )

            depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3],
                                                                                           keepdim=True)
            # the +0.001 guards against division by zero for constant depth maps
            cc = 2. * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1.
            c_cat.append(cc)
        c_cat = torch.cat(c_cat, dim=1)
        all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
        if return_first_stage_outputs:
            return z, all_conds, x, xrec, xc
        return z, all_conds

    @torch.no_grad()
    def log_images(self, *args, **kwargs):
        """Add the normalized depth map to the parent's log dict."""
        log = super().log_images(*args, **kwargs)
        depth = self.depth_model(args[0][self.depth_stage_key])
        depth_min, depth_max = torch.amin(depth, dim=[1, 2, 3], keepdim=True), \
            torch.amax(depth, dim=[1, 2, 3], keepdim=True)
        # stabilized denominator, consistent with get_input above: the original
        # divided by (depth_max - depth_min) and produced NaNs for constant maps
        log["depth"] = 2. * (depth - depth_min) / (depth_max - depth_min + 0.001) - 1.
        return log
1733
+
1734
+
1735
class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion):
    """
    condition on low-res image (and optionally on some spatial noise augmentation)
    """

    def __init__(self, concat_keys=("lr",), reshuffle_patch_size=None,
                 low_scale_config=None, low_scale_key=None, *args, **kwargs):
        super().__init__(concat_keys=concat_keys, *args, **kwargs)
        self.reshuffle_patch_size = reshuffle_patch_size
        self.low_scale_model = None
        if low_scale_config is not None:
            print("Initializing a low-scale model")
            assert exists(low_scale_key)
            self.instantiate_low_stage(low_scale_config)
            self.low_scale_key = low_scale_key

    def instantiate_low_stage(self, config):
        """Build the optional noise-augmentation model and freeze it."""
        model = instantiate_from_config(config)
        self.low_scale_model = model.eval()
        self.low_scale_model.train = disabled_train
        for param in self.low_scale_model.parameters():
            param.requires_grad = False

    @torch.no_grad()
    def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
        """Fetch latents + conditioning and append the (optionally reshuffled
        and noise-augmented) low-res image as channel-concat conditioning."""
        # note: restricted to non-trainable encoders currently
        assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for upscaling-ft'
        z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
                                              force_c_encode=True, return_original_cond=True, bs=bs)

        assert exists(self.concat_keys)
        assert len(self.concat_keys) == 1
        # optionally make spatial noise_level here
        concat_parts = []
        noise_level = None
        for ck in self.concat_keys:
            cc = rearrange(batch[ck], 'b h w c -> b c h w')
            if exists(self.reshuffle_patch_size):
                # fold spatial patches into channels before conditioning
                assert isinstance(self.reshuffle_patch_size, int)
                cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w',
                               p1=self.reshuffle_patch_size, p2=self.reshuffle_patch_size)
            if bs is not None:
                cc = cc[:bs]
            cc = cc.to(self.device)
            if exists(self.low_scale_model) and ck == self.low_scale_key:
                cc, noise_level = self.low_scale_model(cc)
            concat_parts.append(cc)
        c_cat = torch.cat(concat_parts, dim=1)
        if exists(noise_level):
            all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level}
        else:
            all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
        if return_first_stage_outputs:
            return z, all_conds, x, xrec, xc
        return z, all_conds

    @torch.no_grad()
    def log_images(self, *args, **kwargs):
        """Add the raw low-res conditioning image to the parent's log dict."""
        log = super().log_images(*args, **kwargs)
        log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w')
        return log
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .sampler import DPMSolverSampler
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/dpm_solver.py ADDED
@@ -0,0 +1,1154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+ import math
4
+ from tqdm import tqdm
5
+
6
+
7
class NoiseScheduleVP:
    def __init__(
            self,
            schedule='discrete',
            betas=None,
            alphas_cumprod=None,
            continuous_beta_0=0.1,
            continuous_beta_1=20.,
    ):
        r"""Create a wrapper class for the forward SDE (VP type).
        ***
        Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
        We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.
        ***
        The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
        We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
        Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
            log_alpha_t = self.marginal_log_mean_coeff(t)
            sigma_t = self.marginal_std(t)
            lambda_t = self.marginal_lambda(t)
        Moreover, as lambda(t) is an invertible function, we also support its inverse function:
            t = self.inverse_lambda(lambda_t)
        ===============================================================
        We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
        1. For discrete-time DPMs:
            For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
                t_i = (i + 1) / N
            e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
            We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
            Args:
                betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
                alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
            Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
            **Important**: Please pay special attention for the args for `alphas_cumprod`:
                The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
                    q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
                Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
                    alpha_{t_n} = \sqrt{\hat{alpha_n}},
                and
                    log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
        2. For continuous-time DPMs:
            We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
            schedule are the default settings in DDPM and improved-DDPM:
            Args:
                continuous_beta_0: A `float` number. The smallest beta for the linear schedule.
                continuous_beta_1: A `float` number. The largest beta for the linear schedule.
            (The cosine-schedule hyperparameters cosine_s / cosine_beta_max are fixed below.)
        ===============================================================
        Args:
            schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
                    'linear' or 'cosine' for continuous-time DPMs.
        Returns:
            A wrapper object of the forward SDE (VP type).

        ===============================================================
        Example:
        # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
        >>> ns = NoiseScheduleVP('discrete', betas=betas)
        # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
        >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
        # For continuous-time DPMs (VPSDE), linear schedule:
        >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
        """
        if schedule not in ['discrete', 'linear', 'cosine']:
            raise ValueError(
                "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(
                    schedule))

        self.schedule = schedule
        if schedule == 'discrete':
            if betas is not None:
                # log alpha_t = 0.5 * sum_i log(1 - beta_i), i.e. 0.5 * log(cumprod(1 - betas))
                log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
            else:
                assert alphas_cumprod is not None
                log_alphas = 0.5 * torch.log(alphas_cumprod)
            self.total_N = len(log_alphas)
            self.T = 1.
            # interpolation grid: t_i = (i + 1) / N for i = 0..N-1
            self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
            self.log_alpha_array = log_alphas.reshape((1, -1,))
        else:
            self.total_N = 1000
            self.beta_0 = continuous_beta_0
            self.beta_1 = continuous_beta_1
            self.cosine_s = 0.008
            self.cosine_beta_max = 999.
            self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (
                    1. + self.cosine_s) / math.pi - self.cosine_s
            self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
            if schedule == 'cosine':
                # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
                # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
                self.T = 0.9946
            else:
                self.T = 1.

    def marginal_log_mean_coeff(self, t):
        """
        Compute log(alpha_t) of a given continuous-time label t in [0, T].
        """
        if self.schedule == 'discrete':
            return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),
                                  self.log_alpha_array.to(t.device)).reshape((-1))
        elif self.schedule == 'linear':
            return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
        elif self.schedule == 'cosine':
            log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
            log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
            return log_alpha_t

    def marginal_alpha(self, t):
        """
        Compute alpha_t of a given continuous-time label t in [0, T].
        """
        return torch.exp(self.marginal_log_mean_coeff(t))

    def marginal_std(self, t):
        """
        Compute sigma_t of a given continuous-time label t in [0, T].
        """
        return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))

    def marginal_lambda(self, t):
        """
        Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
        """
        log_mean_coeff = self.marginal_log_mean_coeff(t)
        log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
        return log_mean_coeff - log_std

    def inverse_lambda(self, lamb):
        """
        Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
        """
        if self.schedule == 'linear':
            tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
            Delta = self.beta_0 ** 2 + tmp
            return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
        elif self.schedule == 'discrete':
            log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
            t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),
                              torch.flip(self.t_array.to(lamb.device), [1]))
            return t.reshape((-1,))
        else:
            log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
            t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (
                    1. + self.cosine_s) / math.pi - self.cosine_s
            t = t_fn(log_alpha)
            return t
159
+
160
+
161
def model_wrapper(
        model,
        noise_schedule,
        model_type="noise",
        model_kwargs=None,
        guidance_type="uncond",
        condition=None,
        unconditional_condition=None,
        guidance_scale=1.,
        classifier_fn=None,
        classifier_kwargs=None,
):
    """Create a wrapper function for the noise prediction model.

    DPM-Solver solves continuous-time diffusion ODEs. For DPMs trained on discrete-time
    labels, we first wrap the model into a noise prediction model that accepts continuous
    time as input.

    Supported `model_type` parameterizations:
        1. "noise": noise prediction model (trained by predicting noise).
        2. "x_start": data prediction model (trained by predicting x_0 at time 0).
        3. "v": velocity prediction model; see Appendix D of [1], used in Imagen-Video [2].
        4. "score": marginal score function (denoising score matching); note that
           noise(x_t, t) = -sigma_t * score(x_t, t).

    Supported `guidance_type` values:
        1. "uncond": unconditional sampling.
           `model(x, t_input, **model_kwargs) -> noise | x_start | v | score`
        2. "classifier": classifier guidance [3]; additionally requires
           `classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits`.
        3. "classifier-free": classifier-free guidance [4];
           `model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score`,
           where `cond == unconditional_condition` yields the unconditional output.

    [1] Salimans & Ho, "Progressive distillation for fast sampling of diffusion models",
        arXiv:2202.00512 (2022).
    [2] Ho et al., "Imagen Video", arXiv:2210.02303 (2022).
    [3] Dhariwal & Nichol, "Diffusion models beat GANs on image synthesis", NeurIPS 2021.
    [4] Ho & Salimans, "Classifier-free diffusion guidance", arXiv:2207.12598 (2022).

    Args:
        model: A diffusion model with the corresponding format described above.
        noise_schedule: A noise schedule object, such as NoiseScheduleVP.
        model_type: A `str`. "noise" or "x_start" or "v" or "score".
        model_kwargs: A `dict` of extra inputs for the model function (default: {}).
        guidance_type: A `str`. "uncond" or "classifier" or "classifier-free".
        condition: A pytorch tensor. The condition for guided sampling
            ("classifier" or "classifier-free" only).
        unconditional_condition: A pytorch tensor. The condition for the unconditional
            branch ("classifier-free" only).
        guidance_scale: A `float`. The scale for guided sampling.
        classifier_fn: A classifier function ("classifier" guidance only).
        classifier_kwargs: A `dict` of extra inputs for the classifier (default: {}).
    Returns:
        A noise prediction function `model_fn(x, t_continuous)` for DPM-Solver,
        where `t_continuous` is in [epsilon, T].
    """
    # Validate configuration up front. NOTE: "score" is documented and implemented in
    # noise_pred_fn below but was rejected by the original trailing assert — fixed here.
    assert model_type in ["noise", "x_start", "v", "score"]
    assert guidance_type in ["uncond", "classifier", "classifier-free"]
    # Avoid mutable default arguments.
    model_kwargs = {} if model_kwargs is None else model_kwargs
    classifier_kwargs = {} if classifier_kwargs is None else classifier_kwargs

    def get_model_input_time(t_continuous):
        """
        Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
        For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
        For continuous-time DPMs, we just use `t_continuous`.
        """
        if noise_schedule.schedule == 'discrete':
            return (t_continuous - 1. / noise_schedule.total_N) * 1000.
        else:
            return t_continuous

    def noise_pred_fn(x, t_continuous, cond=None):
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        t_input = get_model_input_time(t_continuous)
        if cond is None:
            output = model(x, t_input, **model_kwargs)
        else:
            output = model(x, t_input, cond, **model_kwargs)
        if model_type == "noise":
            return output
        elif model_type == "x_start":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
        elif model_type == "v":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
        elif model_type == "score":
            # noise = -sigma_t * score
            sigma_t = noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return -expand_dims(sigma_t, dims) * output

    def cond_grad_fn(x, t_input):
        """
        Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
        """
        with torch.enable_grad():
            x_in = x.detach().requires_grad_(True)
            log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
            return torch.autograd.grad(log_prob.sum(), x_in)[0]

    def model_fn(x, t_continuous):
        """
        The noise prediction model function that is used for DPM-Solver.
        """
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        if guidance_type == "uncond":
            return noise_pred_fn(x, t_continuous)
        elif guidance_type == "classifier":
            assert classifier_fn is not None
            t_input = get_model_input_time(t_continuous)
            cond_grad = cond_grad_fn(x, t_input)
            sigma_t = noise_schedule.marginal_std(t_continuous)
            noise = noise_pred_fn(x, t_continuous)
            return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
        elif guidance_type == "classifier-free":
            if guidance_scale == 1. or unconditional_condition is None:
                return noise_pred_fn(x, t_continuous, cond=condition)
            else:
                # Batch the unconditional and conditional passes into one model call.
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t_continuous] * 2)
                c_in = torch.cat([unconditional_condition, condition])
                noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
                return noise_uncond + guidance_scale * (noise - noise_uncond)

    return model_fn
317
+
318
+
319
+ class DPM_Solver:
320
+ def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
321
+ """Construct a DPM-Solver.
322
+ We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
323
+ If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
324
+ If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
325
+ In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
326
+ The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
327
+ Args:
328
+ model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
329
+ ``
330
+ def model_fn(x, t_continuous):
331
+ return noise
332
+ ``
333
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
334
+ predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
335
+ thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
336
+ max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.
337
+
338
+ [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
339
+ """
340
+ self.model = model_fn
341
+ self.noise_schedule = noise_schedule
342
+ self.predict_x0 = predict_x0
343
+ self.thresholding = thresholding
344
+ self.max_val = max_val
345
+
346
+ def noise_prediction_fn(self, x, t):
347
+ """
348
+ Return the noise prediction model.
349
+ """
350
+ return self.model(x, t)
351
+
352
+ def data_prediction_fn(self, x, t):
353
+ """
354
+ Return the data prediction model (with thresholding).
355
+ """
356
+ noise = self.noise_prediction_fn(x, t)
357
+ dims = x.dim()
358
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
359
+ x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
360
+ if self.thresholding:
361
+ p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
362
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
363
+ s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
364
+ x0 = torch.clamp(x0, -s, s) / s
365
+ return x0
366
+
367
+ def model_fn(self, x, t):
368
+ """
369
+ Convert the model to the noise prediction model or the data prediction model.
370
+ """
371
+ if self.predict_x0:
372
+ return self.data_prediction_fn(x, t)
373
+ else:
374
+ return self.noise_prediction_fn(x, t)
375
+
376
+ def get_time_steps(self, skip_type, t_T, t_0, N, device):
377
+ """Compute the intermediate time steps for sampling.
378
+ Args:
379
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
380
+ - 'logSNR': uniform logSNR for the time steps.
381
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)
382
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)
383
+ t_T: A `float`. The starting time of the sampling (default is T).
384
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
385
+ N: A `int`. The total number of the spacing of the time steps.
386
+ device: A torch device.
387
+ Returns:
388
+ A pytorch tensor of the time steps, with the shape (N + 1,).
389
+ """
390
+ if skip_type == 'logSNR':
391
+ lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
392
+ lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
393
+ logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
394
+ return self.noise_schedule.inverse_lambda(logSNR_steps)
395
+ elif skip_type == 'time_uniform':
396
+ return torch.linspace(t_T, t_0, N + 1).to(device)
397
+ elif skip_type == 'time_quadratic':
398
+ t_order = 2
399
+ t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
400
+ return t
401
+ else:
402
+ raise ValueError(
403
+ "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
404
+
405
+ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
406
+ """
407
+ Get the order of each step for sampling by the singlestep DPM-Solver.
408
+ We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast".
409
+ Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
410
+ - If order == 1:
411
+ We take `steps` of DPM-Solver-1 (i.e. DDIM).
412
+ - If order == 2:
413
+ - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
414
+ - If steps % 2 == 0, we use K steps of DPM-Solver-2.
415
+ - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
416
+ - If order == 3:
417
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
418
+ - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
419
+ - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
420
+ - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
421
+ ============================================
422
+ Args:
423
+ order: A `int`. The max order for the solver (2 or 3).
424
+ steps: A `int`. The total number of function evaluations (NFE).
425
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
426
+ - 'logSNR': uniform logSNR for the time steps.
427
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)
428
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)
429
+ t_T: A `float`. The starting time of the sampling (default is T).
430
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
431
+ device: A torch device.
432
+ Returns:
433
+ orders: A list of the solver order of each step.
434
+ """
435
+ if order == 3:
436
+ K = steps // 3 + 1
437
+ if steps % 3 == 0:
438
+ orders = [3, ] * (K - 2) + [2, 1]
439
+ elif steps % 3 == 1:
440
+ orders = [3, ] * (K - 1) + [1]
441
+ else:
442
+ orders = [3, ] * (K - 1) + [2]
443
+ elif order == 2:
444
+ if steps % 2 == 0:
445
+ K = steps // 2
446
+ orders = [2, ] * K
447
+ else:
448
+ K = steps // 2 + 1
449
+ orders = [2, ] * (K - 1) + [1]
450
+ elif order == 1:
451
+ K = 1
452
+ orders = [1, ] * steps
453
+ else:
454
+ raise ValueError("'order' must be '1' or '2' or '3'.")
455
+ if skip_type == 'logSNR':
456
+ # To reproduce the results in DPM-Solver paper
457
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
458
+ else:
459
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
460
+ torch.cumsum(torch.tensor([0, ] + orders)).to(device)]
461
+ return timesteps_outer, orders
462
+
463
+ def denoise_to_zero_fn(self, x, s):
464
+ """
465
+ Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.
466
+ """
467
+ return self.data_prediction_fn(x, s)
468
+
469
+ def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
470
+ """
471
+ DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
472
+ Args:
473
+ x: A pytorch tensor. The initial value at time `s`.
474
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
475
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
476
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
477
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
478
+ return_intermediate: A `bool`. If true, also return the model value at time `s`.
479
+ Returns:
480
+ x_t: A pytorch tensor. The approximated solution at time `t`.
481
+ """
482
+ ns = self.noise_schedule
483
+ dims = x.dim()
484
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
485
+ h = lambda_t - lambda_s
486
+ log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
487
+ sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
488
+ alpha_t = torch.exp(log_alpha_t)
489
+
490
+ if self.predict_x0:
491
+ phi_1 = torch.expm1(-h)
492
+ if model_s is None:
493
+ model_s = self.model_fn(x, s)
494
+ x_t = (
495
+ expand_dims(sigma_t / sigma_s, dims) * x
496
+ - expand_dims(alpha_t * phi_1, dims) * model_s
497
+ )
498
+ if return_intermediate:
499
+ return x_t, {'model_s': model_s}
500
+ else:
501
+ return x_t
502
+ else:
503
+ phi_1 = torch.expm1(h)
504
+ if model_s is None:
505
+ model_s = self.model_fn(x, s)
506
+ x_t = (
507
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
508
+ - expand_dims(sigma_t * phi_1, dims) * model_s
509
+ )
510
+ if return_intermediate:
511
+ return x_t, {'model_s': model_s}
512
+ else:
513
+ return x_t
514
+
515
    def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
                                            solver_type='dpm_solver'):
        """
        Singlestep solver DPM-Solver-2 from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            r1: A `float`. The hyperparameter of the second-order solver
                (fraction of the lambda-step at which the intermediate point s1 lies).
            model_s: A pytorch tensor. The model function evaluated at time `s`.
                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
            return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        Raises:
            ValueError: for an unknown `solver_type`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        if r1 is None:
            r1 = 0.5
        ns = self.noise_schedule
        dims = x.dim()
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        # step size in half-logSNR (lambda) space; s1 sits a fraction r1 into the step
        h = lambda_t - lambda_s
        lambda_s1 = lambda_s + r1 * h
        s1 = ns.inverse_lambda(lambda_s1)
        log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
            s1), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
        alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)

        if self.predict_x0:
            # DPM-Solver++ (data prediction) parameterization
            phi_11 = torch.expm1(-r1 * h)  # e^{-r1*h} - 1
            phi_1 = torch.expm1(-h)        # e^{-h} - 1

            if model_s is None:
                model_s = self.model_fn(x, s)
            # intermediate point at time s1
            x_s1 = (
                    expand_dims(sigma_s1 / sigma_s, dims) * x
                    - expand_dims(alpha_s1 * phi_11, dims) * model_s
            )
            model_s1 = self.model_fn(x_s1, s1)
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(sigma_t / sigma_s, dims) * x
                        - expand_dims(alpha_t * phi_1, dims) * model_s
                        - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
                )
            elif solver_type == 'taylor':
                x_t = (
                        expand_dims(sigma_t / sigma_s, dims) * x
                        - expand_dims(alpha_t * phi_1, dims) * model_s
                        + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
                                model_s1 - model_s)
                )
        else:
            # DPM-Solver (noise prediction) parameterization
            phi_11 = torch.expm1(r1 * h)  # e^{r1*h} - 1
            phi_1 = torch.expm1(h)        # e^{h} - 1

            if model_s is None:
                model_s = self.model_fn(x, s)
            # intermediate point at time s1
            x_s1 = (
                    expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
                    - expand_dims(sigma_s1 * phi_11, dims) * model_s
            )
            model_s1 = self.model_fn(x_s1, s1)
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                        - expand_dims(sigma_t * phi_1, dims) * model_s
                        - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
                )
            elif solver_type == 'taylor':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                        - expand_dims(sigma_t * phi_1, dims) * model_s
                        - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
                )
        if return_intermediate:
            return x_t, {'model_s': model_s, 'model_s1': model_s1}
        else:
            return x_t
598
+
599
    def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
                                           return_intermediate=False, solver_type='dpm_solver'):
        """
        Singlestep solver DPM-Solver-3 from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            r1: A `float`. The hyperparameter of the third-order solver.
            r2: A `float`. The hyperparameter of the third-order solver.
            model_s: A pytorch tensor. The model function evaluated at time `s`.
                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
            model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
                If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
            return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        # Callers may explicitly pass None to request the default intermediate ratios.
        if r1 is None:
            r1 = 1. / 3.
        if r2 is None:
            r2 = 2. / 3.
        ns = self.noise_schedule
        dims = x.dim()
        # Step size is measured in logSNR (lambda) space; s1/s2 are the two
        # intermediate times located at fractions r1 and r2 of the lambda step.
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        h = lambda_t - lambda_s
        lambda_s1 = lambda_s + r1 * h
        lambda_s2 = lambda_s + r2 * h
        s1 = ns.inverse_lambda(lambda_s1)
        s2 = ns.inverse_lambda(lambda_s2)
        log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
            s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
            s2), ns.marginal_std(t)
        alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)

        if self.predict_x0:
            # Data-prediction (x0) parameterization: phi_k are the exponential
            # integrator coefficients with negative exponents.
            phi_11 = torch.expm1(-r1 * h)
            phi_12 = torch.expm1(-r2 * h)
            phi_1 = torch.expm1(-h)
            phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
            phi_2 = phi_1 / h + 1.
            phi_3 = phi_2 / h - 0.5

            if model_s is None:
                model_s = self.model_fn(x, s)
            if model_s1 is None:
                # First intermediate point (DPM-Solver-1 step of size r1*h).
                x_s1 = (
                    expand_dims(sigma_s1 / sigma_s, dims) * x
                    - expand_dims(alpha_s1 * phi_11, dims) * model_s
                )
                model_s1 = self.model_fn(x_s1, s1)
            # Second intermediate point, corrected by the first finite difference.
            x_s2 = (
                expand_dims(sigma_s2 / sigma_s, dims) * x
                - expand_dims(alpha_s2 * phi_12, dims) * model_s
                + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
            )
            model_s2 = self.model_fn(x_s2, s2)
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(sigma_t / sigma_s, dims) * x
                    - expand_dims(alpha_t * phi_1, dims) * model_s
                    + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
                )
            elif solver_type == 'taylor':
                # Taylor variant: build first/second divided differences D1, D2.
                D1_0 = (1. / r1) * (model_s1 - model_s)
                D1_1 = (1. / r2) * (model_s2 - model_s)
                D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
                D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
                x_t = (
                    expand_dims(sigma_t / sigma_s, dims) * x
                    - expand_dims(alpha_t * phi_1, dims) * model_s
                    + expand_dims(alpha_t * phi_2, dims) * D1
                    - expand_dims(alpha_t * phi_3, dims) * D2
                )
        else:
            # Noise-prediction (epsilon) parameterization: mirrored coefficients
            # with positive exponents.
            phi_11 = torch.expm1(r1 * h)
            phi_12 = torch.expm1(r2 * h)
            phi_1 = torch.expm1(h)
            phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
            phi_2 = phi_1 / h - 1.
            phi_3 = phi_2 / h - 0.5

            if model_s is None:
                model_s = self.model_fn(x, s)
            if model_s1 is None:
                x_s1 = (
                    expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
                    - expand_dims(sigma_s1 * phi_11, dims) * model_s
                )
                model_s1 = self.model_fn(x_s1, s1)
            x_s2 = (
                expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
                - expand_dims(sigma_s2 * phi_12, dims) * model_s
                - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
            )
            model_s2 = self.model_fn(x_s2, s2)
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                    - expand_dims(sigma_t * phi_1, dims) * model_s
                    - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
                )
            elif solver_type == 'taylor':
                D1_0 = (1. / r1) * (model_s1 - model_s)
                D1_1 = (1. / r2) * (model_s2 - model_s)
                D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
                D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                    - expand_dims(sigma_t * phi_1, dims) * model_s
                    - expand_dims(sigma_t * phi_2, dims) * D1
                    - expand_dims(sigma_t * phi_3, dims) * D2
                )

        if return_intermediate:
            return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
        else:
            return x_t
722
+
723
    def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
        """
        Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            model_prev_list: A list of pytorch tensor. The previous computed model values.
            t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        ns = self.noise_schedule
        dims = x.dim()
        # Lists are ordered oldest-first: [-1] is the most recent point.
        model_prev_1, model_prev_0 = model_prev_list
        t_prev_1, t_prev_0 = t_prev_list
        lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
            t_prev_0), ns.marginal_lambda(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        # Step sizes in logSNR space; r0 is the ratio of the previous step to
        # the current one, used to scale the backward finite difference D1_0.
        h_0 = lambda_prev_0 - lambda_prev_1
        h = lambda_t - lambda_prev_0
        r0 = h_0 / h
        D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
        if self.predict_x0:
            # Data-prediction (x0) parameterization.
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(sigma_t / sigma_prev_0, dims) * x
                    - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                    - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
                )
            elif solver_type == 'taylor':
                x_t = (
                    expand_dims(sigma_t / sigma_prev_0, dims) * x
                    - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                    + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
                )
        else:
            # Noise-prediction (epsilon) parameterization.
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                    - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                    - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
                )
            elif solver_type == 'taylor':
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                    - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                    - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
                )
        return x_t
779
+
780
    def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
        """
        Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            model_prev_list: A list of pytorch tensor. The previous computed model values.
            t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        ns = self.noise_schedule
        dims = x.dim()
        # Lists are ordered oldest-first: [-1] is the most recent point.
        model_prev_2, model_prev_1, model_prev_0 = model_prev_list
        t_prev_2, t_prev_1, t_prev_0 = t_prev_list
        lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
            t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        # Backward step sizes in logSNR space; r0, r1 are ratios relative to
        # the current step h, used to form divided differences D1_0, D1_1.
        h_1 = lambda_prev_1 - lambda_prev_2
        h_0 = lambda_prev_0 - lambda_prev_1
        h = lambda_t - lambda_prev_0
        r0, r1 = h_0 / h, h_1 / h
        D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
        D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
        # First- and second-order divided differences of the model output.
        D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
        D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
        if self.predict_x0:
            # Data-prediction (x0) parameterization.
            x_t = (
                expand_dims(sigma_t / sigma_prev_0, dims) * x
                - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
                - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
            )
        else:
            # Noise-prediction (epsilon) parameterization.
            x_t = (
                expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
                - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
            )
        return x_t
826
+
827
+ def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
828
+ r2=None):
829
+ """
830
+ Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
831
+ Args:
832
+ x: A pytorch tensor. The initial value at time `s`.
833
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
834
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
835
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
836
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
837
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
838
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
839
+ r1: A `float`. The hyperparameter of the second-order or third-order solver.
840
+ r2: A `float`. The hyperparameter of the third-order solver.
841
+ Returns:
842
+ x_t: A pytorch tensor. The approximated solution at time `t`.
843
+ """
844
+ if order == 1:
845
+ return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
846
+ elif order == 2:
847
+ return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
848
+ solver_type=solver_type, r1=r1)
849
+ elif order == 3:
850
+ return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
851
+ solver_type=solver_type, r1=r1, r2=r2)
852
+ else:
853
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
854
+
855
+ def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
856
+ """
857
+ Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
858
+ Args:
859
+ x: A pytorch tensor. The initial value at time `s`.
860
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
861
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
862
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
863
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
864
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
865
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
866
+ Returns:
867
+ x_t: A pytorch tensor. The approximated solution at time `t`.
868
+ """
869
+ if order == 1:
870
+ return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
871
+ elif order == 2:
872
+ return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
873
+ elif order == 3:
874
+ return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
875
+ else:
876
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
877
+
878
    def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
                            solver_type='dpm_solver'):
        """
        The adaptive step size solver based on singlestep DPM-Solver.
        Args:
            x: A pytorch tensor. The initial value at time `t_T`.
            order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.
            t_T: A `float`. The starting time of the sampling (default is T).
            t_0: A `float`. The ending time of the sampling (default is epsilon).
            h_init: A `float`. The initial step size (for logSNR).
            atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].
            rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
            theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].
            t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
                current time and `t_0` is less than `t_err`. The default setting is 1e-5.
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_0: A pytorch tensor. The approximated solution at time `t_0`.
        [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
        """
        ns = self.noise_schedule
        # Per-sample current time and step size, kept as tensors on x's device.
        s = t_T * torch.ones((x.shape[0],)).to(x)
        lambda_s = ns.marginal_lambda(s)
        lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
        h = h_init * torch.ones_like(s).to(x)
        x_prev = x
        nfe = 0
        # Pair a lower-order and a higher-order update; their difference is
        # the local error estimate used to accept/reject each step.
        if order == 2:
            r1 = 0.5
            lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
                                                                                               solver_type=solver_type,
                                                                                               **kwargs)
        elif order == 3:
            r1, r2 = 1. / 3., 2. / 3.
            lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
                                                                                    return_intermediate=True,
                                                                                    solver_type=solver_type)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
                                                                                              solver_type=solver_type,
                                                                                              **kwargs)
        else:
            raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
        while torch.abs((s - t_0)).mean() > t_err:
            t = ns.inverse_lambda(lambda_s + h)
            # The lower-order update also returns the intermediate model values
            # so the higher-order update can reuse them (saves NFE).
            x_lower, lower_noise_kwargs = lower_update(x, s, t)
            x_higher = higher_update(x, s, t, **lower_noise_kwargs)
            # Mixed absolute/relative tolerance, as in [1].
            delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
            norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
            E = norm_fn((x_higher - x_lower) / delta).max()
            if torch.all(E <= 1.):
                # Accept the step; otherwise retry from the same point with a
                # smaller h (updated below in either case).
                x = x_higher
                s = t
                x_prev = x_lower
                lambda_s = ns.marginal_lambda(s)
            # Standard controller: shrink/grow h by E^(-1/order) with safety
            # factor theta, clipped so we never overshoot lambda_0.
            h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
            nfe += order
        print('adaptive solver nfe', nfe)
        return x
938
+
939
    def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
               method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
               atol=0.0078, rtol=0.05,
               ):
        """
        Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
        =====================================================
        We support the following algorithms for both noise prediction model and data prediction model:
        - 'singlestep':
            Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
            We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
            The total number of function evaluations (NFE) == `steps`.
            Given a fixed NFE == `steps`, the sampling procedure is:
                - If `order` == 1:
                    - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
                - If `order` == 2:
                    - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
                    - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
                    - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
                - If `order` == 3:
                    - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
                    - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
                    - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
                    - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
        - 'multistep':
            Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
            We initialize the first `order` values by lower order multistep solvers.
            Given a fixed NFE == `steps`, the sampling procedure is:
                Denote K = steps.
                - If `order` == 1:
                    - We use K steps of DPM-Solver-1 (i.e. DDIM).
                - If `order` == 2:
                    - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.
                - If `order` == 3:
                    - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.
        - 'singlestep_fixed':
            Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
            We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
        - 'adaptive':
            Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
            We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
            You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs
            (NFE) and the sample quality.
                - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
                - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
        =====================================================
        Some advices for choosing the algorithm:
            - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
                Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
                e.g.
                    >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
                    >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
                            skip_type='time_uniform', method='singlestep')
            - For **guided sampling with large guidance scale** by DPMs:
                Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
                e.g.
                    >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
                    >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
                            skip_type='time_uniform', method='multistep')
        We support three types of `skip_type`:
            - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**
            - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.
            - 'time_quadratic': quadratic time for the time steps.
        =====================================================
        Args:
            x: A pytorch tensor. The initial value at time `t_start`
                e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
            steps: A `int`. The total number of function evaluations (NFE).
            t_start: A `float`. The starting time of the sampling.
                If `T` is None, we use self.noise_schedule.T (default is 1.0).
            t_end: A `float`. The ending time of the sampling.
                If `t_end` is None, we use 1. / self.noise_schedule.total_N.
                e.g. if total_N == 1000, we have `t_end` == 1e-3.
                For discrete-time DPMs:
                    - We recommend `t_end` == 1. / self.noise_schedule.total_N.
                For continuous-time DPMs:
                    - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
            order: A `int`. The order of DPM-Solver.
            skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
            method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
            denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
                Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
                This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and
                score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID
                for diffusion models sampling by diffusion SDEs for low-resolutional images
                (such as CIFAR-10). However, we observed that such trick does not matter for
                high-resolutional images. As it needs an additional NFE, we do not recommend
                it for high-resolutional images.
            lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
                Only valid for `method=multistep` and `steps < 15`. We empirically find that
                this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
                (especially for steps <= 10). So we recommend to set it to be `True`.
            solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
            atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
            rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
        Returns:
            x_end: A pytorch tensor. The approximated solution at time `t_end`.
        """
        t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
        t_T = self.noise_schedule.T if t_start is None else t_start
        device = x.device
        if method == 'adaptive':
            with torch.no_grad():
                x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
                                             solver_type=solver_type)
        elif method == 'multistep':
            assert steps >= order
            timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
            assert timesteps.shape[0] - 1 == steps
            with torch.no_grad():
                vec_t = timesteps[0].expand((x.shape[0]))
                model_prev_list = [self.model_fn(x, vec_t)]
                t_prev_list = [vec_t]
                # Init the first `order` values by lower order multistep DPM-Solver.
                for init_order in tqdm(range(1, order), desc="DPM init order"):
                    vec_t = timesteps[init_order].expand(x.shape[0])
                    x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
                                                         solver_type=solver_type)
                    model_prev_list.append(self.model_fn(x, vec_t))
                    t_prev_list.append(vec_t)
                # Compute the remaining values by `order`-th order multistep DPM-Solver.
                for step in tqdm(range(order, steps + 1), desc="DPM multistep"):
                    vec_t = timesteps[step].expand(x.shape[0])
                    if lower_order_final and steps < 15:
                        # Drop to lower order near the end to stabilize few-step sampling.
                        step_order = min(order, steps + 1 - step)
                    else:
                        step_order = order
                    x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order,
                                                         solver_type=solver_type)
                    # Shift the history windows left by one slot.
                    for i in range(order - 1):
                        t_prev_list[i] = t_prev_list[i + 1]
                        model_prev_list[i] = model_prev_list[i + 1]
                    t_prev_list[-1] = vec_t
                    # We do not need to evaluate the final model value.
                    if step < steps:
                        model_prev_list[-1] = self.model_fn(x, vec_t)
        elif method in ['singlestep', 'singlestep_fixed']:
            if method == 'singlestep':
                timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order,
                                                                                              skip_type=skip_type,
                                                                                              t_T=t_T, t_0=t_0,
                                                                                              device=device)
            elif method == 'singlestep_fixed':
                K = steps // order
                orders = [order, ] * K
                timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
            for i, order in enumerate(orders):
                # Each outer interval is solved by one singlestep update whose
                # intermediate ratios r1/r2 are derived from the inner logSNR grid.
                t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
                timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(),
                                                      N=order, device=device)
                lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
                vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
                h = lambda_inner[-1] - lambda_inner[0]
                r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
                r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
                x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
        if denoise_to_zero:
            # Optional extra NFE: one final denoising step to time ~0.
            x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
        return x
1098
+
1099
+
1100
+ #############################################################
1101
+ # other utility functions
1102
+ #############################################################
1103
+
1104
def interpolate_fn(x, xp, yp):
    """
    A piecewise linear function y = f(x), using xp and yp as keypoints.
    We implement f(x) in a differentiable way (i.e. applicable for autograd).
    The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.)
    Args:
        x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
        xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
        yp: PyTorch tensor with shape [C, K].
    Returns:
        The function values f(x), with shape [N, C].
    """
    N, K = x.shape[0], xp.shape[1]
    # Sort x together with the keypoints; the rank of x within the sorted row
    # tells us which keypoint interval x falls into (differentiable lookup
    # instead of a data-dependent branch).
    all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
    sorted_all_x, x_indices = torch.sort(all_x, dim=2)
    # x was concatenated at index 0, so argmin of the permutation finds its
    # position in the sorted order.
    x_idx = torch.argmin(x_indices, dim=2)
    cand_start_idx = x_idx - 1
    # Clamp the interval at both ends so out-of-range x extrapolates linearly
    # from the outermost segment.
    start_idx = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(1, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
    # Segment endpoints on the x-axis (gathered from the sorted array, which
    # still contains x itself — hence the +2/+1 offset logic above).
    start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
    end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
    # Indices into the ORIGINAL keypoint array (without x inserted) for the y values.
    start_idx2 = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(0, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
    start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
    end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
    # Standard linear interpolation on the selected segment.
    cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
    return cand
1143
+
1144
+
1145
def expand_dims(v, dims):
    """
    Expand the tensor `v` to the dim `dims` by appending trailing singleton axes.
    Args:
        `v`: a PyTorch tensor with shape [N].
        `dims`: a `int`, the target total number of dimensions.
    Returns:
        a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
    """
    # Append (dims - 1) size-1 axes at the end, one at a time.
    expanded = v
    for _ in range(dims - 1):
        expanded = expanded.unsqueeze(-1)
    return expanded
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/sampler.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SAMPLING ONLY."""
2
+ import torch
3
+
4
+ from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
5
+
6
+
7
# Maps the LDM model's `parameterization` name to the `model_type` string
# expected by dpm_solver.model_wrapper ("eps" -> noise prediction, "v" -> v-prediction).
MODEL_TYPES = {
    "eps": "noise",
    "v": "v"
}
11
+
12
+
13
class DPMSolverSampler(object):
    """Sampler that draws images from a LatentDiffusion model using DPM-Solver.

    Wraps the model's noise/v prediction with `model_wrapper` (including
    classifier-free guidance) and runs a 2nd-order multistep DPM-Solver.
    """

    def __init__(self, model, **kwargs):
        """Store the model and cache its `alphas_cumprod` schedule as float32."""
        super().__init__()
        self.model = model
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
        self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))

    def register_buffer(self, name, attr):
        """Attach `attr` as an attribute, moving tensors to CUDA when available.

        Bug fix: the original unconditionally moved tensors to
        torch.device("cuda"), which raises on CPU-only (or HPU) installations.
        We now only migrate when a CUDA device is actually available; otherwise
        the tensor keeps its current device.
        """
        if type(attr) == torch.Tensor and torch.cuda.is_available():
            if attr.device != torch.device("cuda"):
                attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
               **kwargs
               ):
        """Run DPM-Solver sampling.

        Args:
            S: number of sampling steps (NFE).
            batch_size: number of samples to draw.
            shape: latent shape (C, H, W) without the batch dimension.
            conditioning: conditioning tensor or dict of tensors.
            x_T: optional initial noise; drawn from N(0, I) when None.
            unconditional_guidance_scale / unconditional_conditioning:
                classifier-free guidance scale and the unconditional condition.
            Remaining keyword arguments are accepted for interface
            compatibility with the other samplers and are ignored here.
        Returns:
            A tuple `(x, None)`: the sampled latents (on the model's device)
            and a placeholder matching the DDIM sampler's return signature.
        """
        if conditioning is not None:
            # Sanity check: conditioning batch size should match the requested one.
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)

        print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')

        device = self.model.betas.device
        if x_T is None:
            img = torch.randn(size, device=device)
        else:
            img = x_T

        # Discrete-time VP noise schedule built from the cached alphas_cumprod.
        ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)

        # Wrap the LDM apply_model call with classifier-free guidance.
        model_fn = model_wrapper(
            lambda x, t, c: self.model.apply_model(x, t, c),
            ns,
            model_type=MODEL_TYPES[self.model.parameterization],
            guidance_type="classifier-free",
            condition=conditioning,
            unconditional_condition=unconditional_conditioning,
            guidance_scale=unconditional_guidance_scale,
        )

        # predict_x0=True + 2nd-order multistep is the recommended setting for
        # guided sampling with large guidance scales.
        dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
        x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True)

        return x.to(device), None
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpmpp_2m.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ import torch
5
+ import k_diffusion as K
6
+ import numpy as np
7
+
8
+ from ldm.models.diffusion.sampler import Sampler
9
+
10
+
11
class DPMPP2M_Sampler(Sampler):
    """DPM-Solver++(2M) multistep sampler on top of k-diffusion denoiser wraps.

    The per-step scalar coefficients are precomputed once in ``compile`` and
    stored as a tensor, so each sampling step is a fixed tensor expression
    (``x = a*x + b*denoised + c*old_denoised``).
    # NOTE(review): this precompute-then-step structure presumably exists for
    # graph-compiling backends (Habana/HPU) — confirm against the caller.
    """

    def __init__(self, model, v_mode, **kwargs):
        # v_mode selects the v-prediction wrapper instead of the eps wrapper.
        super().__init__()
        self.model = model
        if v_mode:
            self.model_wrap = K.external.CompVisVDenoiser(model)
        else:
            self.model_wrap = K.external.CompVisDenoiser(model)

    def generate_params(self, sigmas):
        """DPM-Solver++(2M).

        Precompute, for every step i, the coefficients (a, b, c) of the
        update ``x = a*x + b*denoised + c*old_denoised`` plus the step's
        sigma. Returns a tensor of shape (4, len(sigmas) - 1) whose rows
        are a, b, c, sigma.
        """
        # Based on https://github.com/crowsonkb/k-diffusion/blob/v0.0.14/k_diffusion/sampling.py#L585
        device = sigmas.device
        # Coefficients are computed on CPU with scalar tensors, then moved back.
        sigmas = sigmas.cpu()
        def sigma_fn(t): return t.neg().exp()
        def t_fn(sigma): return sigma.log().neg()
        params = []
        for i in range(len(sigmas) - 1):
            sigma = sigmas[i]
            t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
            h = t_next - t
            a = sigma_fn(t_next) / sigma_fn(t)
            if i == 0 or sigmas[i + 1] == 0:
                # First step and final (sigma==0) step fall back to first order.
                b = 1.0
                c = 0.0
            else:
                # Second-order multistep correction using the previous step size.
                h_last = t - t_fn(sigmas[i - 1])
                r = h_last / h
                b = 1 + 1 / (2 * r)
                c = 1 / (2 * r)
            b *= - (-h).expm1()
            c *= (-h).expm1()
            p = np.array([a.numpy(), b.numpy(), c.numpy(), sigma.numpy()])
            params.append(p)
        # Stack to (steps, 4) then transpose to (4, steps): rows a, b, c, sigma.
        params = torch.Tensor(np.stack(params, axis=0)
                              ).transpose(0, 1).to(device)
        return params

    @torch.no_grad()
    def compile(self,
                S,
                shape,
                unconditional_guidance_scale=1.,
                batch_size=1,
                **kwargs
                ):
        """Precompute the sigma schedule and per-step coefficients for S steps.

        ``shape`` is the per-sample latent shape (list); ``unconditional_guidance_scale``
        is the classifier-free guidance scale used by ``run_model``.
        """
        self.sigmas = self.model_wrap.get_sigmas(S)
        self.params = self.generate_params(self.sigmas)
        self.cond_scale = unconditional_guidance_scale
        # Zero buffer standing in for "old_denoised" on the first step
        # (its coefficient c is 0 there, so the value is irrelevant).
        self.old_denoised_zeros = self.sigmas.new_zeros([batch_size] + shape)
        # First-step sigma, used by the caller to scale the initial noise.
        self.rand_scale = self.params[3, 0].to(torch.float32).cpu()
        self.batch_size = batch_size

    def one_step(self, x, c_in, old_denoised, param_t):
        """Apply one DPM-Solver++(2M) update with the step's (a, b, c, sigma)."""
        a, b, c, sigma = param_t.chunk(4)
        sigma = sigma.broadcast_to((self.batch_size)).contiguous()
        denoised = self.run_model(x, c_in, sigma)
        x = a * x + b * denoised + c * old_denoised
        return x, denoised

    def sampler_step(self, arg):
        """One loop-body iteration: consume the front coefficient column, then rotate."""
        x, c_in, params, old_denoised = arg
        x, denoised = self.one_step(x, c_in, old_denoised, params[:, 0])
        # Roll so the next step's coefficients are again at column 0.
        params = torch.roll(params, shifts=-1, dims=1)
        return [x, c_in, params, denoised]

    def init_loop(self, x, c_in):
        """Build the initial loop-carried state for ``sampler_step``."""
        return [x, c_in, self.params.clone(), self.old_denoised_zeros.clone()]
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/plms.py ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SAMPLING ONLY."""
2
+
3
+ import torch
4
+ import numpy as np
5
+ from tqdm import tqdm
6
+ from functools import partial
7
+
8
+ from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
9
+ from ldm.models.diffusion.sampling_util import norm_thresholding
10
+
11
+
12
class PLMSSampler(object):
    """Pseudo Linear Multi-Step (PLMS) sampler for a latent diffusion model.

    Wraps a trained DDPM-style model and draws samples with the PLMS scheme
    (Adams-Bashforth multistep on the predicted noise), using DDIM timesteps
    with eta forced to 0.
    """

    def __init__(self, model, schedule="linear", **kwargs):
        # model: trained diffusion model exposing alphas_cumprod, apply_model, etc.
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        """Store a schedule tensor as a plain attribute (not an nn.Module buffer)."""
        if type(attr) == torch.Tensor:
            # NOTE(review): tensors are force-moved to CUDA here — presumably
            # assumes a CUDA backend; confirm for CPU/HPU runs.
            if attr.device != torch.device("cuda"):
                attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        """Precompute the DDIM timestep subset and all alpha/sigma schedules.

        :raises ValueError: if ddim_eta != 0 (PLMS is only defined for eta=0).
        """
        if ddim_eta != 0:
            raise ValueError('ddim_eta must be 0 for PLMS')
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)

        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))

        # ddim sampling parameters (sigmas are all zero since ddim_eta == 0)
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta,verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                        1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, e.g. as encoded tokens
               dynamic_threshold=None,
               **kwargs
               ):
        """Draw ``batch_size`` samples with ``S`` PLMS steps.

        :param S: number of sampling steps.
        :param shape: per-sample latent shape (C, H, W).
        :param conditioning: conditioning tensor (or dict of tensors); batch
            dim should match batch_size (only warned about, not enforced).
        :return: (samples, intermediates) where intermediates logs x_inter /
            pred_x0 every ``log_every_t`` steps.
        """
        if conditioning is not None:
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for PLMS sampling is {size}')

        samples, intermediates = self.plms_sampling(conditioning, size,
                                                    callback=callback,
                                                    img_callback=img_callback,
                                                    quantize_denoised=quantize_x0,
                                                    mask=mask, x0=x0,
                                                    ddim_use_original_steps=False,
                                                    noise_dropout=noise_dropout,
                                                    temperature=temperature,
                                                    score_corrector=score_corrector,
                                                    corrector_kwargs=corrector_kwargs,
                                                    x_T=x_T,
                                                    log_every_t=log_every_t,
                                                    unconditional_guidance_scale=unconditional_guidance_scale,
                                                    unconditional_conditioning=unconditional_conditioning,
                                                    dynamic_threshold=dynamic_threshold,
                                                    )
        return samples, intermediates

    @torch.no_grad()
    def plms_sampling(self, cond, shape,
                      x_T=None, ddim_use_original_steps=False,
                      callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, log_every_t=100,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None,
                      dynamic_threshold=None):
        """Main PLMS loop: iterate timesteps in reverse, keeping the last
        three noise predictions (``old_eps``) for the multistep formula."""
        device = self.model.betas.device
        b = shape[0]
        # Start from provided latents or pure Gaussian noise.
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        if timesteps is None:
            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        elif timesteps is not None and not ddim_use_original_steps:
            # Truncate the DDIM schedule to a prefix proportional to `timesteps`.
            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
            timesteps = self.ddim_timesteps[:subset_end]

        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        print(f"Running PLMS Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
        old_eps = []

        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            # PLMS also needs the *next* timestep for the first-step Euler correction.
            ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)

            if mask is not None:
                # Inpainting: keep masked region on the forward trajectory of x0.
                assert x0 is not None
                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
                img = img_orig * mask + (1. - mask) * img

            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      old_eps=old_eps, t_next=ts_next,
                                      dynamic_threshold=dynamic_threshold)
            img, pred_x0, e_t = outs
            # Keep at most the last 3 noise predictions for the 4th-order step.
            old_eps.append(e_t)
            if len(old_eps) >= 4:
                old_eps.pop(0)
            if callback: callback(i)
            if img_callback: img_callback(pred_x0, i)

            if index % log_every_t == 0 or index == total_steps - 1:
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)

        return img, intermediates

    @torch.no_grad()
    def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None,
                      dynamic_threshold=None):
        """One PLMS step: returns (x_prev, pred_x0, e_t) for timestep ``index``.

        The multistep order grows with the number of stored predictions in
        ``old_eps`` (Euler-corrector on the first step, then 2nd/3rd/4th-order
        Adams-Bashforth).
        """
        b, *_, device = *x.shape, x.device

        def get_model_output(x, t):
            # Classifier-free guidance: batch the unconditional and conditional
            # branches into one model call, then combine.
            if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
                e_t = self.model.apply_model(x, t, c)
            else:
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t] * 2)
                c_in = torch.cat([unconditional_conditioning, c])
                e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
                e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)

            if score_corrector is not None:
                assert self.model.parameterization == "eps"
                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)

            return e_t

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas

        def get_x_prev_and_pred_x0(e_t, index):
            # select parameters corresponding to the currently considered timestep
            a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
            sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)

            # current prediction for x_0
            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
            if quantize_denoised:
                pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
            if dynamic_threshold is not None:
                pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
            # direction pointing to x_t
            dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
            # sigma_t is 0 for PLMS (eta==0), so this noise term vanishes.
            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
            if noise_dropout > 0.:
                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
            return x_prev, pred_x0

        e_t = get_model_output(x, t)
        if len(old_eps) == 0:
            # Pseudo Improved Euler (2nd order)
            x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
            e_t_next = get_model_output(x_prev, t_next)
            e_t_prime = (e_t + e_t_next) / 2
        elif len(old_eps) == 1:
            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (3 * e_t - old_eps[-1]) / 2
        elif len(old_eps) == 2:
            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
        elif len(old_eps) >= 3:
            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24

        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)

        return x_prev, pred_x0, e_t
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/sampler.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ import torch
5
+
6
+
7
class Sampler(object):
    """Minimal base class for k-diffusion style samplers.

    Subclasses are expected to provide ``self.model_wrap`` (a denoiser
    callable) and ``self.cond_scale`` (the classifier-free guidance scale)
    before ``run_model`` is invoked.
    """

    def __init__(self, **kwargs):
        super().__init__()

    @torch.no_grad()
    def compile(self, S, shape, **kwargs):
        """Hook for precomputing sampler state; a no-op in the base class."""
        pass

    def run_model(self, x, c_in, sigma):
        """Evaluate the denoiser once with classifier-free guidance applied."""
        # Stack the unconditional and conditional branches into one batch so a
        # single forward pass covers both.
        batched_x = torch.cat((x, x))
        batched_sigma = torch.cat((sigma, sigma))
        eps_uncond, eps_cond = self.model_wrap(batched_x, batched_sigma, cond=c_in).chunk(2)
        # Standard CFG combination: uncond + scale * (cond - uncond).
        return eps_uncond + self.cond_scale * (eps_cond - eps_uncond)
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/sampling_util.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+
4
+
5
def append_dims(x, target_dims):
    """Append trailing singleton dimensions to *x* until it has target_dims dims.

    From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py
    """
    extra = target_dims - x.ndim
    # Guard: cannot remove dimensions, only add them.
    if extra < 0:
        raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
    return x[(...,) + (None,) * extra]
12
+
13
+
14
def norm_thresholding(x0, value):
    """Shrink each sample of x0 whose per-sample RMS exceeds *value*.

    Samples with RMS below the threshold are left unchanged (the clamp makes
    the scale factor exactly 1 for them).
    """
    # Per-sample RMS over all non-batch dimensions, clamped from below.
    rms = x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value)
    # Re-append singleton dims so the norm broadcasts against x0.
    rms = rms[(...,) + (None,) * (x0.ndim - rms.ndim)]
    return x0 * (value / rms)
17
+
18
+
19
def spatial_norm_thresholding(x0, value):
    """Shrink each spatial position of x0 (b, c, h, w) whose channel RMS
    exceeds *value*; positions under the threshold are unchanged."""
    # RMS across channels at each (h, w) location, floored at `value`.
    channel_rms = x0.pow(2).mean(1, keepdim=True).sqrt()
    s = torch.clamp(channel_rms, min=value)
    return x0 * (value / s)
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/diffusionmodules/openaimodel.py ADDED
@@ -0,0 +1,786 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod
2
+ import math
3
+
4
+ import numpy as np
5
+ import torch as th
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+ from ldm.modules.diffusionmodules.util import (
10
+ checkpoint,
11
+ conv_nd,
12
+ linear,
13
+ avg_pool_nd,
14
+ zero_module,
15
+ normalization,
16
+ timestep_embedding,
17
+ )
18
+ from ldm.modules.attention import SpatialTransformer
19
+ from ldm.util import exists
20
+
21
+
22
+ # dummy replace
23
def convert_module_to_f16(x):
    """No-op stub kept for API compatibility with the upstream OpenAI UNet,
    where this converted a module's weights to float16."""
    pass
25
+
26
def convert_module_to_f32(x):
    """No-op stub kept for API compatibility with the upstream OpenAI UNet,
    where this converted a module's weights back to float32."""
    pass
28
+
29
+
30
+ ## go
31
class AttentionPool2d(nn.Module):
    """
    Attention-based global pooling over a 2D feature map.
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py

    A mean token is prepended to the flattened spatial tokens, positional
    embeddings are added, and QKV attention is run; the output at the mean
    token's position is returned as the pooled feature.
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        # One positional embedding per spatial token plus one for the mean token.
        self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        b, c, *_spatial = x.shape
        x = x.reshape(b, c, -1)  # NC(HW)
        # Prepend the spatial mean as an extra token (attention queries it too).
        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
        x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
        x = self.qkv_proj(x)
        x = self.attention(x)
        x = self.c_proj(x)
        # Return only the (attended) mean-token position as the pooled output.
        return x[:, :, 0]
59
+
60
+
61
class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.

    Used as a marker interface by TimestepEmbedSequential to decide which
    children receive the embedding.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
71
+
72
+
73
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential module that passes timestep embeddings to the children that
    support it as an extra input.

    Dispatch per child: TimestepBlock children get (x, emb),
    SpatialTransformer children get (x, context), everything else gets x only.
    """

    def forward(self, x, emb, context=None):
        for layer in self:
            if isinstance(layer, TimestepBlock):
                # Residual blocks etc. consume the timestep embedding.
                x = layer(x, emb)
            elif isinstance(layer, SpatialTransformer):
                # Cross-attention layers consume the conditioning context.
                x = layer(x, context)
            else:
                x = layer(x)
        return x
88
+
89
+
90
class Upsample(nn.Module):
    """Nearest-neighbour 2x upsampling with an optional trailing convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: whether to apply a 3x3 convolution after upsampling.
    :param dims: 1, 2 or 3; for 3D signals only the inner two dimensions
        are upsampled (depth is preserved).
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # 3D: keep depth, double only the two spatial dimensions.
            target = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            upsampled = F.interpolate(x, target, mode="nearest")
        else:
            upsampled = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(upsampled) if self.use_conv else upsampled
119
+
120
class TransposedUpsample(nn.Module):
    'Learned 2x upsampling without padding'
    # Uses a stride-2 transposed convolution, so the upsampling kernel is
    # learned (unlike the fixed nearest-neighbour Upsample above).

    def __init__(self, channels, out_channels=None, ks=5):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels

        # ks: kernel size of the transposed conv; with stride=2 and no padding
        # the output is slightly larger than exactly 2x (by ks - 2).
        self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)

    def forward(self,x):
        return self.up(x)
131
+
132
+
133
class Downsample(nn.Module):
    """2x downsampling via strided convolution or average pooling.

    :param channels: channels in the inputs and outputs.
    :param use_conv: if True use a strided 3x3 convolution, otherwise average
        pooling (which requires out_channels == channels).
    :param dims: 1, 2 or 3; for 3D signals only the inner two dimensions
        are downsampled (depth is preserved).
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # 3D inputs keep their depth dimension; only H and W are halved.
        stride = (1, 2, 2) if dims == 3 else 2
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            # Pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
160
+
161
+
162
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        # norm -> SiLU -> conv; for up/down blocks the final conv is applied
        # after resampling (see _forward).
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        if up:
            # Resample both the residual branch (h_upd) and the skip input (x_upd).
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                # scale-shift norm needs two vectors (scale and shift).
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            # Final conv is zero-initialized so the block starts as identity.
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        # Optionally trade compute for memory via gradient checkpointing.
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )


    def _forward(self, x, emb):
        if self.updown:
            # Apply norm+SiLU, resample, then the conv — and resample the skip
            # input with the same operator so shapes match.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the embedding over the spatial dimensions.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # FiLM-style conditioning: modulate the normalized activations.
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            # Additive conditioning before the output stack.
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
275
+
276
+
277
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        # Either a fixed head count or a fixed per-head channel width.
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        if use_new_attention_order:
            # split qkv before split heads
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)

        # Zero-initialized projection so the block starts as identity.
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        # NOTE(review): checkpointing is unconditionally enabled here (True)
        # rather than self.use_checkpoint — upstream left this with open TODOs.
        return checkpoint(self._forward, (x,), self.parameters(), True)  # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
        #return pt_checkpoint(self._forward, x)  # pytorch

    def _forward(self, x):
        # Flatten all spatial dims into one token axis, attend, then restore.
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        # Residual connection around the attention.
        return (x + h).reshape(b, c, *spatial)
324
+
325
+
326
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    batch, channels, *spatial_dims = y[0].shape
    tokens = int(np.prod(spatial_dims))
    # Attention performs two matmuls of identical cost: the QK^T weight
    # matrix and the weighted combination of the value vectors.
    model.total_ops += th.DoubleTensor([2 * batch * channels * tokens ** 2])
344
+
345
+
346
class QKVAttentionLegacy(nn.Module):
    """
    QKV attention matching the legacy layout: heads are split first, then the
    fused tensor is carved into Q, K, V. Matches legacy QKVAttention +
    input/output heads shaping.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, tokens = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        # Fold heads into the batch dim, then split the fused axis into Q, K, V.
        q, k, v = qkv.reshape(batch * self.n_heads, head_dim * 3, tokens).split(head_dim, dim=1)
        # Scale each operand by head_dim**-0.25 so the product carries the usual
        # 1/sqrt(head_dim) factor while staying f16-friendly.
        scale = 1 / math.sqrt(math.sqrt(head_dim))
        weight = th.einsum(
            "bct,bcs->bts", q * scale, k * scale
        )
        # Softmax in float32 for stability, then cast back.
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = th.einsum("bts,bcs->bct", weight, v)
        return out.reshape(batch, -1, tokens)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
376
+
377
+
378
class QKVAttention(nn.Module):
    """
    QKV attention with the newer layout: the fused tensor is carved into
    Q, K, V first, and heads are split afterwards.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, tokens = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        # Split Q, K, V first, then fold heads into the batch dimension.
        q, k, v = qkv.chunk(3, dim=1)
        # Scale each operand by head_dim**-0.25 (f16-friendly 1/sqrt(head_dim)).
        scale = 1 / math.sqrt(math.sqrt(head_dim))
        q_heads = (q * scale).view(batch * self.n_heads, head_dim, tokens)
        k_heads = (k * scale).view(batch * self.n_heads, head_dim, tokens)
        weight = th.einsum("bct,bcs->bts", q_heads, k_heads)
        # Softmax in float32 for stability, then cast back.
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = th.einsum("bts,bcs->bct", weight, v.reshape(batch * self.n_heads, head_dim, tokens))
        return out.reshape(batch, -1, tokens)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
410
+
411
+
412
class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.
    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_heads_channels: if specified, ignore num_heads and instead use
                               a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
                               of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially
                                    increased efficiency.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,    # custom transformer support
        transformer_depth=1,              # custom transformer support
        context_dim=None,                 # custom transformer support
        n_embed=None,  # custom support for prediction of discrete ids into codebook of first stage vq model
        legacy=True,
        disable_self_attentions=None,
        num_attention_blocks=None,
        disable_middle_self_attn=False,
        use_linear_in_transformer=False,
    ):
        super().__init__()
        # Cross-attention conditioning requires both the spatial transformer
        # and a context dimension; fail fast on an inconsistent config.
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
            from omegaconf.listconfig import ListConfig
            # OmegaConf ListConfig is not a plain list; normalize it so
            # downstream code can treat per-level context dims uniformly.
            if type(context_dim) == ListConfig:
                context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'

        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        # num_res_blocks may be given globally (int) or per level (sequence);
        # normalize to a per-level list matching channel_mult.
        if isinstance(num_res_blocks, int):
            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
        else:
            if len(num_res_blocks) != len(channel_mult):
                raise ValueError("provide num_res_blocks either as an int (globally constant) or "
                                 "as a list/tuple (per-level) with the same length as channel_mult")
            self.num_res_blocks = num_res_blocks
        if disable_self_attentions is not None:
            # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
            assert len(disable_self_attentions) == len(channel_mult)
        if num_attention_blocks is not None:
            assert len(num_attention_blocks) == len(self.num_res_blocks)
            assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
            print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
                  f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
                  f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
                  f"attention will still not be set.")

        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        self.predict_codebook_ids = n_embed is not None

        # Timestep embedding MLP: sinusoidal features -> 4x wider hidden dim.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            if isinstance(self.num_classes, int):
                self.label_emb = nn.Embedding(num_classes, time_embed_dim)
            elif self.num_classes == "continuous":
                print("setting up linear c_adm embedding layer")
                self.label_emb = nn.Linear(1, time_embed_dim)
            else:
                raise ValueError()

        # ----- encoder (downsampling) path -----
        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        # Channel counts of every encoder output, consumed (LIFO) by the
        # decoder skip connections.
        input_block_chans = [model_channels]
        ch = model_channels
        # ds tracks the current downsampling rate; attention is inserted
        # wherever ds appears in attention_resolutions.
        ds = 1
        for level, mult in enumerate(channel_mult):
            for nr in range(self.num_res_blocks[level]):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False

                    if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
                        layers.append(
                            AttentionBlock(
                                ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads,
                                num_head_channels=dim_head,
                                use_new_attention_order=use_new_attention_order,
                            ) if not use_spatial_transformer else SpatialTransformer(
                                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                                use_checkpoint=use_checkpoint
                            )
                        )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            # Downsample between levels (but not after the last one).
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        # ----- bottleneck: ResBlock -> attention -> ResBlock -----
        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            #num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=dim_head,
                use_new_attention_order=use_new_attention_order,
            ) if not use_spatial_transformer else SpatialTransformer(  # always uses a self-attn
                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
                use_checkpoint=use_checkpoint
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        # ----- decoder (upsampling) path, mirror of the encoder -----
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(self.num_res_blocks[level] + 1):
                # Pop the matching encoder channel count for the skip concat.
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False

                    if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
                        layers.append(
                            AttentionBlock(
                                ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads_upsample,
                                num_head_channels=dim_head,
                                use_new_attention_order=use_new_attention_order,
                            ) if not use_spatial_transformer else SpatialTransformer(
                                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                                use_checkpoint=use_checkpoint
                            )
                        )
                # Upsample at the end of each level except the outermost.
                if level and i == self.num_res_blocks[level]:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        # Final projection back to out_channels; zero-init so the model
        # starts as (near) identity in the residual sense.
        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            self.id_predictor = nn.Sequential(
                normalization(ch),
                conv_nd(dims, model_channels, n_embed, 1),
                #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
            )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    def forward(self, x, timesteps=None, context=None, y=None,**kwargs):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via crossattn
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)

        if self.num_classes is not None:
            assert y.shape[0] == x.shape[0]
            emb = emb + self.label_emb(y)

        # Encoder: record every intermediate activation for the skip concats.
        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb, context)
            hs.append(h)
        h = self.middle_block(h, emb, context)
        # Decoder: concatenate the matching encoder activation (LIFO) before
        # each output block.
        for module in self.output_blocks:
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context)
        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            return self.id_predictor(h)
        else:
            return self.out(h)
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/distributions/__init__.py ADDED
File without changes
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/distributions/distributions.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+
4
+
5
class AbstractDistribution:
    """Minimal interface for the distributions used by the autoencoders."""

    def sample(self):
        """Draw one sample; concrete subclasses must override."""
        raise NotImplementedError()

    def mode(self):
        """Return the distribution's mode; concrete subclasses must override."""
        raise NotImplementedError()
11
+
12
+
13
class DiracDistribution(AbstractDistribution):
    """Degenerate distribution with all probability mass on one value."""

    def __init__(self, value):
        self.value = value

    def sample(self):
        # A point mass always yields the stored value.
        return self.value

    def mode(self):
        return self.value
22
+
23
+
24
class DiagonalGaussianDistribution(object):
    """Gaussian with diagonal covariance.

    ``parameters`` packs mean and log-variance along dim 1 (split by
    ``torch.chunk``); ``deterministic=True`` collapses the distribution onto
    its mean by forcing zero variance.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # Clamp so exp() stays finite in both directions.
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            zeros = torch.zeros_like(self.mean).to(device=self.parameters.device)
            self.var = self.std = zeros

    def sample(self):
        """Draw one reparameterized sample (mean + std * noise)."""
        noise = torch.randn(self.mean.shape).to(device=self.parameters.device)
        return self.mean + self.std * noise

    def kl(self, other=None):
        """KL divergence to ``other`` (or to the standard normal if None),
        summed over all non-batch dims."""
        if self.deterministic:
            return torch.Tensor([0.])
        if other is None:
            return 0.5 * torch.sum(
                torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                dim=[1, 2, 3])
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var - 1.0 - self.logvar + other.logvar,
            dim=[1, 2, 3])

    def nll(self, sample, dims=[1,2,3]):
        """Negative log-likelihood of ``sample`` under this Gaussian."""
        if self.deterministic:
            return torch.Tensor([0.])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims)

    def mode(self):
        # For a Gaussian the mode coincides with the mean.
        return self.mean
63
+
64
+
65
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
    Compute the KL divergence between two gaussians.
    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    # Locate any tensor among the arguments so scalars can be promoted
    # onto its dtype/device.
    tensor = next(
        (obj for obj in (mean1, logvar1, mean2, logvar2)
         if isinstance(obj, torch.Tensor)),
        None,
    )
    assert tensor is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for torch.exp().
    logvar1, logvar2 = (
        lv if isinstance(lv, torch.Tensor) else torch.tensor(lv).to(tensor)
        for lv in (logvar1, logvar2)
    )

    mean_diff = mean1 - mean2
    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + torch.exp(logvar1 - logvar2)
        + (mean_diff ** 2) * torch.exp(-logvar2)
    )
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/base_model.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+
4
class BaseModel(torch.nn.Module):
    # Common checkpoint-loading behavior shared by the MiDaS networks.
    def load(self, path):
        """Load model from file.

        Args:
            path (str): file path
        """
        # Always map onto CPU so loading works without a GPU; the caller can
        # move the module afterwards.
        parameters = torch.load(path, map_location=torch.device('cpu'))

        # Training checkpoints bundle optimizer state; the weights then live
        # under the "model" key instead of at the top level.
        if "optimizer" in parameters:
            parameters = parameters["model"]

        self.load_state_dict(parameters)
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/blocks.py ADDED
@@ -0,0 +1,342 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from .vit import (
5
+ _make_pretrained_vitb_rn50_384,
6
+ _make_pretrained_vitl16_384,
7
+ _make_pretrained_vitb16_384,
8
+ forward_vit,
9
+ )
10
+
11
def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
    # Build the (pretrained feature extractor, scratch decoder-projection
    # convs) pair for the requested backbone name.  The hard-coded channel
    # lists are the per-stage feature widths each backbone produces.
    if backbone == "vitl16_384":
        pretrained = _make_pretrained_vitl16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [256, 512, 1024, 1024], features, groups=groups, expand=expand
        ) # ViT-L/16 - 85.0% Top1 (backbone)
    elif backbone == "vitb_rn50_384":
        pretrained = _make_pretrained_vitb_rn50_384(
            use_pretrained,
            hooks=hooks,
            use_vit_only=use_vit_only,
            use_readout=use_readout,
        )
        scratch = _make_scratch(
            [256, 512, 768, 768], features, groups=groups, expand=expand
        ) # ViT-H/16 - 85.0% Top1 (backbone)
    elif backbone == "vitb16_384":
        pretrained = _make_pretrained_vitb16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [96, 192, 384, 768], features, groups=groups, expand=expand
        ) # ViT-B/16 - 84.6% Top1 (backbone)
    elif backbone == "resnext101_wsl":
        pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
        scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3
    elif backbone == "efficientnet_lite3":
        pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
        scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand)  # efficientnet_lite3
    else:
        # NOTE(review): `assert False` is stripped under `python -O`;
        # raising ValueError would be safer.  Left unchanged here.
        print(f"Backbone '{backbone}' not implemented")
        assert False

    return pretrained, scratch
47
+
48
+
49
+ def _make_scratch(in_shape, out_shape, groups=1, expand=False):
50
+ scratch = nn.Module()
51
+
52
+ out_shape1 = out_shape
53
+ out_shape2 = out_shape
54
+ out_shape3 = out_shape
55
+ out_shape4 = out_shape
56
+ if expand==True:
57
+ out_shape1 = out_shape
58
+ out_shape2 = out_shape*2
59
+ out_shape3 = out_shape*4
60
+ out_shape4 = out_shape*8
61
+
62
+ scratch.layer1_rn = nn.Conv2d(
63
+ in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
64
+ )
65
+ scratch.layer2_rn = nn.Conv2d(
66
+ in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
67
+ )
68
+ scratch.layer3_rn = nn.Conv2d(
69
+ in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
70
+ )
71
+ scratch.layer4_rn = nn.Conv2d(
72
+ in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
73
+ )
74
+
75
+ return scratch
76
+
77
+
78
def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
    # Fetch tf_efficientnet_lite3 via torch.hub (downloads on first use) and
    # regroup its stem/blocks into the four-stage backbone wrapper.
    efficientnet = torch.hub.load(
        "rwightman/gen-efficientnet-pytorch",
        "tf_efficientnet_lite3",
        pretrained=use_pretrained,
        exportable=exportable
    )
    return _make_efficientnet_backbone(efficientnet)
86
+
87
+
88
+ def _make_efficientnet_backbone(effnet):
89
+ pretrained = nn.Module()
90
+
91
+ pretrained.layer1 = nn.Sequential(
92
+ effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
93
+ )
94
+ pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
95
+ pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
96
+ pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
97
+
98
+ return pretrained
99
+
100
+
101
+ def _make_resnet_backbone(resnet):
102
+ pretrained = nn.Module()
103
+ pretrained.layer1 = nn.Sequential(
104
+ resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
105
+ )
106
+
107
+ pretrained.layer2 = resnet.layer2
108
+ pretrained.layer3 = resnet.layer3
109
+ pretrained.layer4 = resnet.layer4
110
+
111
+ return pretrained
112
+
113
+
114
def _make_pretrained_resnext101_wsl(use_pretrained):
    # Fetch ResNeXt-101 32x8d (WSL weights) via torch.hub and wrap it into
    # the four-stage backbone layout.
    # NOTE(review): `use_pretrained` is accepted but not forwarded to
    # torch.hub.load — the hub entry point always loads its weights; confirm
    # whether this is intentional.
    resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
    return _make_resnet_backbone(resnet)
117
+
118
+
119
+
120
class Interpolate(nn.Module):
    """Module wrapper around ``nn.functional.interpolate``.
    """

    def __init__(self, scale_factor, mode, align_corners=False):
        """Init.

        Args:
            scale_factor (float): scaling
            mode (str): interpolation mode
        """
        super(Interpolate, self).__init__()

        # Keep a handle on the functional op so forward is a single call.
        self.interp = nn.functional.interpolate
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        """Resample ``x`` by ``self.scale_factor``.

        Args:
            x (tensor): input

        Returns:
            tensor: interpolated data
        """
        return self.interp(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners,
        )
153
+
154
+
155
class ResidualConvUnit(nn.Module):
    """Residual block: two ReLU -> 3x3-conv stages plus an identity skip.
    """

    def __init__(self, features):
        """Init.

        Args:
            features (int): number of features
        """
        super().__init__()

        conv_cfg = dict(kernel_size=3, stride=1, padding=1, bias=True)
        self.conv1 = nn.Conv2d(features, features, **conv_cfg)
        self.conv2 = nn.Conv2d(features, features, **conv_cfg)
        # In-place ReLU, matching the original memory behavior.
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Return ``f(x) + x`` where ``f`` is relu-conv-relu-conv.

        Args:
            x (tensor): input

        Returns:
            tensor: output
        """
        branch = self.conv1(self.relu(x))
        branch = self.conv2(self.relu(branch))
        return branch + x
192
+
193
+
194
class FeatureFusionBlock(nn.Module):
    """Feature fusion block: merges the decoder path with an optional skip
    connection and doubles the spatial resolution.
    """

    def __init__(self, features):
        """Init.

        Args:
            features (int): number of features
        """
        super(FeatureFusionBlock, self).__init__()

        self.resConfUnit1 = ResidualConvUnit(features)
        self.resConfUnit2 = ResidualConvUnit(features)

    def forward(self, *xs):
        """Fuse one or two inputs and upsample the result by 2x.

        Returns:
            tensor: output
        """
        fused = xs[0]

        # Refine the skip connection before adding it in.
        if len(xs) == 2:
            fused += self.resConfUnit1(xs[1])

        fused = self.resConfUnit2(fused)

        return nn.functional.interpolate(
            fused, scale_factor=2, mode="bilinear", align_corners=True
        )
227
+
228
+
229
+
230
+
231
class ResidualConvUnit_custom(nn.Module):
    """Residual convolution module with configurable activation, optional
    batch norm, and a quantization-friendly skip addition.
    """

    def __init__(self, features, activation, bn):
        """Init.

        Args:
            features (int): number of features
        """
        super().__init__()

        self.bn = bn
        self.groups = 1

        conv_cfg = dict(
            kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
        )
        self.conv1 = nn.Conv2d(features, features, **conv_cfg)
        self.conv2 = nn.Conv2d(features, features, **conv_cfg)

        if self.bn == True:
            self.bn1 = nn.BatchNorm2d(features)
            self.bn2 = nn.BatchNorm2d(features)

        self.activation = activation

        # FloatFunctional keeps the elementwise add traceable for
        # quantization workflows.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input

        Returns:
            tensor: output
        """
        out = self.conv1(self.activation(x))
        if self.bn == True:
            out = self.bn1(out)

        out = self.conv2(self.activation(out))
        if self.bn == True:
            out = self.bn2(out)

        # NOTE: only reachable if self.groups is ever set > 1 elsewhere;
        # conv_merge is not defined in this module.
        if self.groups > 1:
            out = self.conv_merge(out)

        return self.skip_add.add(out, x)
287
+
288
+ # return out + x
289
+
290
+
291
class FeatureFusionBlock_custom(nn.Module):
    """Feature fusion block (custom variant): configurable residual units,
    2x bilinear upsample, then a 1x1 output projection.
    """

    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
        """Init.

        Args:
            features (int): number of features
        """
        super(FeatureFusionBlock_custom, self).__init__()

        self.deconv = deconv
        self.align_corners = align_corners
        self.groups = 1
        self.expand = expand

        # Halve the channel count on output when expanding.
        out_features = features // 2 if self.expand == True else features

        self.out_conv = nn.Conv2d(
            features, out_features, kernel_size=1, stride=1, padding=0,
            bias=True, groups=1,
        )

        self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
        self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)

        # Quantization-friendly elementwise add.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, *xs):
        """Fuse one or two inputs, upsample 2x, and project channels.

        Returns:
            tensor: output
        """
        merged = xs[0]

        if len(xs) == 2:
            skip = self.resConfUnit1(xs[1])
            merged = self.skip_add.add(merged, skip)

        merged = self.resConfUnit2(merged)

        merged = nn.functional.interpolate(
            merged, scale_factor=2, mode="bilinear",
            align_corners=self.align_corners,
        )

        return self.out_conv(merged)
342
+
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/dpt_depth.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+ from .base_model import BaseModel
6
+ from .blocks import (
7
+ FeatureFusionBlock,
8
+ FeatureFusionBlock_custom,
9
+ Interpolate,
10
+ _make_encoder,
11
+ forward_vit,
12
+ )
13
+
14
+
15
def _make_fusion_block(features, use_bn):
    # Standard DPT fusion-block configuration: ReLU activation (not
    # in-place), no deconv/expand, align_corners upsampling.
    return FeatureFusionBlock_custom(
        features,
        nn.ReLU(False),
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
    )
24
+
25
+
26
class DPT(BaseModel):
    """Dense Prediction Transformer: a ViT encoder with a RefineNet-style
    fusion decoder.

    :param head: output head module appended after the fusion decoder.
    :param features: decoder channel width.
    :param backbone: one of the keys of ``hooks`` below.
    :param readout: ViT readout-token handling, forwarded to the encoder.
    :param channels_last: if True, convert inputs to channels-last layout.
    :param use_bn: enable batch norm inside the fusion blocks.
    """

    def __init__(
        self,
        head,
        features=256,
        backbone="vitb_rn50_384",
        readout="project",
        channels_last=False,
        use_bn=False,
    ):

        super(DPT, self).__init__()

        self.channels_last = channels_last

        # Transformer layers hooked for the four feature stages, per backbone.
        hooks = {
            "vitb_rn50_384": [0, 1, 8, 11],
            "vitb16_384": [2, 5, 8, 11],
            "vitl16_384": [5, 11, 17, 23],
        }

        # Instantiate backbone and reassemble blocks
        self.pretrained, self.scratch = _make_encoder(
            backbone,
            features,
            False, # Set to true of you want to train from scratch, uses ImageNet weights
            groups=1,
            expand=False,
            exportable=False,
            hooks=hooks[backbone],
            use_readout=readout,
        )

        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet4 = _make_fusion_block(features, use_bn)

        self.scratch.output_conv = head


    def forward(self, x):
        """Run encoder, fusion decoder, and output head on ``x``."""
        if self.channels_last == True:
            # BUGFIX: Tensor.contiguous returns a new tensor; the previous
            # code discarded the result, making the option a no-op.
            x = x.contiguous(memory_format=torch.channels_last)

        # Four hooked encoder stages, coarsest in layer_4.
        layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        # Top-down fusion: each refinenet merges the coarser path with the
        # next finer skip connection and upsamples.
        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv(path_1)

        return out
86
+
87
+
88
class DPTDepthModel(DPT):
    """DPT configured for monocular depth estimation.

    :param path: optional checkpoint path, loaded via ``BaseModel.load``.
    :param non_negative: clamp predictions at zero with a final ReLU.
    Remaining keyword arguments are forwarded to :class:`DPT`.
    """

    def __init__(self, path=None, non_negative=True, **kwargs):
        # Idiom: dict.get instead of the membership-test/index pair.
        features = kwargs.get("features", 256)

        # Depth head: halve channels, 2x upsample, reduce to a single channel.
        head = nn.Sequential(
            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )

        super().__init__(head, **kwargs)

        if path is not None:
            self.load(path)

    def forward(self, x):
        """Return an [N x H x W] depth map (channel dim squeezed out)."""
        return super().forward(x).squeeze(dim=1)
109
+
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/midas_net.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
2
+ This file contains code that is adapted from
3
+ https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
4
+ """
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from .base_model import BaseModel
9
+ from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
10
+
11
+
12
class MidasNet(BaseModel):
    """Network for monocular depth estimation.
    """

    def __init__(self, path=None, features=256, non_negative=True):
        """Init.

        Args:
            path (str, optional): Path to saved model. Defaults to None.
            features (int, optional): Number of features. Defaults to 256.
            backbone (str, optional): Backbone network for encoder. Defaults to resnet50
        """
        print("Loading weights: ", path)

        super(MidasNet, self).__init__()

        # NOTE(review): this enables ImageNet pretraining only when a
        # checkpoint path IS given — the opposite of MidasNet_small's
        # `False if path else True`.  Confirm which polarity is intended.
        use_pretrained = False if path is None else True

        self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)

        # Top-down fusion blocks, one per encoder stage.
        self.scratch.refinenet4 = FeatureFusionBlock(features)
        self.scratch.refinenet3 = FeatureFusionBlock(features)
        self.scratch.refinenet2 = FeatureFusionBlock(features)
        self.scratch.refinenet1 = FeatureFusionBlock(features)

        # Output head: reduce channels, upsample 2x, and regress one
        # depth channel (clamped non-negative unless disabled).
        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
        )

        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth
        """

        # Encoder stages (coarsest features in layer_4).
        layer_1 = self.pretrained.layer1(x)
        layer_2 = self.pretrained.layer2(layer_1)
        layer_3 = self.pretrained.layer3(layer_2)
        layer_4 = self.pretrained.layer4(layer_3)

        # Project each stage to the common decoder width.
        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        # Top-down fusion with skip connections, finest path last.
        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv(path_1)

        return torch.squeeze(out, dim=1)
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/midas_net_custom.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
2
+ This file contains code that is adapted from
3
+ https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
4
+ """
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from .base_model import BaseModel
9
+ from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
10
+
11
+
12
class MidasNet_small(BaseModel):
    """Network for monocular depth estimation.

    Lightweight MiDaS variant: a timm encoder backbone plus a RefineNet-style
    decoder ("scratch") that fuses the four encoder stages top-down into a
    single-channel depth map.
    """

    def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
                 blocks={'expand': True}):
        """Init.

        Args:
            path (str, optional): Path to saved model weights. Defaults to None.
            features (int, optional): Base number of decoder features. Defaults to 64.
            backbone (str, optional): timm encoder name. Defaults to "efficientnet_lite3".
            non_negative (bool, optional): Clamp output depth to >= 0 with a final ReLU.
            exportable (bool, optional): Build export-friendly decoder blocks.
            channels_last (bool, optional): Run forward in channels-last memory format.
            align_corners (bool, optional): Passed to the decoder's upsampling.
            blocks (dict, optional): Extra options; 'expand' widens the decoder
                stages to features*2/4/8.  NOTE(review): mutable default argument
                is shared across instances; it is only read here, kept for
                interface compatibility.
        """
        print("Loading weights: ", path)

        super(MidasNet_small, self).__init__()

        # Only download ImageNet-pretrained encoder weights when no checkpoint
        # is given (a checkpoint would overwrite them anyway).
        use_pretrained = False if path else True

        self.channels_last = channels_last
        self.blocks = blocks
        self.backbone = backbone

        self.groups = 1

        # Per-stage decoder widths; with 'expand' the deeper stages are wider.
        features1 = features
        features2 = features
        features3 = features
        features4 = features
        self.expand = False
        if "expand" in self.blocks and self.blocks['expand'] == True:
            self.expand = True
            features1 = features
            features2 = features * 2
            features3 = features * 4
            features4 = features * 8

        self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)

        self.scratch.activation = nn.ReLU(False)

        # refinenet1 is the finest stage and never expands.
        self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)

        # Head: halve channels, upsample 2x, reduce to 1 channel, optional ReLU.
        # The trailing nn.Identity is redundant but kept so module indices (and
        # thus any state_dict layout expectations) stay unchanged.
        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1, groups=self.groups),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
            self.scratch.activation,
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )

        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth
        """
        if self.channels_last == True:
            print("self.channels_last = ", self.channels_last)
            # BUGFIX: Tensor.contiguous() is not in-place -- the original
            # discarded the result, making the channels_last option a no-op.
            # Values are unchanged; only the memory format differs.
            x = x.contiguous(memory_format=torch.channels_last)

        layer_1 = self.pretrained.layer1(x)
        layer_2 = self.pretrained.layer2(layer_1)
        layer_3 = self.pretrained.layer3(layer_2)
        layer_4 = self.pretrained.layer4(layer_3)

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv(path_1)

        return torch.squeeze(out, dim=1)
106
+
107
+
108
+
109
def fuse_model(m):
    """Fuse Conv2d+BatchNorm2d(+ReLU) runs in-place for quantization.

    Walks the module tree keeping a two-entry history of previously visited
    modules, handing every Conv->BN(->ReLU) run to
    torch.quantization.fuse_modules.
    """
    # History of the last two visited modules as (type, qualified name).
    # Seeded with Identity *instances* so the first comparisons are False.
    history = [(nn.Identity(), ''), (nn.Identity(), '')]
    for name, module in m.named_modules():
        (two_back_type, two_back_name), (one_back_type, one_back_name) = history
        if two_back_type == nn.Conv2d and one_back_type == nn.BatchNorm2d:
            if type(module) == nn.ReLU:
                # Conv -> BN -> ReLU: fuse the whole triple.
                torch.quantization.fuse_modules(
                    m, [two_back_name, one_back_name, name], inplace=True
                )
            else:
                # Conv -> BN not followed by ReLU: fuse the pair only.
                torch.quantization.fuse_modules(
                    m, [two_back_name, one_back_name], inplace=True
                )
        history = [(one_back_type, one_back_name), (type(module), name)]
+ previous_name = name
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/transforms.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+ import math
4
+
5
+
6
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
    """Resize the sample up (never down) to at least `size`, keeping aspect ratio.

    Args:
        sample (dict): sample with "image", "disparity" and "mask" entries
        size (tuple): minimal (height, width)

    Returns:
        tuple: the new (height, width) -- or the untouched sample when it is
        already large enough in both dimensions (NOTE(review): inconsistent
        return type, preserved for compatibility with existing callers).
    """
    shape = list(sample["disparity"].shape)

    # Nothing to do when both dimensions already satisfy the minimum.
    if shape[0] >= size[0] and shape[1] >= size[1]:
        return sample

    # Scale by the larger per-axis factor so both minima are met at once.
    factor = max(size[0] / shape[0], size[1] / shape[1])
    shape[0] = math.ceil(factor * shape[0])
    shape[1] = math.ceil(factor * shape[1])

    # cv2.resize expects (width, height).
    target_wh = tuple(shape[::-1])
    sample["image"] = cv2.resize(
        sample["image"], target_wh, interpolation=image_interpolation_method
    )
    sample["disparity"] = cv2.resize(
        sample["disparity"], target_wh, interpolation=cv2.INTER_NEAREST
    )
    resized_mask = cv2.resize(
        sample["mask"].astype(np.float32),
        target_wh,
        interpolation=cv2.INTER_NEAREST,
    )
    sample["mask"] = resized_mask.astype(bool)

    return tuple(shape)
46
+
47
+
48
class Resize(object):
    """Resize sample to given size (width, height).

    The target size can be adjusted to keep the aspect ratio and/or to be a
    multiple of a given integer; see `resize_method` for how conflicts between
    the requested size and the aspect ratio are resolved.
    """

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
            image_interpolation_method (int, optional): cv2 interpolation flag
                used for the image (targets always use nearest-neighbour).
        """
        self.__width = width
        self.__height = height

        self.__resize_target = resize_target
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method
        self.__image_interpolation_method = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        """Round x to the nearest multiple of `ensure_multiple_of`, clamped to [min_val, max_val]."""
        # Nearest multiple first; fall back to floor/ceil when a bound is hit.
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if y < min_val:
            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        """Return the output (width, height) for an input of the given size."""
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            # Collapse the two scale factors to one, chosen per resize_method.
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as least as possbile
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(
                    f"resize_method {self.__resize_method} not implemented"
                )

        if self.__resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, min_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, min_val=self.__width
            )
        elif self.__resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, max_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, max_val=self.__width
            )
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        """Resize sample["image"] (and, if enabled, the targets) in place and return it."""
        width, height = self.get_size(
            sample["image"].shape[1], sample["image"].shape[0]
        )

        # resize sample
        sample["image"] = cv2.resize(
            sample["image"],
            (width, height),
            interpolation=self.__image_interpolation_method,
        )

        if self.__resize_target:
            # Targets use nearest-neighbour so label values are never blended.
            if "disparity" in sample:
                sample["disparity"] = cv2.resize(
                    sample["disparity"],
                    (width, height),
                    interpolation=cv2.INTER_NEAREST,
                )

            if "depth" in sample:
                sample["depth"] = cv2.resize(
                    sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
                )

            sample["mask"] = cv2.resize(
                sample["mask"].astype(np.float32),
                (width, height),
                interpolation=cv2.INTER_NEAREST,
            )
            sample["mask"] = sample["mask"].astype(bool)

        return sample
195
+
196
+
197
class NormalizeImage(object):
    """Normalize the image of a sample with a fixed mean and std."""

    def __init__(self, mean, std):
        self.__mean = mean
        self.__std = std

    def __call__(self, sample):
        normalized = (sample["image"] - self.__mean) / self.__std
        sample["image"] = normalized
        return sample
209
+
210
+
211
class PrepareForNet(object):
    """Convert a sample to contiguous float32 arrays laid out for network input."""

    def __init__(self):
        pass

    def __call__(self, sample):
        # Image: HWC -> CHW, contiguous float32.
        chw = np.transpose(sample["image"], (2, 0, 1))
        sample["image"] = np.ascontiguousarray(chw).astype(np.float32)

        # Optional targets are converted in place when present.
        for key in ("mask", "disparity", "depth"):
            if key in sample:
                as_float = sample[key].astype(np.float32)
                sample[key] = np.ascontiguousarray(as_float)

        return sample
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/vit.py ADDED
@@ -0,0 +1,491 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import timm
4
+ import types
5
+ import math
6
+ import torch.nn.functional as F
7
+
8
+
9
class Slice(nn.Module):
    """Drop the leading readout token(s): keep tokens from `start_index` onward."""

    def __init__(self, start_index=1):
        super(Slice, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        kept = x[:, self.start_index:]
        return kept
16
+
17
+
18
class AddReadout(nn.Module):
    """Fold the readout token(s) back into the patch tokens by addition."""

    def __init__(self, start_index=1):
        super(AddReadout, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        # Two leading special tokens (cls + dist): average them; otherwise
        # use the single cls token as the readout.
        if self.start_index == 2:
            readout = (x[:, 0] + x[:, 1]) / 2
        else:
            readout = x[:, 0]
        patches = x[:, self.start_index:]
        return patches + readout.unsqueeze(1)
29
+
30
+
31
class ProjectReadout(nn.Module):
    """Concatenate the readout token to every patch token and project back down."""

    def __init__(self, in_features, start_index=1):
        super(ProjectReadout, self).__init__()
        self.start_index = start_index

        # 2*in_features -> in_features, so the output keeps the token width.
        self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())

    def forward(self, x):
        patches = x[:, self.start_index:]
        readout = x[:, 0].unsqueeze(1).expand_as(patches)
        combined = torch.cat((patches, readout), -1)
        return self.project(combined)
43
+
44
+
45
class Transpose(nn.Module):
    """nn.Module wrapper around Tensor.transpose(dim0, dim1) for use in Sequential."""

    def __init__(self, dim0, dim1):
        super(Transpose, self).__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x):
        return x.transpose(self.dim0, self.dim1)
54
+
55
+
56
def forward_vit(pretrained, x):
    """Run a hooked ViT backbone and return four CNN-style feature maps.

    `pretrained` is the wrapper built by `_make_vit_b16_backbone` /
    `_make_vit_b_rn50_backbone`: forward hooks on selected transformer blocks
    store their token outputs in `pretrained.activations`, and the
    `act_postprocessN` pipelines turn those tokens into spatial maps.
    """
    b, c, h, w = x.shape

    # Populates pretrained.activations["1".."4"] via the registered hooks;
    # the final global token output itself is not used here.
    glob = pretrained.model.forward_flex(x)

    layer_1 = pretrained.activations["1"]
    layer_2 = pretrained.activations["2"]
    layer_3 = pretrained.activations["3"]
    layer_4 = pretrained.activations["4"]

    # Stages [0:2] of each pipeline: readout handling + (tokens, C) transpose.
    layer_1 = pretrained.act_postprocess1[0:2](layer_1)
    layer_2 = pretrained.act_postprocess2[0:2](layer_2)
    layer_3 = pretrained.act_postprocess3[0:2](layer_3)
    layer_4 = pretrained.act_postprocess4[0:2](layer_4)

    # Unflatten the token axis into an (H/ps, W/ps) grid; built here because
    # the grid size depends on the *runtime* input resolution.
    unflatten = nn.Sequential(
        nn.Unflatten(
            2,
            torch.Size(
                [
                    h // pretrained.model.patch_size[1],
                    w // pretrained.model.patch_size[0],
                ]
            ),
        )
    )

    # Hybrid backbones may already deliver 4D maps; only 3D token tensors
    # need unflattening.
    if layer_1.ndim == 3:
        layer_1 = unflatten(layer_1)
    if layer_2.ndim == 3:
        layer_2 = unflatten(layer_2)
    if layer_3.ndim == 3:
        layer_3 = unflatten(layer_3)
    if layer_4.ndim == 3:
        layer_4 = unflatten(layer_4)

    # Remaining stages [3:]: 1x1 projection plus up/down-sampling convs
    # (index 2, the static Unflatten, was replaced by the dynamic one above).
    layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
    layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
    layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
    layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)

    return layer_1, layer_2, layer_3, layer_4
98
+
99
+
100
+ def _resize_pos_embed(self, posemb, gs_h, gs_w):
101
+ posemb_tok, posemb_grid = (
102
+ posemb[:, : self.start_index],
103
+ posemb[0, self.start_index :],
104
+ )
105
+
106
+ gs_old = int(math.sqrt(len(posemb_grid)))
107
+
108
+ posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
109
+ posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
110
+ posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
111
+
112
+ posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
113
+
114
+ return posemb
115
+
116
+
117
def forward_flex(self, x):
    """VisionTransformer forward that tolerates arbitrary input resolutions.

    Injected into timm VisionTransformer instances (via types.MethodType) so
    the position embedding can be resized on the fly to match the input's
    patch grid instead of the training resolution.
    """
    b, c, h, w = x.shape

    # Resample the learned position embedding to this input's patch grid.
    pos_embed = self._resize_pos_embed(
        self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
    )

    B = x.shape[0]

    # Hybrid models embed patches through a CNN stem first.
    if hasattr(self.patch_embed, "backbone"):
        x = self.patch_embed.backbone(x)
        if isinstance(x, (list, tuple)):
            x = x[-1]  # last feature if backbone outputs list/tuple of features

    # Patch projection: (B, C, H', W') -> (B, tokens, dim).
    x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)

    # Prepend cls (and, for DeiT distilled models, dist) tokens.
    if getattr(self, "dist_token", None) is not None:
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)
    else:
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)

    x = x + pos_embed
    x = self.pos_drop(x)

    # Transformer encoder; forward hooks capture intermediate block outputs.
    for blk in self.blocks:
        x = blk(x)

    x = self.norm(x)

    return x
154
+
155
+
156
# Module-level store that the forward hooks write into, keyed by layer label.
activations = {}


def get_activation(name):
    """Return a forward hook that records a module's output in `activations[name]`."""

    def _store(module, inputs, out):
        activations[name] = out

    return _store
164
+
165
+
166
def get_readout_oper(vit_features, features, use_readout, start_index=1):
    """Build one readout-token handler per output feature stage.

    Args:
        vit_features (int): token width of the ViT backbone.
        features (sequence): per-stage output widths; only its length is used
            for 'ignore'/'add'; one ProjectReadout per stage for 'project'.
        use_readout (str): 'ignore', 'add' or 'project'.
        start_index (int, optional): index of the first patch token.

    Returns:
        list: one readout operation (nn.Module) per stage.

    Raises:
        ValueError: if `use_readout` is not one of the supported modes.
    """
    if use_readout == "ignore":
        readout_oper = [Slice(start_index)] * len(features)
    elif use_readout == "add":
        readout_oper = [AddReadout(start_index)] * len(features)
    elif use_readout == "project":
        readout_oper = [
            ProjectReadout(vit_features, start_index) for out_feat in features
        ]
    else:
        # Raise instead of `assert False`: asserts are stripped under
        # `python -O`, which would let an invalid mode fall through silently.
        raise ValueError(
            "wrong operation for readout token, use_readout can be "
            "'ignore', 'add', or 'project'"
        )

    return readout_oper
181
+
182
+
183
def _make_vit_b16_backbone(
    model,
    features=[96, 192, 384, 768],
    size=[384, 384],
    hooks=[2, 5, 8, 11],
    vit_features=768,
    use_readout="ignore",
    start_index=1,
):
    """Wrap a timm ViT/16 model as a 4-scale dense-prediction backbone.

    Registers forward hooks on the transformer blocks listed in `hooks` (their
    token outputs land in the shared `activations` dict) and builds one
    post-processing pipeline per tapped block that converts tokens into a
    spatial feature map of the matching width in `features`.

    NOTE(review): the list defaults are mutable but only read, never mutated;
    kept for interface compatibility.
    """
    pretrained = nn.Module()

    pretrained.model = model
    pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)

    # Each pipeline: readout handling, (tokens, C) transpose, unflatten to a
    # grid, 1x1 channel projection, then up-/down-sampling to the stage scale.
    # Index 2 (the Unflatten) is rebuilt at runtime by forward_vit for the
    # actual input resolution; the one here matches `size` only.
    # 32, 48, 136, 384
    pretrained.act_postprocess1 = nn.Sequential(
        readout_oper[0],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[0],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[0],
            out_channels=features[0],
            kernel_size=4,
            stride=4,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    pretrained.act_postprocess2 = nn.Sequential(
        readout_oper[1],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[1],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[1],
            out_channels=features[1],
            kernel_size=2,
            stride=2,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    # Stage 3 keeps the native 1/16 resolution (no resampling conv).
    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )

    # Stage 4 downsamples by 2 with a strided conv.
    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained
295
+
296
+
297
def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
    """ViT-L/16 @384 backbone wrapped for dense prediction.

    Args:
        pretrained (bool): load timm's ImageNet weights.
        use_readout (str, optional): readout-token handling, see get_readout_oper.
        hooks (list[int], optional): transformer blocks to tap; defaults to [5, 11, 17, 23].
    """
    model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)

    # `is None` instead of `== None`: identity is the correct None test (PEP 8).
    hooks = [5, 11, 17, 23] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )
308
+
309
+
310
def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
    """ViT-B/16 @384 backbone wrapped for dense prediction.

    Args:
        pretrained (bool): load timm's ImageNet weights.
        use_readout (str, optional): readout-token handling, see get_readout_oper.
        hooks (list[int], optional): transformer blocks to tap; defaults to [2, 5, 8, 11].
    """
    model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)

    # `is None` instead of `== None`: identity is the correct None test (PEP 8).
    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
    )
317
+
318
+
319
def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
    """DeiT-B/16 @384 backbone wrapped for dense prediction.

    Args:
        pretrained (bool): load timm's pretrained weights.
        use_readout (str, optional): readout-token handling, see get_readout_oper.
        hooks (list[int], optional): transformer blocks to tap; defaults to [2, 5, 8, 11].
    """
    model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)

    # `is None` instead of `== None`: identity is the correct None test (PEP 8).
    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
    )
326
+
327
+
328
def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
    """Distilled DeiT-B/16 @384 backbone wrapped for dense prediction.

    The distilled variant carries a cls AND a dist token, hence start_index=2.

    Args:
        pretrained (bool): load timm's pretrained weights.
        use_readout (str, optional): readout-token handling, see get_readout_oper.
        hooks (list[int], optional): transformer blocks to tap; defaults to [2, 5, 8, 11].
    """
    model = timm.create_model(
        "vit_deit_base_distilled_patch16_384", pretrained=pretrained
    )

    # `is None` instead of `== None`: identity is the correct None test (PEP 8).
    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[96, 192, 384, 768],
        hooks=hooks,
        use_readout=use_readout,
        start_index=2,
    )
341
+
342
+
343
def _make_vit_b_rn50_backbone(
    model,
    features=[256, 512, 768, 768],
    size=[384, 384],
    hooks=[0, 1, 8, 11],
    vit_features=768,
    use_vit_only=False,
    use_readout="ignore",
    start_index=1,
):
    """Wrap a timm ViT-hybrid (ResNet50 stem) model as a 4-scale backbone.

    Like `_make_vit_b16_backbone`, but the two finest scales can come either
    from the ResNet stem stages (default) or from transformer blocks
    (`use_vit_only=True`).

    NOTE(review): the list defaults are mutable but only read, never mutated;
    kept for interface compatibility.
    """
    pretrained = nn.Module()

    pretrained.model = model

    # Fine scales: transformer blocks or the CNN stem, depending on the flag.
    if use_vit_only == True:
        pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
        pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    else:
        pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
            get_activation("1")
        )
        pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
            get_activation("2")
        )

    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)

    if use_vit_only == True:
        # Token outputs need the full token -> spatial-map pipeline.
        pretrained.act_postprocess1 = nn.Sequential(
            readout_oper[0],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[0],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[0],
                out_channels=features[0],
                kernel_size=4,
                stride=4,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )

        pretrained.act_postprocess2 = nn.Sequential(
            readout_oper[1],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[1],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[1],
                out_channels=features[1],
                kernel_size=2,
                stride=2,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )
    else:
        # CNN stem outputs are already spatial maps: pass through unchanged.
        # Three Identities keep the pipeline length compatible with the
        # [0:2] / [3:] slicing done in forward_vit.
        pretrained.act_postprocess1 = nn.Sequential(
            nn.Identity(), nn.Identity(), nn.Identity()
        )
        pretrained.act_postprocess2 = nn.Sequential(
            nn.Identity(), nn.Identity(), nn.Identity()
        )

    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )

    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained
476
+
477
+
478
def _make_pretrained_vitb_rn50_384(
    pretrained, use_readout="ignore", hooks=None, use_vit_only=False
):
    """ViT-B hybrid (ResNet50 stem) @384 backbone wrapped for dense prediction.

    Args:
        pretrained (bool): load timm's ImageNet weights.
        use_readout (str, optional): readout-token handling, see get_readout_oper.
        hooks (list[int], optional): blocks/stages to tap; defaults to [0, 1, 8, 11].
        use_vit_only (bool, optional): take the two fine scales from transformer
            blocks instead of the ResNet stem.
    """
    model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)

    # `is None` instead of `== None`: identity is the correct None test (PEP 8).
    hooks = [0, 1, 8, 11] if hooks is None else hooks
    return _make_vit_b_rn50_backbone(
        model,
        features=[256, 512, 768, 768],
        size=[384, 384],
        hooks=hooks,
        use_vit_only=use_vit_only,
        use_readout=use_readout,
    )
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/util.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+
3
+ import torch
4
+ from torch import optim
5
+ import numpy as np
6
+
7
+ from inspect import isfunction
8
+ from PIL import Image, ImageDraw, ImageFont
9
+
10
+
11
def log_txt_as_img(wh, xc, size=10):
    """Render a batch of caption strings as images for logging.

    Args:
        wh: (width, height) of each rendered image.
        xc: list of caption strings, one per batch element.
        size: font size in points.

    Returns:
        Float tensor of shape (b, 3, h, w) with values scaled to [-1, 1].
    """
    # wh a tuple of (width, height)
    # xc a list of captions to plot
    b = len(xc)
    txts = list()
    for bi in range(b):
        txt = Image.new("RGB", wh, color="white")
        draw = ImageDraw.Draw(txt)
        # NOTE(review): assumes the font file exists relative to the working
        # directory -- confirm 'data/DejaVuSans.ttf' ships with the run setup.
        font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)
        # Wrap the caption into fixed-width chunks (~40 chars at 256px width).
        nc = int(40 * (wh[0] / 256))
        lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))

        try:
            draw.text((0, 0), lines, fill="black", font=font)
        except UnicodeEncodeError:
            print("Cant encode string for logging. Skipping.")

        # HWC uint8 -> CHW floats in [-1, 1].
        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
        txts.append(txt)
    txts = np.stack(txts)
    txts = torch.tensor(txts)
    return txts
33
+
34
+
35
def ismap(x):
    """True for 4D tensors with more than 3 channels (map-like, not an image)."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 4 and x.shape[1] > 3
39
+
40
+
41
def isimage(x):
    """True for 4D tensors with exactly 1 or 3 channels (grayscale/RGB batch)."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 4 and x.shape[1] in (1, 3)
45
+
46
+
47
def exists(x):
    """Return True when *x* is set (i.e. not None)."""
    if x is None:
        return False
    return True
49
+
50
+
51
def default(val, d):
    """Return `val` unless it is None, in which case fall back to `d`.

    A plain-function/lambda fallback is invoked lazily; any other value is
    returned as-is (note: `inspect.isfunction` does not treat arbitrary
    callables as functions).
    """
    if val is not None:
        return val
    if isfunction(d):
        return d()
    return d
55
+
56
+
57
def mean_flat(tensor):
    """
    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    Take the mean over all non-batch dimensions.
    """
    reduce_dims = list(range(1, tensor.dim()))
    return tensor.mean(dim=reduce_dims)
63
+
64
+
65
def count_params(model, verbose=False):
    """Return the total parameter count of `model`; optionally print it in millions."""
    total_params = 0
    for p in model.parameters():
        total_params += p.numel()
    if verbose:
        print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
    return total_params
70
+
71
+
72
def instantiate_from_config(config):
    """Build the object described by `config["target"]` with `config["params"]`.

    The two sentinel strings used by latent-diffusion configs map to None.
    """
    if "target" not in config:
        if config in ('__is_first_stage__', "__is_unconditional__"):
            return None
        raise KeyError("Expected key `target` to instantiate.")
    params = config.get("params", dict())
    return get_obj_from_str(config["target"])(**params)
80
+
81
+
82
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like 'pkg.mod.Class' to the named attribute.

    With reload=True the containing module is re-imported first.
    """
    module_path, _, attr_name = string.rpartition(".")
    if reload:
        importlib.reload(importlib.import_module(module_path))
    return getattr(importlib.import_module(module_path, package=None), attr_name)
88
+
89
+
90
class AdamWwithEMAandWings(optim.Optimizer):
    """AdamW that additionally maintains an exponential moving average (EMA)
    copy of every parameter in the optimizer state ('param_exp_avg')."""
    # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298
    def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8,  # TODO: check hyperparameters before using
                 weight_decay=1.e-2, amsgrad=False, ema_decay=0.9999,   # ema decay to match previous code
                 ema_power=1., param_names=()):
        """AdamW that saves EMA versions of the parameters.

        Args:
            params: iterable of parameters or param groups.
            lr, betas, eps, weight_decay, amsgrad: standard AdamW hyperparameters.
            ema_decay: upper bound on the EMA decay rate.
            ema_power: warm-up exponent; effective decay is
                min(ema_decay, 1 - step ** -ema_power).
            param_names: optional labels stored in the group defaults
                (not used by the update itself).
        """
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= ema_decay <= 1.0:
            raise ValueError("Invalid ema_decay value: {}".format(ema_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay,
                        ema_power=ema_power, param_names=param_names)
        super().__init__(params, defaults)

    def __setstate__(self, state):
        # Older checkpoints may lack 'amsgrad' in their groups.
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            ema_params_with_grad = []
            state_sums = []
            max_exp_avg_sqs = []
            state_steps = []
            amsgrad = group['amsgrad']
            beta1, beta2 = group['betas']
            ema_decay = group['ema_decay']
            ema_power = group['ema_power']

            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError('AdamW does not support sparse gradients')
                grads.append(p.grad)

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of parameter values
                    state['param_exp_avg'] = p.detach().float().clone()

                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                ema_params_with_grad.append(state['param_exp_avg'])

                if amsgrad:
                    max_exp_avg_sqs.append(state['max_exp_avg_sq'])

                # update the steps for each param group update
                state['step'] += 1
                # record the step after step update
                state_steps.append(state['step'])

            # NOTE(review): relies on torch's private functional API
            # (optim._functional.adamw); verify its signature against the
            # installed torch version when upgrading.
            optim._functional.adamw(params_with_grad,
                                    grads,
                                    exp_avgs,
                                    exp_avg_sqs,
                                    max_exp_avg_sqs,
                                    state_steps,
                                    amsgrad=amsgrad,
                                    beta1=beta1,
                                    beta2=beta2,
                                    lr=group['lr'],
                                    weight_decay=group['weight_decay'],
                                    eps=group['eps'],
                                    maximize=False)

            # EMA update: decay warms up with the step count, capped at ema_decay.
            cur_ema_decay = min(ema_decay, 1 - state['step'] ** -ema_power)
            for param, ema_param in zip(params_with_grad, ema_params_with_grad):
                ema_param.mul_(cur_ema_decay).add_(param.float(), alpha=1 - cur_ema_decay)

        return loss
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ops_bf16.txt ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _convolution.deprecated
2
+ _convolution
3
+ conv1d
4
+ conv2d
5
+ conv3d
6
+ conv_tbc
7
+ conv_transpose1d
8
+ conv_transpose2d.input
9
+ conv_transpose3d.input
10
+ convolution
11
+ prelu
12
+ addmm
13
+ addmv
14
+ addr
15
+ matmul
16
+ einsum
17
+ mm
18
+ mv
19
+ silu
20
+ linear
21
+ addbmm
22
+ baddbmm
23
+ bmm
24
+ chain_matmul
25
+ linalg_multi_dot
26
+ layer_norm
27
+ group_norm
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ops_fp32.txt ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ acos
2
+ asin
3
+ cosh
4
+ erfinv
5
+ exp
6
+ expm1
7
+ log
8
+ log10
9
+ log2
10
+ log1p
11
+ reciprocal
12
+ rsqrt
13
+ sinh
14
+ tan
15
+ pow.Tensor_Scalar
16
+ pow.Tensor_Tensor
17
+ pow.Scalar
18
+ softplus
19
+ frobenius_norm
20
+ frobenius_norm.dim
21
+ nuclear_norm
22
+ nuclear_norm.dim
23
+ cosine_similarity
24
+ poisson_nll_loss
25
+ cosine_embedding_loss
26
+ nll_loss
27
+ nll_loss2d
28
+ hinge_embedding_loss
29
+ kl_div
30
+ l1_loss
31
+ smooth_l1_loss
32
+ huber_loss
33
+ mse_loss
34
+ margin_ranking_loss
35
+ multilabel_margin_loss
36
+ soft_margin_loss
37
+ triplet_margin_loss
38
+ multi_margin_loss
39
+ binary_cross_entropy_with_logits
40
+ dist
41
+ pdist
42
+ cdist
43
+ renorm
44
+ logsumexp
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ lightning==2.1.2
2
+ lightning-habana==1.3.0
3
+ omegaconf==2.1.1
4
+ einops==0.3.0
5
+ transformers==4.37.1
6
+ open-clip-torch==2.7.0
7
+ gradio==4.19.2
8
+ k_diffusion==0.0.14
9
+ -e .
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/setup.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from setuptools import setup, find_packages
2
+
3
+ setup(
4
+ name='stable-diffusion',
5
+ version='0.0.1',
6
+ description='',
7
+ packages=find_packages(),
8
+ install_requires=[
9
+ 'torch',
10
+ 'numpy',
11
+ 'tqdm',
12
+ ],
13
+ )
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/LICENSE ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2022 Habana Labs, Ltd. an Intel Company
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright [yyyy] [name of copyright owner]
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ http://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/README.md ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # BART for PyTorch
2
+
3
+ This folder contains scripts to fine-tune BART model on Intel® Gaudi® AI Accelerator. To obtain model performance data, refer to the [Habana Model Performance Data page](https://developer.habana.ai/resources/habana-training-models/#performance).
4
+
5
+ For more information about training deep learning models using Gaudi, visit [developer.habana.ai](https://developer.habana.ai/resources/).
6
+
7
+ ## Table of Contents
8
+ * [Model-References](../../../../README.md)
9
+ * [Model Overview](#model-overview)
10
+ * [Setup](#setup)
11
+ * [Training Examples ](#training-examples)
12
+ * [Supported Configurations](#supported-configurations)
13
+ * [Changelog](#changelog)
14
+ * [Known Issues](#known-issues)
15
+
16
+ ## Model Overview
17
+
18
+ BART, Bidirectional and Auto-Regressive Transformers, is proposed in this paper: [Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://aclanthology.org/2020.acl-main.703/), ACL 2020. It is a denoising autoencoder that maps a corrupted document to the original document it was derived from. BART is implemented as a sequence-to-sequence model with a bidirectional encoder over corrupted text and a left-to-right autoregressive decoder. According to the paper, BART's architecture is related to that used in BERT, with these differences: (1) each layer of the decoder additionally performs cross-attention over the final hidden layer of the encoder; and (2) BERT uses an additional feed-forward network before word prediction, which BART does not. BART contains roughly 10% more parameters than the equivalently sized BERT model.
19
+
20
+ ### BART Fine-Tuning
21
+ - Suited for tasks:
22
+ - Text paraphrasing: The model aims to generate paraphrases of the given input sentence.
23
+ - Text summarization: The model aims to generate a summary of the given input sentence.
24
+ - Uses optimizer: FusedAdamW (AdamW: “ADAM with Weight Decay Regularization”).
25
+ - Based on model weights trained with pre-training.
26
+ - Light-weight: The training takes a few minutes.
27
+
28
+ The BART demo uses training scripts from simple transformers https://github.com/ThilinaRajapakse/simpletransformers.
29
+
30
+ ## Setup
31
+ Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html)
32
+ to set up the environment including the `$PYTHON` environment variable. To achieve the best performance, please follow the methods outlined in the [Optimizing Training Platform guide](https://docs.habana.ai/en/latest/PyTorch/Model_Optimization_PyTorch/Optimization_in_Training_Platform.html).
33
+ The guides will walk you through the process of setting up your system to run the model on Gaudi.
34
+
35
+ ### Clone Habana Model-References
36
+ In the docker container, clone this repository and switch to the branch that
37
+ matches your SynapseAI version. You can run the
38
+ [`hl-smi`](https://docs.habana.ai/en/latest/Management_and_Monitoring/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options)
39
+ utility to determine the SynapseAI version.
40
+ ```bash
41
+ git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References
42
+ ```
43
+
44
+ Then, navigate to the BART model directory:
45
+ ```bash
46
+ cd Model-References/PyTorch/nlp/BART/simpletransformers
47
+ ```
48
+
49
+ ### Install Model Requirements
50
+ Install the python packages required for fine-tuning:
51
+ ```bash
52
+ cd Model-References/PyTorch/nlp/BART/simpletransformers
53
+ pip install -e .
54
+ pip install bert_score
55
+ ```
56
+
57
+ ### Fine-tuning Dataset Preparation
58
+
59
+ Public datasets can be downloaded with this script:
60
+ ```bash
61
+ bash ./examples/seq2seq/paraphrasing/data_download.sh
62
+ ```
63
+
64
+ **Note:** Going forward, it is assumed that the dataset is located in the `./data` directory.
65
+
66
+ ## Training Examples
67
+
68
+ ### Single Card and Multi-Card Training Examples
69
+
70
+ **Run training on 1 HPU - Lazy mode:**
71
+
72
+ - 1 HPU, BART fine-tuning on the dataset using BF16 mixed precision:
73
+ ```python
74
+ PT_HPU_AUTOCAST_LOWER_PRECISION_OPS_LIST=ops_bf16_bart.txt PT_HPU_AUTOCAST_FP32_OPS_LIST=ops_fp32_bart.txt $PYTHON examples/seq2seq/paraphrasing/train.py --use_habana --no_cuda --use_fused_adam --use_fused_clip_norm --max_seq_length 128 --train_batch_size 32 --num_train_epochs 5 --logging_steps 50 --save_best_model --output_dir output --bf16 autocast
75
+ ```
76
+ - 1 HPU, BART fine-tuning on the dataset using FP32 data type:
77
+ ```python
78
+ $PYTHON examples/seq2seq/paraphrasing/train.py --use_habana --no_cuda --use_fused_adam --use_fused_clip_norm --max_seq_length 128 --train_batch_size 32 --num_train_epochs 5 --logging_steps 50 --save_best_model --output_dir output
79
+ ```
80
+
81
+ **Run training on 8 HPUs:**
82
+
83
+ To run the multi-card demo, make sure the host machine has 512 GB of RAM installed. Modify the docker run command to pass 8 Gaudi cards to the docker container. This ensures the docker container has access to all 8 cards required for multi-card training.
84
+
85
+ **NOTE:** mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Scaling_Guide/DDP_Based_Scaling.html#mpirun-configuration).
86
+
87
+ - 8 HPUs on a single server, BF16, batch size 32, Lazy mode:
88
+ ```bash
89
+ PT_HPU_AUTOCAST_LOWER_PRECISION_OPS_LIST=ops_bf16_bart.txt PT_HPU_AUTOCAST_FP32_OPS_LIST=ops_fp32_bart.txt mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root $PYTHON examples/seq2seq/paraphrasing/train.py --use_habana --no_cuda --use_fused_adam --use_fused_clip_norm --max_seq_length 128 --train_batch_size 32 --num_train_epochs 5 --logging_steps 50 --save_best_model --output_dir /tmp/multicards --bf16 autocast --distributed
90
+ ```
91
+
92
+ - 8 HPUs on a single server, FP32, batch size 32, Lazy mode:
93
+ ```bash
94
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root $PYTHON examples/seq2seq/paraphrasing/train.py --use_habana --no_cuda --use_fused_adam --use_fused_clip_norm --max_seq_length 128 --train_batch_size 32 --num_train_epochs 5 --logging_steps 50 --save_best_model --output_dir /tmp/multicards --distributed
95
+ ```
96
+
97
+
98
+ ## Supported Configurations
99
+
100
+ | Device | SynapseAI Version | PyTorch Version |
101
+ |-----|-----|-----|
102
+ | Gaudi | 1.14.0 | 2.1.1 |
103
+
104
+ ## Changelog
105
+ ### 1.12.0
106
+ - Eager mode support is deprecated.
107
+ - Removed PT_HPU_LAZY_MODE environment variable.
108
+ - Removed flag lazy_mode.
109
+ - Removed HMP; switched to Autocast.
110
+ - Updated run commands.
111
+ ### 1.9.0
112
+ - Enabled PyTorch autocast on Gaudi
113
+ ### 1.6.0
114
+ - Changed BART distributed API to initialize_distributed_hpu.
115
+ ### 1.5.0
116
+ - Removed unnecessary mark_step.
117
+ ### 1.4.0
118
+ - Removed wrapper script run_bart.py.
119
+ - Added support for reducing the print frequency of Running Loss to the frequency of logging_steps.
120
+
121
+ ### Training Script Modifications
122
+
123
+ The following changes have been added to scripts & source:
124
+ modifications to the [simpletransformers](https://github.com/ThilinaRajapakse/simpletransformers) source:
125
+
126
+ 1. Added Habana Device support (seq2seq_model.py).
127
+ 2. Modifications for saving checkpoint: Bring tensors to CPU and save (seq2seq_model.py).
128
+ 3. Introduced Habana BF16 Mixed precision, adding ops lists for BF16 and FP32 (seq2seq_model.py, ops_bf16_bart.txt, ops_fp32_bart.txt).
129
+ 4. Change for supporting HMP disable for optimizer.step (seq2seq_model.py).
130
+ 5. Use fused AdamW optimizer on Habana device (seq2seq_model.py, train.py).
131
+ 6. Use fused clip norm for grad clipping on Habana device (seq2seq_model.py, train.py).
132
+ 7. Modified training script to use mpirun for distributed training (train.py).
133
+ 8. Gradients are used as views using gradient_as_bucket_view (seq2seq_model.py).
134
+ 9. Default allreduce bucket size set to 200MB for better performance in distributed training (seq2seq_model.py).
135
+ 10. Added changes to support Lazy mode with required mark_step (seq2seq_model.py).
136
+ 11. Only print and save in the master process (seq2seq_model.py).
137
+ 12. Added prediction (sentence generation) metrics (seq2seq_model.py).
138
+ 13. Modified training script to use Habana data loader (seq2seq_model.py).
139
+ 14. Add data_dir as an input argument for data directory.
140
+ 15. Added this README.
141
+
142
+ ## Known Issues
143
+
144
+ 1. Placing mark_step() arbitrarily may lead to undefined behavior. It is recommended to keep mark_step() as shown in the provided scripts.
145
+ 2. Sentence generation (prediction) is not enabled in this release. We plan to enable it in the next release.
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/bin/simple-viewer ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ cat >run_simple_transformers_streamlit_app.py <<'END_SCRIPT'
3
+ #!/usr/bin/env python
4
+ from simpletransformers.streamlit.simple_view import streamlit_runner
5
+
6
+
7
+ streamlit_runner()
8
+
9
+ END_SCRIPT
10
+
11
+ # Run
12
+ streamlit run run_simple_transformers_streamlit_app.py
13
+
14
+ rm run_simple_transformers_streamlit_app.py
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/data_download.sh ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ mkdir data
2
+ wget https://storage.googleapis.com/paws/english/paws_wiki_labeled_final.tar.gz -P data
3
+ tar -xvf data/paws_wiki_labeled_final.tar.gz -C data
4
+ mv data/final/* data
5
+ rm -r data/final
6
+
7
+ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv -P data
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/train.py ADDED
@@ -0,0 +1,520 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import datetime
4
+ import logging
5
+ import time
6
+ import pandas as pd
7
+ from sklearn.model_selection import train_test_split
8
+
9
+ import torch
10
+ sys.path.append(os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../")))
11
+ from simpletransformers.config.model_args import Seq2SeqArgs
12
+ from simpletransformers.seq2seq.seq2seq_model import Seq2SeqModel
13
+
14
+
15
+ from utils import load_data, clean_unnecessary_spaces
16
+ import argparse
17
+ import random
18
+ import hb_utils
19
+
20
+ try:
21
+ from apex import amp
22
+ except ImportError:
23
+ amp = None
24
+
25
+ logging.basicConfig(level=logging.INFO)
26
+ transformers_logger = logging.getLogger("transformers")
27
+ transformers_logger.setLevel(logging.ERROR)
28
+
29
+
30
+ def parse_args():
31
+ parser = argparse.ArgumentParser()
32
+ parser.add_argument(
33
+ "--use_habana",
34
+ action="store_true",
35
+ help="Whether not to use Habana device when available"
36
+ )
37
+ parser.add_argument(
38
+ "--output_dir",
39
+ default='/tmp/bart',
40
+ type=str,
41
+ help="Output dir",
42
+ )
43
+ parser.add_argument(
44
+ "--no_cache",
45
+ action="store_true",
46
+ help="Whether not to cache data"
47
+ )
48
+ parser.add_argument(
49
+ "--reprocess_input_data",
50
+ action="store_true",
51
+ help="Whether or not to reprocess input data"
52
+ )
53
+ parser.add_argument(
54
+ "--no_cuda",
55
+ action="store_true",
56
+ help="Whether not to use CUDA when available"
57
+ )
58
+ parser.add_argument(
59
+ "--use_fused_adam",
60
+ action="store_true",
61
+ help="Whether to use fused adamw on habana device"
62
+ )
63
+ parser.add_argument(
64
+ "--use_fused_clip_norm",
65
+ action="store_true",
66
+ help="Whether to use fused clip norm on habana device"
67
+ )
68
+ parser.add_argument(
69
+ "--local_rank",
70
+ type=int,
71
+ default=-1,
72
+ help="local_rank for distributed training on gpus"
73
+ )
74
+ parser.add_argument(
75
+ "--seed",
76
+ type=int,
77
+ default=42,
78
+ help="random seed for initialization"
79
+ )
80
+ parser.add_argument(
81
+ "--max_seq_length",
82
+ type=int,
83
+ default=128,
84
+ help="maximum input sequence length"
85
+ )
86
+ parser.add_argument(
87
+ "--train_batch_size",
88
+ type=int,
89
+ default=8,
90
+ help="batch size for training"
91
+ )
92
+ parser.add_argument(
93
+ "--fp16",
94
+ action="store_true",
95
+ help="Whether to use fp16"
96
+ )
97
+ parser.add_argument(
98
+ "--bf16",
99
+ type=str,
100
+ help="Type of bf16 mixed precision implementation",
101
+ choices=["none", "autocast"]
102
+ )
103
+ parser.add_argument(
104
+ "--debug",
105
+ action="store_true",
106
+ help="Whether in debug mode"
107
+ )
108
+ parser.add_argument(
109
+ "--save_steps",
110
+ type=int,
111
+ default=-1,
112
+ help="number of steps to save the model"
113
+ )
114
+ parser.add_argument(
115
+ "--max_steps",
116
+ type=int,
117
+ default=-1,
118
+ help="number of maximum training steps"
119
+ )
120
+ parser.add_argument(
121
+ "--save_optimizer_and_scheduler",
122
+ action="store_true",
123
+ help="Whether save optimizer and scheduler"
124
+ )
125
+ parser.add_argument(
126
+ "--eval_batch_size",
127
+ type=int,
128
+ default=64,
129
+ help="batch size for evaluation"
130
+ )
131
+ parser.add_argument(
132
+ "--evaluate_during_training",
133
+ action="store_true",
134
+ help="Whether evaluate during training"
135
+ )
136
+ parser.add_argument(
137
+ "--evaluate_during_training_steps",
138
+ type=int,
139
+ default=-1,
140
+ help="evaluate every training steps"
141
+ )
142
+ parser.add_argument(
143
+ "--evaluate_each_epoch",
144
+ action="store_true",
145
+ help="Whether evaluate after each epoch"
146
+ )
147
+ parser.add_argument(
148
+ "--evaluate_generated_text",
149
+ action="store_true",
150
+ help="Whether evaluate the generated text"
151
+ )
152
+ parser.add_argument(
153
+ "--save_model_every_epoch",
154
+ action="store_true",
155
+ help="Whether save the model after each epoch"
156
+ )
157
+ parser.add_argument(
158
+ "--save_eval_checkpoints",
159
+ action="store_true",
160
+ help="Whether save the checkpoint after evaluation"
161
+ )
162
+ parser.add_argument(
163
+ "--save_best_model",
164
+ action="store_true",
165
+ help="Whether save the best model"
166
+ )
167
+ parser.add_argument(
168
+ "--logging_steps",
169
+ type=int,
170
+ default=50,
171
+ help="number of logging steps"
172
+ )
173
+ parser.add_argument(
174
+ "--num_train_epochs",
175
+ type=int,
176
+ default=3,
177
+ help="number of epochs for training"
178
+ )
179
+ parser.add_argument(
180
+ "--num_return_sequences",
181
+ type=int,
182
+ default=1,
183
+ help="number of return sequences during beam sampling"
184
+ )
185
+ parser.add_argument(
186
+ "--predict",
187
+ action="store_true",
188
+ help="Whether generate text given input"
189
+ )
190
+ #################### distributed training ######################
191
+ parser.add_argument(
192
+ '--dl_worker_type',
193
+ default='HABANA',
194
+ type=lambda x: x.upper(),
195
+ choices = ["MT", "MP", "HABANA"],
196
+ help='select multithreading or multiprocessing'
197
+ )
198
+ parser.add_argument(
199
+ '--world_size',
200
+ default=1,
201
+ type=int,
202
+ metavar='N',
203
+ help='number of total workers (default: 1)'
204
+ )
205
+ parser.add_argument(
206
+ '--process_per_node',
207
+ default=8,
208
+ type=int,
209
+ metavar='N',
210
+ help='Number of process per node'
211
+ )
212
+ parser.add_argument(
213
+ '--distributed',
214
+ action='store_true',
215
+ help='whether to enable distributed mode and run on multiple devices'
216
+ )
217
+ parser.add_argument(
218
+ '--dist_url',
219
+ default='env://',
220
+ help='url used to set up distributed training'
221
+ )
222
+ parser.add_argument(
223
+ "--data_dir",
224
+ default="",
225
+ type=str,
226
+ help="The input data dir. If no data dir, will run with ./data under local directory.",
227
+ )
228
+ args = parser.parse_args()
229
+
230
+ model_args = Seq2SeqArgs()
231
+ model_args.debug = True if args.debug else False
232
+ model_args.eval_batch_size = args.eval_batch_size
233
+ model_args.evaluate_during_training = True if args.evaluate_during_training else False
234
+ model_args.evaluate_during_training_steps = args.evaluate_during_training_steps
235
+ model_args.evaluate_each_epoch = True if args.evaluate_each_epoch else False
236
+ model_args.evaluate_during_training_verbose = True
237
+ model_args.evaluate_generated_text = True if args.evaluate_generated_text else False
238
+ model_args.fp16 = True if args.fp16 else False
239
+ model_args.bf16 = args.bf16
240
+ model_args.learning_rate = 5e-5
241
+ model_args.gradient_accumulation_steps = 1
242
+ model_args.max_seq_length = args.max_seq_length
243
+ model_args.num_train_epochs = args.num_train_epochs
244
+ model_args.overwrite_output_dir = True
245
+ model_args.reprocess_input_data = True if args.reprocess_input_data else False
246
+ model_args.logging_steps = args.logging_steps
247
+ model_args.save_eval_checkpoints = True if args.save_eval_checkpoints else False
248
+ model_args.save_steps = args.save_steps
249
+ model_args.save_model_every_epoch = True if args.save_model_every_epoch else False
250
+ model_args.save_best_model = True if args.save_best_model else False
251
+ model_args.save_optimizer_and_scheduler = True if args.save_optimizer_and_scheduler else False
252
+ model_args.train_batch_size = args.train_batch_size
253
+ model_args.use_multiprocessing = False
254
+ model_args.use_multiprocessing_for_evaluation = False
255
+ model_args.predict = True if args.predict else False
256
+ model_args.do_sample = True
257
+ model_args.num_beams = None
258
+ model_args.num_return_sequences = args.num_return_sequences
259
+ model_args.max_length = args.max_seq_length
260
+ model_args.top_k = 50
261
+ model_args.top_p = 0.95
262
+
263
+ model_args.max_steps = args.max_steps
264
+ model_args.seed = args.seed
265
+ model_args.use_habana = args.use_habana
266
+ model_args.use_fused_adam = args.use_fused_adam
267
+ model_args.use_fused_clip_norm = args.use_fused_clip_norm
268
+ model_args.output_dir = args.output_dir
269
+ model_args.best_model_dir = args.output_dir
270
+ model_args.tensorboard_dir = args.output_dir
271
+ model_args.no_cache = True if args.no_cache else False
272
+ model_args.cache_dir = args.output_dir
273
+
274
+ if args.use_habana and args.use_fused_adam:
275
+ model_args.optimizer = 'FusedAdamW'
276
+ model_args.max_grad_norm = 1.0
277
+ else:
278
+ model_args.optimizer = 'AdamW'
279
+ model_args.adafactor_relative_step = False
280
+ model_args.adafactor_scale_parameter = False
281
+ model_args.adafactor_warmup_init = False
282
+
283
+ model_args.scheduler = "linear_schedule_with_warmup"
284
+ return args, model_args
285
+
286
def load_train_val_data():
    """Load, filter, and clean the paraphrase train/eval DataFrames.

    Returns:
        (train_df, eval_df): DataFrames with columns
        ["prefix", "input_text", "target_text"].

    NOTE(review): reads the module-level ``args`` set in ``__main__`` —
    assumes parse_args() has already run; TODO confirm no other entry point.
    Rank protocol: non-master ranks block on a barrier while rank 0 (or a
    non-distributed run, local_rank == -1) prepares and splits the data, so
    the expensive file work happens exactly once per node.
    """
    # Hold non-master ranks here until rank 0 has prepared the data files.
    if args.local_rank not in [-1, 0]:
        if args.use_habana:
            hb_utils.barrier()
        else:
            torch.distributed.barrier()

    if args.local_rank in [-1, 0]:
        # Google Data (TSVs with sentence1/sentence2/label columns).
        train_df = pd.read_csv(os.path.join(args.data_dir, "data/train.tsv"), sep="\t").astype(str)
        eval_df = pd.read_csv(os.path.join(args.data_dir, "data/dev.tsv"), sep="\t").astype(str)

        # Keep only positive (paraphrase) pairs.
        train_df = train_df.loc[train_df["label"] == "1"]
        eval_df = eval_df.loc[eval_df["label"] == "1"]

        train_df = train_df.rename(
            columns={"sentence1": "input_text", "sentence2": "target_text"}
        )
        eval_df = eval_df.rename(
            columns={"sentence1": "input_text", "sentence2": "target_text"}
        )

        train_df = train_df[["input_text", "target_text"]]
        eval_df = eval_df[["input_text", "target_text"]]

        # Task prefix consumed by the seq2seq model.
        train_df["prefix"] = "paraphrase"
        eval_df["prefix"] = "paraphrase"

        # MSRP Data
        '''
        train_df = pd.concat(
            [
                train_df,
                load_data("data/msr_paraphrase_train.txt", "Quality", "#1_String", "#2_String"),
            ]
        )
        eval_df = pd.concat(
            [
                eval_df,
                load_data("data/msr_paraphrase_test.txt", "#1 String", "#2 String", "Quality"),
            ]
        )
        '''
        # NOTE(review): the Google-data frames built above are discarded here
        # and replaced by the Quora split below — presumably intentional
        # (see the commented-out pd.concat calls); confirm before relying on
        # the Google data actually being used.
        train_df = []
        eval_df = []

        # Quora Data

        # The Quora Dataset is not separated into train/test, so we do it manually the first time.
        if not os.path.exists("data/quora_train.tsv") or not os.path.exists("data/quora_test.tsv"):
            df = load_data(
                os.path.join(args.data_dir, "data/quora_duplicate_questions.tsv"), "question1", "question2", "is_duplicate"
            )
            q_train, q_test = train_test_split(df)
            print('Splitting train and test...')
            q_train.to_csv(os.path.join(args.data_dir, "data/quora_train.tsv"), sep="\t")
            q_test.to_csv(os.path.join(args.data_dir, "data/quora_test.tsv"), sep="\t")
        else:
            # The code block above only needs to be run once.
            # After that, the two lines below are sufficient to load the Quora dataset.
            # NOTE(review): the existence check above uses "data/..." relative
            # paths while the writer prefixes args.data_dir — these only agree
            # when data_dir is empty; verify against the launch scripts.
            print('Reading train and test...')
            q_train = pd.read_csv("data/quora_train.tsv", sep="\t")
            q_test = pd.read_csv("data/quora_test.tsv", sep="\t")

        train_df = q_train  # pd.concat([train_df, q_train])
        eval_df = q_test  # pd.concat([eval_df, q_test])

        train_df = train_df[["prefix", "input_text", "target_text"]]
        eval_df = eval_df[["prefix", "input_text", "target_text"]]

        # Drop rows with missing text before string cleaning.
        train_df = train_df.dropna()
        eval_df = eval_df.dropna()

        # Undo tokenizer-style detached punctuation (" ," -> ",", " n't" -> "n't", ...).
        train_df["input_text"] = train_df["input_text"].apply(clean_unnecessary_spaces)
        train_df["target_text"] = train_df["target_text"].apply(clean_unnecessary_spaces)

        eval_df["input_text"] = eval_df["input_text"].apply(clean_unnecessary_spaces)
        eval_df["target_text"] = eval_df["target_text"].apply(clean_unnecessary_spaces)

    # Rank 0 releases the other ranks once the data files exist on disk.
    if args.local_rank == 0:
        if args.use_habana:
            hb_utils.barrier()
        else:
            torch.distributed.barrier()
    return train_df, eval_df
372
+
373
+
374
def main(args, model_args):
    """Train (and optionally evaluate/predict with) a BART paraphrase model.

    Args:
        args: parsed CLI namespace (runtime/distributed options).
        model_args: Seq2SeqArgs populated from ``args`` by parse_args().

    Side effects: initializes the distributed process group, mutates both
    ``args`` and ``model_args`` in place, writes checkpoints and prediction
    files under ``args.output_dir``.
    """
    if args.dl_worker_type == "MP":
        try:
            # Default 'fork' doesn't work with synapse. Use 'forkserver' or 'spawn'
            torch.multiprocessing.set_start_method('spawn')
        except RuntimeError:
            # Start method may already be set by an earlier caller.
            pass
    elif args.dl_worker_type == "HABANA":
        try:
            # Import is only a capability check; the module is not used here.
            import habana_dataloader
        except ImportError:
            assert False, "Could Not import habana dataloader package"

    #if args.apex:
    #    if sys.version_info < (3, 0):
    #        raise RuntimeError("Apex currently only supports Python 3. Aborting.")
    #    if amp is None:
    #        raise RuntimeError("Failed to import apex. Please install apex from https://www.github.com/nvidia/apex "
    #                           "to enable mixed-precision training.")
    hb_utils.init_distributed_mode(args)
    # init_distributed_mode sets args.rank from the HPU launcher environment;
    # mirror it into local_rank, which the rest of this script keys on.
    if hasattr(args, "rank"):
        args.local_rank = args.rank
    print('####################### These are args: ######################')
    print(args)

    # Propagate the distributed-runtime settings into the model config.
    model_args.dl_worker_type = args.dl_worker_type
    model_args.world_size = args.world_size
    model_args.process_per_node = args.process_per_node
    model_args.distributed = args.distributed
    model_args.dist_url = args.dist_url

    # Rank -1 (non-distributed) or rank 0 act as the master process.
    args.is_master = False
    if args.local_rank in [-1, 0]:
        args.is_master = True
    model_args.is_master = args.is_master
    model_args.local_rank = args.local_rank
    print("############### local_rank is_master #############", model_args.local_rank, model_args.is_master)

    # Device selection: HPU, else CUDA (multi- or single-GPU), else CPU.
    if model_args.use_habana is True:
        device = torch.device("hpu")
        args.n_gpu = 0
        print("########## HPU ##########")

    # NOTE(review): nesting of the blocks below was reconstructed from a
    # flattened diff — as written, passing --no_cuda together with
    # --use_habana would fall through to the CPU branch and override the
    # HPU device; confirm against the original script.
    if args.no_cuda is False:
        if torch.cuda.is_available():
            n_gpu = torch.cuda.device_count()
            if n_gpu > 1:
                torch.cuda.set_device(args.local_rank)
                device = torch.device("cuda", args.local_rank)
            else:
                device = torch.device("cuda")
            args.n_gpu = n_gpu
            print("########## GPU n_gpu ##########", args.n_gpu)
    else:
        device = torch.device("cpu")
        args.n_gpu = 0
        print("########## CPU ##########")

    model_args.device = device
    model_args.n_gpu = args.n_gpu

    #if args.deterministic:
    #    seed = args.seed
    #    random.seed(seed)
    #    if args.device == 'cuda':
    #        torch.cuda.manual_seed(seed)
    #else:
    #    seed = None

    train_df, eval_df = load_train_val_data()

    # NOTE(review): model_args.device is a torch.device, so comparing it to
    # the string 'hpu' is always False — this patch block looks dead as
    # written; verify whether device.type was intended.
    if model_args.device == 'hpu' and model_args.workers > 0:
        # patch torch cuda functions that are being unconditionally invoked
        # in the multiprocessing data loader
        torch.cuda.current_device = lambda: None
        torch.cuda.set_device = lambda x: None

    model = Seq2SeqModel(
        encoder_decoder_type="bart",
        encoder_decoder_name="facebook/bart-base",
        args=model_args,
        use_cuda=True if args.n_gpu > 0 else False,
        cuda_device=args.local_rank if args.n_gpu > 0 else -1,
    )

    start_time = time.time()

    model.train_model(train_df, eval_data=eval_df, output_dir=args.output_dir)

    ####################### prediction #######################
    # Only the master rank runs generation/evaluation after training.
    if args.predict and args.local_rank in [-1, 0]:
        to_predict = [
            prefix + ": " + str(input_text)
            for prefix, input_text in zip(
                eval_df["prefix"].tolist(), eval_df["input_text"].tolist()
            )
        ]
        truth = eval_df["target_text"].tolist()

        print("Start testing")
        start_time = time.time()
        #
        preds = model.predict(to_predict)
        #
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('Testing time {}'.format(total_time_str))

        # Timestamped prediction dump: input, reference, and all sampled outputs.
        os.makedirs(os.path.join(args.output_dir, "predictions"), exist_ok=True)
        pred_time = f"_{datetime.datetime.now()}"
        pred_text = os.path.join(args.output_dir, "predictions", "pred_text"+pred_time+".txt")

        with open(pred_text, "w") as f:
            for i, text in enumerate(eval_df["input_text"].tolist()):
                f.write(str(text) + "\n\n")

                f.write("Truth:\n")
                f.write(truth[i] + "\n\n")

                f.write("Prediction:\n")
                # preds[i] holds num_return_sequences candidates per input.
                for pred in preds[i]:
                    f.write(str(pred) + "\n")
                f.write(
                    "________________________________________________________________________________\n"
                )

        results = model.compute_metrics(
            truth, preds
        )
        print('Prediction results:')
        print(results)

        pred_results = os.path.join(args.output_dir, "predictions", "pred_results"+pred_time+".csv")
        report = pd.DataFrame(results, index=[0])
        report.to_csv(
            pred_results,
            index=False,
        )
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Total time {}'.format(total_time_str))
517
+
518
if __name__ == "__main__":
    # Parse CLI flags into (runtime args, Seq2SeqArgs model config), then train.
    args, model_args = parse_args()
    main(args, model_args)
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/utils.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+ import pandas as pd
4
+
5
+
6
def load_data(
    file_path, input_text_column, target_text_column, label_column, keep_label=1
):
    """Read a TSV file and return a paraphrase-pair DataFrame.

    Rows whose ``label_column`` differs from ``keep_label`` are dropped,
    the two text columns are renamed to the canonical ``input_text`` /
    ``target_text`` names, and a constant ``prefix`` column is appended.
    """
    frame = pd.read_csv(file_path, sep="\t")
    kept = frame[frame[label_column] == keep_label]
    renamed = kept.rename(
        columns={
            input_text_column: "input_text",
            target_text_column: "target_text",
        }
    )
    result = renamed.loc[:, ["input_text", "target_text"]].copy()
    result["prefix"] = "paraphrase"
    return result
18
+
19
+
20
def clean_unnecessary_spaces(out_string):
    """Collapse tokenizer-style detached punctuation and contractions.

    E.g. ``"do n't go , now"`` becomes ``"don't go, now"``. Non-string
    inputs trigger a warning and are cast to ``str`` before cleaning.
    """
    if not isinstance(out_string, str):
        warnings.warn(f">>> {out_string} <<< is not a string.")
        out_string = str(out_string)
    # Ordered (pattern, replacement) pairs, applied left to right exactly
    # like the original chained .replace() calls.
    replacements = (
        (" .", "."),
        (" ?", "?"),
        (" !", "!"),
        (" ,", ","),
        (" ' ", "'"),
        (" n't", "n't"),
        (" 'm", "'m"),
        (" 's", "'s"),
        (" 've", "'ve"),
        (" 're", "'re"),
    )
    for old, new in replacements:
        out_string = out_string.replace(old, new)
    return out_string
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/hb_utils.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021, Habana Labs Ltd. All rights reserved.
2
+ from __future__ import print_function
3
+ from collections import defaultdict, deque
4
+ import datetime
5
+ import time
6
+ import torch
7
+ import torch.distributed as dist
8
+ import errno
9
+ import os
10
+ mpi_comm = None
11
class SmoothedValue(object):
    """Track a series of values and expose smoothed statistics.

    The last ``window_size`` raw values feed the windowed statistics
    (median / avg / max / value); running ``count`` and ``total`` feed the
    global average over every update ever recorded.
    """
    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        # Default rendering shows the windowed median plus the global average.
        self.fmt = fmt if fmt is not None else "{median:.4f} ({global_avg:.4f})"
    def update(self, value, n=1):
        """Record ``value`` (with weight ``n``) in the window and the totals."""
        self.deque.append(value)
        self.count += n
        self.total += value * n
    def synchronize_between_processes(self, device):
        """All-reduce ``count`` and ``total`` across workers.

        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        # HPU path uses float32 and an explicit .to('hpu'); the CUDA path
        # keeps the original float64 tensor created directly on device.
        if device.type == 'hpu':
            stats = torch.tensor([self.count, self.total], dtype=torch.float32).to('hpu')
        else:
            stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        synced = stats.tolist()
        self.count = int(synced[0])
        self.total = synced[1]
    @property
    def median(self):
        return torch.tensor(list(self.deque)).median().item()
    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()
    @property
    def global_avg(self):
        return self.total / self.count
    @property
    def max(self):
        return max(self.deque)
    @property
    def value(self):
        return self.deque[-1]
    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value,
        )
65
class MetricLogger(object):
    """Collect named SmoothedValue meters and log them during training.

    ``update(loss=..., lr=...)`` feeds the meters; ``log_every`` wraps an
    iterable and prints progress (ETA, meter values, per-iteration timing,
    and CUDA memory when available) every ``print_freq`` steps.
    """
    def __init__(self, delimiter="\t", device=torch.device('cuda')):
        # Unknown meter names are created lazily with default settings.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
        # Device used only when synchronizing meters across processes.
        self.device = device
    def update(self, **kwargs):
        """Record one scalar per named meter; tensors are unwrapped via .item()."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Allow meter access as attributes, e.g. logger.loss.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        """All-reduce every meter's count/total across workers."""
        for meter in self.meters.values():
            meter.synchronize_between_processes(self.device)
    def add_meter(self, name, meter):
        # Register a meter with custom window/format instead of the default.
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Yield items from ``iterable``, printing progress every ``print_freq`` steps.

        Generator: timing of the consumer's loop body is captured between
        successive ``yield``s (iter_time), while data_time measures the
        iterable's own next() latency.
        """
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of len(iterable).
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            # Time spent in the consumer's loop body plus fetching this item.
            iter_time.update(time.time() - end)
            if i % print_freq == 0:
                # ETA extrapolates the global average iteration time.
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {}'.format(header, total_time_str))
147
+ # Modified version of accuracy. target and pred tensors are pytorch Long
148
+ # which is not supported by habana kernels yet. So fall back to CPU for
149
+ # ops involving these(and remain on CPU since this is the last oprton of
150
+ # iteration and we need the accuracy values to be printed out on host)
151
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Modified version: the comparison runs on CPU because Long-tensor ops
    here are not yet supported by Habana kernels, and the accuracy values
    are printed on the host anyway.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) Long tensor of class indices.
        topk: tuple of k values to report.
    Returns:
        list of scalar float tensors, one accuracy percentage per k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        # Fix: torch.tensor(existing_tensor) copy-constructs through the
        # factory, which emits a UserWarning and fails on tensors that
        # require grad; detach().cpu() is the supported way to move a copy
        # to the host.
        pred_cpu = pred.detach().cpu()
        target_cpu = target.detach().cpu()
        correct = pred_cpu.eq(target_cpu[None])
        res = []
        for k in topk:
            # A sample counts as correct for top-k if the target appears in
            # any of its k highest-scoring predictions.
            correct_k = correct[:k].flatten().sum(dtype=torch.float32)
            res.append(correct_k * (100.0 / batch_size))
        return res
167
+ #Original accuracy code
168
def accuracy_orig(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        largest_k = max(topk)
        batch_size = target.size(0)
        # (batch, maxk) indices of the highest scores, transposed to (maxk, batch).
        _, pred = output.topk(largest_k, 1, True, True)
        pred = pred.t()
        # Row r marks which samples have the target in their r-th prediction.
        correct = pred.eq(target[None])
        return [
            correct[:k].flatten().sum(dtype=torch.float32) * (100.0 / batch_size)
            for k in topk
        ]
181
def mkdir(path):
    """Create ``path`` (including parents), tolerating an existing directory."""
    try:
        os.makedirs(path)
    except OSError as err:
        # Swallow only "already exists"; propagate every other failure.
        if err.errno == errno.EEXIST:
            return
        raise
187
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print
    # Replacement print: drops output on non-master ranks unless the caller
    # passes force=True to override the filter.
    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)
    # Monkey-patch the builtin so every module's print() goes through the filter.
    __builtin__.print = print
198
def is_dist_avail_and_initialized():
    """Return True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
204
def get_world_size():
    """Number of distributed workers, or 1 when not running distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
208
def get_rank():
    """Global rank of this process, or 0 when not running distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
212
def is_main_process():
    """True on rank 0 (and in non-distributed runs)."""
    return 0 == get_rank()
214
def save_on_master(*args, **kwargs):
    """torch.save(...), executed only on the main process to avoid duplicate writes."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
217
def barrier():
    """MPI barrier across workers; no-op when no MPI communicator is configured."""
    if mpi_comm is None:
        return
    mpi_comm.Barrier()
220
def init_distributed_mode(args):
    """Initialize the torch.distributed process group for HPU or CUDA runs.

    Mutates ``args`` in place: sets world_size/rank/local_rank from the
    Habana launcher environment, plus ``distributed`` and ``dist_backend``.
    With a world size of 1 this is a no-op (distributed stays False).
    """
    from habana_frameworks.torch.utils.distributed_utils import initialize_distributed_hpu
    # Ranks/world size come from the HPU launcher environment variables.
    args.world_size, args.rank, args.local_rank = initialize_distributed_hpu()
    if args.world_size == 1:
        args.distributed = False
        return

    args.distributed = True
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    if args.use_habana:
        # HCCL is the collective backend for Habana devices.
        args.dist_backend = 'hccl'
        dist.init_process_group(args.dist_backend, rank=args.rank, world_size=args.world_size)
    else:
        # NOTE(review): args.gpu is not set anywhere in this module — the
        # CUDA branch presumably relies on the caller populating it; verify.
        torch.cuda.set_device(args.gpu)
        args.dist_backend = 'nccl'
        torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                             world_size=args.world_size, rank=args.rank)
    # Silence print() on non-zero ranks.
    setup_for_distributed(args.rank == 0)
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/ops_bf16_bart.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ add
2
+ addmm
3
+ bmm
4
+ dot
5
+ dropout
6
+ gelu
7
+ iadd
8
+ layer_norm
9
+ linear
10
+ matmul
11
+ mm
12
+ mv
13
+ relu
14
+ softmax
15
+ sum
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/ops_fp32_bart.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ div
2
+ embedding
3
+ log_softmax
4
+ nll_loss
5
+ truediv
6
+ sort
7
+ topk
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ scikit-learn
2
+ datasets==1.9.0
3
+ sentencepiece
4
+ seqeval
5
+ streamlit
6
+ tensorboardx
7
+ wandb
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/setup.cfg ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [tool:pytest]
2
+ python_functions=test_
3
+
4
+ codestyle_max_line_length = 119
5
+
6
+ log_cli = true
7
+ log_cli_level = WARNING
8
+
9
+ [metadata]
10
+ description-file = README.md
11
+ license_file = LICENSE
12
+
13
+ [pycodestyle]
14
+ max-line-length = 119
15
+
16
+ [flake8]
17
+ max-line-length = 119
18
+ ignore = E203 , W503, F401
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/setup.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from setuptools import find_packages, setup

# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Package metadata for the simpletransformers distribution.
setup(
    name="simpletransformers",
    version="0.61.9",
    author="Thilina Rajapakse",
    author_email="[email protected]",
    description="An easy-to-use wrapper library for the Transformers library.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ThilinaRajapakse/simpletransformers/",
    packages=find_packages(),
    scripts=["bin/simple-viewer"],
    classifiers=[
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    python_requires=">=3.6",
    install_requires=[
        "numpy",
        "requests",
        "tqdm>=4.47.0",
        "regex",
        "transformers>=4.2.0",
        "datasets",
        "scipy",
        "scikit-learn",
        "seqeval",
        "tensorboardx",
        "pandas",
        "tokenizers",
        "wandb>=0.10.32",
        "streamlit",
        "sentencepiece",
    ],
)
docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/simpletransformers/seq2seq/seq2seq_utils.py ADDED
@@ -0,0 +1,554 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ import pickle
4
+ from multiprocessing import Pool
5
+ from functools import partial
6
+ from typing import Tuple
7
+
8
+ import pandas as pd
9
+ import torch
10
+ import transformers
11
+ from tokenizers.implementations import ByteLevelBPETokenizer
12
+ from tokenizers.processors import BertProcessing
13
+ from torch.utils.data import Dataset
14
+ from tqdm.auto import tqdm
15
+ from transformers import PreTrainedTokenizer
16
+ from transformers.models.bart.modeling_bart import (
17
+ shift_tokens_right as _shift_tokens_right,
18
+ )
19
+ from datasets import Features, Sequence, Value, load_dataset
20
+ from datasets import Dataset as HFDataset
21
+ from transformers import (
22
+ DPRContextEncoder,
23
+ DPRContextEncoderTokenizerFast,
24
+ )
25
+
26
+
27
logger = logging.getLogger(__name__)


def _parse_version(version_string):
    """Best-effort numeric (major, minor, patch) tuple from a version string."""
    parts = []
    for piece in version_string.split(".")[:3]:
        digits = "".join(ch for ch in piece if ch.isdigit())
        parts.append(int(digits) if digits else 0)
    return tuple(parts)


# transformers < 4.2.0 exposed shift_tokens_right(input_ids, pad_token_id);
# newer versions also take decoder_start_token_id. Normalize both to the
# three-argument signature.
# Fix: the original compared version STRINGS ("4.10.0" < "4.2.0" is True
# lexicographically), which would select the legacy 2-arg wrapper on
# transformers >= 4.10 and crash with a TypeError. Compare numeric tuples.
if _parse_version(transformers.__version__) < (4, 2, 0):
    shift_tokens_right = lambda input_ids, pad_token_id, decoder_start_token_id: _shift_tokens_right(
        input_ids, pad_token_id
    )
else:
    shift_tokens_right = _shift_tokens_right
35
+
36
+
37
def preprocess_batch_for_hf_dataset(
    dataset, encoder_tokenizer, decoder_tokenizer, args
):
    """Tokenize one batch of a HF dataset into model-ready arrays.

    Args:
        dataset: batch dict with "input_text" and "target_text" lists.
        encoder_tokenizer / decoder_tokenizer: tokenizers for each side
            (bart uses the encoder tokenizer for both).
        args: model args carrying model_type, max_seq_length, src/tgt langs.

    Returns:
        A dict of padded/truncated numpy arrays whose keys depend on
        args.model_type (bart uses source_ids/source_mask/target_ids;
        the other branches use input_ids/attention_mask/decoder_input_ids).
    """
    if args.model_type == "bart":
        # BART uses a shared tokenizer for inputs and targets.
        input_ids = encoder_tokenizer.batch_encode_plus(
            dataset["input_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )

        target_ids = encoder_tokenizer.batch_encode_plus(
            dataset["target_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )

        return {
            "source_ids": input_ids["input_ids"].squeeze(),
            "source_mask": input_ids["attention_mask"].squeeze(),
            "target_ids": target_ids["input_ids"].squeeze(),
        }
    elif args.model_type == "mbart":
        # Multilingual BART: encode both sides with language codes in one call.
        tokenized_example = encoder_tokenizer.prepare_seq2seq_batch(
            src_texts=dataset["input_text"],
            tgt_texts=dataset["target_text"],
            src_lang=args.src_lang,
            tgt_lang=args.tgt_lang,
            max_length=args.max_seq_length,
            padding="max_length",  # pad_to_max_length=True won't work in this case
            return_tensors="np",
            truncation=True,
        )

        # Decoder inputs are the labels shifted right, starting with the
        # target-language code token.
        # NOTE(review): .clone() is a torch method — with return_tensors="np"
        # this would fail on a numpy array; confirm which tensor type
        # prepare_seq2seq_batch actually returns here.
        decoder_input_ids = tokenized_example["labels"].clone()
        decoder_input_ids = shift_tokens_right(
            decoder_input_ids,
            encoder_tokenizer.pad_token_id,
            encoder_tokenizer.lang_code_to_id[args.tgt_lang],
        )

        # Padding positions are masked out of the loss with -100.
        labels = tokenized_example["labels"]
        labels[labels == encoder_tokenizer.pad_token_id] = -100

        return {
            "input_ids": tokenized_example["input_ids"].squeeze(),
            "attention_mask": tokenized_example["attention_mask"].squeeze(),
            "decoder_input_ids": decoder_input_ids.squeeze(),
            "labels": labels.squeeze(),
        }
    elif args.model_type in ["rag-token", "rag-sequence"]:
        # RAG: question encoder for inputs, the generator's tokenizer for targets.
        source_inputs = encoder_tokenizer(
            dataset["input_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )
        try:
            target_inputs = encoder_tokenizer.generator(
                dataset["target_text"],
                max_length=args.max_seq_length,
                padding="max_length",
                return_tensors="np",
                truncation=True,
            )
        except TypeError:
            # Non-string targets (e.g. NaN from pandas) break the tokenizer;
            # cast everything to str and retry once.
            logger.warn(
                """Error encountered while converting target_text.
            All target_text values have been manually cast to String as a workaround.
            This may have been caused by NaN values present in the data."""
            )
            dataset["target_text"] = [str(d) for d in dataset["target_text"]]
            target_inputs = encoder_tokenizer.generator(
                dataset["target_text"],
                max_length=args.max_seq_length,
                padding="max_length",
                return_tensors="np",
                truncation=True,
            )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    else:
        # Generic encoder-decoder: separate tokenizers for each side.
        source_inputs = encoder_tokenizer(
            dataset["input_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )

        target_inputs = decoder_tokenizer(
            dataset["target_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
152
+
153
+
154
def load_hf_dataset(data, encoder_tokenizer, decoder_tokenizer, args):
    """Build a tokenized HF Dataset from a TSV path or a pandas DataFrame.

    Args:
        data: path to a tab-separated file, or a DataFrame with
            input_text/target_text columns.
        encoder_tokenizer / decoder_tokenizer: passed through to
            preprocess_batch_for_hf_dataset.
        args: model args (model_type selects the output column set).

    Returns:
        A datasets.Dataset with torch-formatted tokenized columns.
    """
    if isinstance(data, str):
        dataset = load_dataset(
            "csv",
            data_files=data,
            delimiter="\t",
            # Honour --reprocess_input_data by bypassing the datasets cache.
            download_mode="force_redownload"
            if args.reprocess_input_data
            else "reuse_dataset_if_exists",
        )
    else:
        dataset = HFDataset.from_pandas(data)

    # Tokenize in batches; the mapping function dispatches on args.model_type.
    dataset = dataset.map(
        lambda x: preprocess_batch_for_hf_dataset(
            x,
            encoder_tokenizer=encoder_tokenizer,
            decoder_tokenizer=decoder_tokenizer,
            args=args,
        ),
        batched=True,
    )

    # Column names must match what preprocess_batch_for_hf_dataset emitted
    # for this model type.
    if args.model_type == "bart":
        column_names = [
            "source_ids",
            "source_mask",
            "target_ids",
        ]
    elif args.model_type == "mbart":
        column_names = [
            "input_ids",
            "attention_mask",
            "decoder_input_ids",
            "labels",
        ]
    else:
        column_names = [
            "input_ids",
            "attention_mask",
            "decoder_input_ids",
        ]

    # Expose the tokenized columns as torch tensors.
    dataset.set_format(type="pt", columns=column_names)

    if isinstance(data, str):
        # This is not necessarily a train dataset. The datasets library insists on calling it train.
        return dataset["train"]
    else:
        return dataset
204
+
205
+
206
def preprocess_data(data):
    """Tokenize a single (input, target) example for Seq2SeqDataset.

    Args:
        data: 5-tuple of (input_text, target_text, encoder_tokenizer,
            decoder_tokenizer, args) — packed as one argument so it can be
            mapped over a multiprocessing Pool.

    Returns:
        For RAG model types, a dict of input_ids/attention_mask/
        decoder_input_ids tensors; otherwise a (input_ids, target_ids)
        tuple of flattened Long tensors.
    """
    input_text, target_text, encoder_tokenizer, decoder_tokenizer, args = data

    if args.model_type in ["rag-token", "rag-sequence"]:
        # RAG: question encoder for the input, generator tokenizer for the target.
        source_inputs = encoder_tokenizer(
            input_text,
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="pt",
            truncation=True,
        )
        target_inputs = encoder_tokenizer.generator(
            target_text,
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="pt",
            truncation=True,
        )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    else:
        # Fixed-length encode of both sides; flatten drops the batch dim of 1.
        input_text = encoder_tokenizer.encode(
            input_text,
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="pt",
            truncation=True,
        )

        target_text = decoder_tokenizer.encode(
            target_text,
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="pt",
            truncation=True,
        )
        return (torch.flatten(input_text), torch.flatten(target_text))
249
+
250
+
251
class Seq2SeqDataset(Dataset):
    """Tokenized seq2seq dataset with on-disk feature caching.

    Tokenizes every (input_text, target_text) pair via preprocess_data,
    optionally in a multiprocessing Pool, and pickles the resulting
    examples under args.cache_dir keyed by model name, max_seq_length,
    and dataset size.
    """
    def __init__(self, encoder_tokenizer, decoder_tokenizer, args, data, mode):
        # Cache key: model name + max sequence length + number of examples.
        # NOTE(review): datasets of equal length but different content map to
        # the same cache file — rely on --reprocess_input_data when swapping data.
        cached_features_file = os.path.join(
            args.cache_dir,
            args.model_name.replace("/", "_")
            + "_cached_"
            + str(args.max_seq_length)
            + str(len(data)),
        )

        # Reuse the cache unless reprocessing was requested; eval ("dev")
        # features additionally honour use_cached_eval_features.
        if os.path.exists(cached_features_file) and (
            (not args.reprocess_input_data and not args.no_cache)
            or (mode == "dev" and args.use_cached_eval_features and not args.no_cache)
        ):
            if args.is_master:
                logger.info(" Loading features from cached file %s", cached_features_file)
            with open(cached_features_file, "rb") as handle:
                self.examples = pickle.load(handle)
        else:
            #logger.info
            print(f" Creating features from dataset file at {args.cache_dir}")

            # Pack each example with its tokenizers/args so preprocess_data
            # can be used directly as a Pool map function.
            data = [
                (input_text, target_text, encoder_tokenizer, decoder_tokenizer, args)
                for input_text, target_text in zip(
                    data["input_text"], data["target_text"]
                )
            ]

            if (mode == "train" and args.use_multiprocessing) or (
                mode == "dev" and args.use_multiprocessing_for_evaluation
            ):
                # Chunk size balances IPC overhead against load balancing.
                if args.multiprocessing_chunksize == -1:
                    chunksize = max(len(data) // (args.process_count * 2), 500)
                else:
                    chunksize = args.multiprocessing_chunksize

                with Pool(args.process_count) as p:
                    self.examples = list(
                        tqdm(
                            p.imap(preprocess_data, data, chunksize=chunksize),
                            total=len(data),
                            disable=not args.is_master,#args.silent,
                        )
                    )
            else:
                # Single-process tokenization; progress bar only on master.
                self.examples = [
                    preprocess_data(d) for d in tqdm(data, disable=not args.is_master)#args.silent)
                ]

            # Only the master rank writes the cache to avoid racing writers.
            if not args.no_cache and args.is_master:
                logger.info(
                    " Saving features into cached file %s", cached_features_file
                )
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, index):
        return self.examples[index]
313
+
314
+
315
def preprocess_data_bart(data):
    """Tokenize one (input, target) pair for BART-style training.

    ``data`` is a 4-tuple ``(input_text, target_text, tokenizer, args)``.
    Returns a dict of squeezed tensors keyed "source_ids", "source_mask"
    and "target_ids".
    """
    src_text, tgt_text, tokenizer, args = data

    # Same tokenization settings for source and target.
    shared = {
        "max_length": args.max_seq_length,
        "padding": "max_length",
        "return_tensors": "pt",
        "truncation": True,
    }
    src = tokenizer.batch_encode_plus([src_text], **shared)
    tgt = tokenizer.batch_encode_plus([tgt_text], **shared)

    return {
        "source_ids": src["input_ids"].squeeze(),
        "source_mask": src["attention_mask"].squeeze(),
        "target_ids": tgt["input_ids"].squeeze(),
    }
339
+
340
+
341
def preprocess_data_mbart(data):
    """Tokenize one (input, target) pair for mBART training.

    ``data`` is a 4-tuple ``(input_text, target_text, tokenizer, args)``.
    Builds decoder inputs by right-shifting the labels (prepending the target
    language code) and masks padding positions in the labels with -100 so they
    are ignored by the loss.
    """
    src_text, tgt_text, tokenizer, args = data

    batch = tokenizer.prepare_seq2seq_batch(
        src_texts=[src_text],
        tgt_texts=[tgt_text],
        src_lang=args.src_lang,
        tgt_lang=args.tgt_lang,
        max_length=args.max_seq_length,
        padding="max_length",  # pad_to_max_length=True won't work in this case
        return_tensors="pt",
        truncation=True,
    )

    # Decoder inputs: labels shifted right, starting with the target language code.
    decoder_input_ids = shift_tokens_right(
        batch["labels"].clone(),
        tokenizer.pad_token_id,
        tokenizer.lang_code_to_id[args.tgt_lang],
    )

    # Padding positions are excluded from the loss via the -100 sentinel.
    labels = batch["labels"]
    labels[labels == tokenizer.pad_token_id] = -100

    return {
        "input_ids": batch["input_ids"].squeeze(),
        "attention_mask": batch["attention_mask"].squeeze(),
        "decoder_input_ids": decoder_input_ids.squeeze(),
        "labels": labels.squeeze(),
    }
371
+
372
+
373
class SimpleSummarizationDataset(Dataset):
    """Summarization dataset for BART/mBART models, with on-disk pickle caching.

    Tokenizes ``(input_text, target_text)`` pairs from ``data`` (a mapping with
    "input_text" and "target_text" columns) using :func:`preprocess_data_mbart`
    for mBART and :func:`preprocess_data_bart` otherwise.  Features are cached
    under ``args.cache_dir`` and reloaded on later runs unless ``args.no_cache``
    or ``args.reprocess_input_data`` is set.
    """

    def __init__(self, tokenizer, args, data, mode):
        # Kept so callers can reach the tokenizer later (e.g. for decoding).
        self.tokenizer = tokenizer

        # Cache file name encodes model, max sequence length and dataset size.
        cached_features_file = os.path.join(
            args.cache_dir,
            args.model_name.replace('/', '_') + "_cached_" + str(args.max_seq_length) + str(len(data)),
        )

        if os.path.exists(cached_features_file) and (
            (not args.reprocess_input_data and not args.no_cache)
            or (mode == "dev" and args.use_cached_eval_features and not args.no_cache)
        ):
            # Only the master rank logs; every rank loads the cache.
            if args.is_master:
                logger.info(" Loading features from cached file %s", cached_features_file)
            with open(cached_features_file, "rb") as handle:
                self.examples = pickle.load(handle)
        else:
            # NOTE(review): plain print (not logger) — runs on every rank.
            print(f" Creating features from dataset file at { args.cache_dir}")

            # Bundle each pair with the tokenizer/args so the preprocess
            # function is picklable for multiprocessing workers.
            data = [
                (input_text, target_text, tokenizer, args)
                for input_text, target_text in zip(
                    data["input_text"], data["target_text"]
                )
            ]

            # mBART needs language codes and label shifting; BART does not.
            preprocess_fn = (
                preprocess_data_mbart
                if args.model_type == "mbart"
                else preprocess_data_bart
            )

            if (mode == "train" and args.use_multiprocessing) or (
                mode == "dev" and args.use_multiprocessing_for_evaluation
            ):
                if args.multiprocessing_chunksize == -1:
                    # Heuristic: ~2 chunks per worker, at least 500 items each.
                    chunksize = max(len(data) // (args.process_count * 2), 500)
                else:
                    chunksize = args.multiprocessing_chunksize

                with Pool(args.process_count) as p:
                    self.examples = list(
                        tqdm(
                            p.imap(preprocess_fn, data, chunksize=chunksize),
                            total=len(data),
                            disable=not args.is_master,
                        )
                    )
            else:
                self.examples = [
                    preprocess_fn(d) for d in tqdm(data, disable=not args.is_master)
                ]

            # Only the master rank writes the cache to avoid clobbering.
            if not args.no_cache and args.is_master:
                logger.info(
                    " Saving features into cached file %s", cached_features_file
                )
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        """Number of pre-tokenized examples."""
        return len(self.examples)

    def __getitem__(self, index):
        """Return the pre-tokenized example at *index*."""
        return self.examples[index]
440
+
441
+
442
def split_text(text, n=100, character=" "):
    """Split *text* into chunks of at most ``n`` tokens, splitting on ``character``."""
    tokens = text.split(character)
    chunks = []
    for start in range(0, len(tokens), n):
        chunks.append(character.join(tokens[start : start + n]).strip())
    return chunks
446
+
447
+
448
def split_documents(
    documents, split_text_n=100, split_text_character=" ", include_title=True
):
    """Split documents into passages of at most ``split_text_n`` tokens each.

    Returns a dict with parallel "title" and "text" lists.  Titles are empty
    strings when ``include_title`` is False or a document has no title;
    documents whose text is None are skipped entirely.
    """
    titles = []
    texts = []

    if include_title:
        pairs = zip(documents["title"], documents["text"])
    else:
        pairs = ((None, text) for text in documents["text"])

    for title, text in pairs:
        if text is None:
            continue
        passage_title = title if (include_title and title is not None) else ""
        for passage in split_text(
            text, n=split_text_n, character=split_text_character
        ):
            titles.append(passage_title)
            texts.append(passage)

    return {"title": titles, "text": texts}
470
+
471
+
472
def embed(documents, ctx_encoder, ctx_tokenizer, device):
    """Compute DPR embeddings for a batch of (title, text) passages.

    Tokenizes title/text pairs together, runs the context encoder on *device*,
    and returns the pooled embeddings as a CPU NumPy array under "embeddings".
    """
    token_ids = ctx_tokenizer(
        documents["title"],
        documents["text"],
        truncation=True,
        padding="longest",
        return_tensors="pt",
    )["input_ids"]
    pooled = ctx_encoder(token_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": pooled.detach().cpu().numpy()}
485
+
486
+
487
def generate_faiss_index_dataset(data, ctx_encoder_name, args, device):
    """
    Build a passage dataset with DPR embeddings and a FAISS HNSW index.

    Adapted from the Huggingface example script at
    https://github.com/huggingface/transformers/blob/master/examples/research_projects/rag/use_own_knowledge_dataset.py
    """
    import faiss

    from_file = isinstance(data, str)

    # Load the raw knowledge source: a TSV file path or an in-memory DataFrame.
    if from_file:
        columns = (
            ["title", "text"]
            if args.include_title_in_knowledge_dataset
            else ["text"]
        )
        dataset = load_dataset(
            "csv", data_files=data, delimiter="\t", column_names=columns
        )
    else:
        dataset = HFDataset.from_pandas(data)

    # Break each document into fixed-size passages.
    dataset = dataset.map(
        partial(
            split_documents,
            split_text_n=args.split_text_n,
            split_text_character=args.split_text_character,
            include_title=args.include_title_in_knowledge_dataset,
        ),
        batched=True,
        num_proc=args.process_count,
    )

    ctx_encoder = DPRContextEncoder.from_pretrained(ctx_encoder_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(ctx_encoder_name)

    # Store embeddings as float32 instead of float64 to save space.
    embedding_features = Features(
        {
            "text": Value("string"),
            "title": Value("string"),
            "embeddings": Sequence(Value("float32")),
        }
    )
    dataset = dataset.map(
        partial(
            embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer, device=device
        ),
        batched=True,
        batch_size=args.rag_embed_batch_size,
        features=embedding_features,
    )
    if from_file:
        # load_dataset wraps file input in a "train" split; unwrap it.
        dataset = dataset["train"]

    if args.save_knowledge_dataset:
        output_dataset_directory = os.path.join(args.output_dir, "knowledge_dataset")
        os.makedirs(output_dataset_directory, exist_ok=True)
        dataset.save_to_disk(output_dataset_directory)

    index = faiss.IndexHNSWFlat(args.faiss_d, args.faiss_m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    return dataset
546
+
547
+
548
def add_faiss_index_to_dataset(dataset, d=768, m=128):
    """Attach a FAISS HNSW inner-product index over the "embeddings" column.

    Args:
        dataset: a HuggingFace dataset with an "embeddings" column of float vectors.
        d: embedding dimensionality. Defaults to 768, matching the hard-coded
           value previously used here (the DPR encoder output size).
        m: HNSW connectivity parameter (neighbors per node). Defaults to 128.

    Returns:
        The same dataset object with the index attached (mutated in place).
    """
    import faiss

    index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    return dataset
docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/LICENSE ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2021 Habana Labs, Ltd. an Intel Company
2
+
3
+ Apache License
4
+ Version 2.0, January 2004
5
+ http://www.apache.org/licenses/
6
+
7
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8
+
9
+ 1. Definitions.
10
+
11
+ "License" shall mean the terms and conditions for use, reproduction,
12
+ and distribution as defined by Sections 1 through 9 of this document.
13
+
14
+ "Licensor" shall mean the copyright owner or entity authorized by
15
+ the copyright owner that is granting the License.
16
+
17
+ "Legal Entity" shall mean the union of the acting entity and all
18
+ other entities that control, are controlled by, or are under common
19
+ control with that entity. For the purposes of this definition,
20
+ "control" means (i) the power, direct or indirect, to cause the
21
+ direction or management of such entity, whether by contract or
22
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
23
+ outstanding shares, or (iii) beneficial ownership of such entity.
24
+
25
+ "You" (or "Your") shall mean an individual or Legal Entity
26
+ exercising permissions granted by this License.
27
+
28
+ "Source" form shall mean the preferred form for making modifications,
29
+ including but not limited to software source code, documentation
30
+ source, and configuration files.
31
+
32
+ "Object" form shall mean any form resulting from mechanical
33
+ transformation or translation of a Source form, including but
34
+ not limited to compiled object code, generated documentation,
35
+ and conversions to other media types.
36
+
37
+ "Work" shall mean the work of authorship, whether in Source or
38
+ Object form, made available under the License, as indicated by a
39
+ copyright notice that is included in or attached to the work
40
+ (an example is provided in the Appendix below).
41
+
42
+ "Derivative Works" shall mean any work, whether in Source or Object
43
+ form, that is based on (or derived from) the Work and for which the
44
+ editorial revisions, annotations, elaborations, or other modifications
45
+ represent, as a whole, an original work of authorship. For the purposes
46
+ of this License, Derivative Works shall not include works that remain
47
+ separable from, or merely link (or bind by name) to the interfaces of,
48
+ the Work and Derivative Works thereof.
49
+
50
+ "Contribution" shall mean any work of authorship, including
51
+ the original version of the Work and any modifications or additions
52
+ to that Work or Derivative Works thereof, that is intentionally
53
+ submitted to Licensor for inclusion in the Work by the copyright owner
54
+ or by an individual or Legal Entity authorized to submit on behalf of
55
+ the copyright owner. For the purposes of this definition, "submitted"
56
+ means any form of electronic, verbal, or written communication sent
57
+ to the Licensor or its representatives, including but not limited to
58
+ communication on electronic mailing lists, source code control systems,
59
+ and issue tracking systems that are managed by, or on behalf of, the
60
+ Licensor for the purpose of discussing and improving the Work, but
61
+ excluding communication that is conspicuously marked or otherwise
62
+ designated in writing by the copyright owner as "Not a Contribution."
63
+
64
+ "Contributor" shall mean Licensor and any individual or Legal Entity
65
+ on behalf of whom a Contribution has been received by Licensor and
66
+ subsequently incorporated within the Work.
67
+
68
+ 2. Grant of Copyright License. Subject to the terms and conditions of
69
+ this License, each Contributor hereby grants to You a perpetual,
70
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71
+ copyright license to reproduce, prepare Derivative Works of,
72
+ publicly display, publicly perform, sublicense, and distribute the
73
+ Work and such Derivative Works in Source or Object form.
74
+
75
+ 3. Grant of Patent License. Subject to the terms and conditions of
76
+ this License, each Contributor hereby grants to You a perpetual,
77
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78
+ (except as stated in this section) patent license to make, have made,
79
+ use, offer to sell, sell, import, and otherwise transfer the Work,
80
+ where such license applies only to those patent claims licensable
81
+ by such Contributor that are necessarily infringed by their
82
+ Contribution(s) alone or by combination of their Contribution(s)
83
+ with the Work to which such Contribution(s) was submitted. If You
84
+ institute patent litigation against any entity (including a
85
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
86
+ or a Contribution incorporated within the Work constitutes direct
87
+ or contributory patent infringement, then any patent licenses
88
+ granted to You under this License for that Work shall terminate
89
+ as of the date such litigation is filed.
90
+
91
+ 4. Redistribution. You may reproduce and distribute copies of the
92
+ Work or Derivative Works thereof in any medium, with or without
93
+ modifications, and in Source or Object form, provided that You
94
+ meet the following conditions:
95
+
96
+ (a) You must give any other recipients of the Work or
97
+ Derivative Works a copy of this License; and
98
+
99
+ (b) You must cause any modified files to carry prominent notices
100
+ stating that You changed the files; and
101
+
102
+ (c) You must retain, in the Source form of any Derivative Works
103
+ that You distribute, all copyright, patent, trademark, and
104
+ attribution notices from the Source form of the Work,
105
+ excluding those notices that do not pertain to any part of
106
+ the Derivative Works; and
107
+
108
+ (d) If the Work includes a "NOTICE" text file as part of its
109
+ distribution, then any Derivative Works that You distribute must
110
+ include a readable copy of the attribution notices contained
111
+ within such NOTICE file, excluding those notices that do not
112
+ pertain to any part of the Derivative Works, in at least one
113
+ of the following places: within a NOTICE text file distributed
114
+ as part of the Derivative Works; within the Source form or
115
+ documentation, if provided along with the Derivative Works; or,
116
+ within a display generated by the Derivative Works, if and
117
+ wherever such third-party notices normally appear. The contents
118
+ of the NOTICE file are for informational purposes only and
119
+ do not modify the License. You may add Your own attribution
120
+ notices within Derivative Works that You distribute, alongside
121
+ or as an addendum to the NOTICE text from the Work, provided
122
+ that such additional attribution notices cannot be construed
123
+ as modifying the License.
124
+
125
+ You may add Your own copyright statement to Your modifications and
126
+ may provide additional or different license terms and conditions
127
+ for use, reproduction, or distribution of Your modifications, or
128
+ for any such Derivative Works as a whole, provided Your use,
129
+ reproduction, and distribution of the Work otherwise complies with
130
+ the conditions stated in this License.
131
+
132
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
133
+ any Contribution intentionally submitted for inclusion in the Work
134
+ by You to the Licensor shall be under the terms and conditions of
135
+ this License, without any additional terms or conditions.
136
+ Notwithstanding the above, nothing herein shall supersede or modify
137
+ the terms of any separate license agreement you may have executed
138
+ with Licensor regarding such Contributions.
139
+
140
+ 6. Trademarks. This License does not grant permission to use the trade
141
+ names, trademarks, service marks, or product names of the Licensor,
142
+ except as required for reasonable and customary use in describing the
143
+ origin of the Work and reproducing the content of the NOTICE file.
144
+
145
+ 7. Disclaimer of Warranty. Unless required by applicable law or
146
+ agreed to in writing, Licensor provides the Work (and each
147
+ Contributor provides its Contributions) on an "AS IS" BASIS,
148
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149
+ implied, including, without limitation, any warranties or conditions
150
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151
+ PARTICULAR PURPOSE. You are solely responsible for determining the
152
+ appropriateness of using or redistributing the Work and assume any
153
+ risks associated with Your exercise of permissions under this License.
154
+
155
+ 8. Limitation of Liability. In no event and under no legal theory,
156
+ whether in tort (including negligence), contract, or otherwise,
157
+ unless required by applicable law (such as deliberate and grossly
158
+ negligent acts) or agreed to in writing, shall any Contributor be
159
+ liable to You for damages, including any direct, indirect, special,
160
+ incidental, or consequential damages of any character arising as a
161
+ result of this License or out of the use or inability to use the
162
+ Work (including but not limited to damages for loss of goodwill,
163
+ work stoppage, computer failure or malfunction, or any and all
164
+ other commercial damages or losses), even if such Contributor
165
+ has been advised of the possibility of such damages.
166
+
167
+ 9. Accepting Warranty or Additional Liability. While redistributing
168
+ the Work or Derivative Works thereof, You may choose to offer,
169
+ and charge a fee for, acceptance of support, warranty, indemnity,
170
+ or other liability obligations and/or rights consistent with this
171
+ License. However, in accepting such obligations, You may act only
172
+ on Your own behalf and on Your sole responsibility, not on behalf
173
+ of any other Contributor, and only if You agree to indemnify,
174
+ defend, and hold each Contributor harmless for any liability
175
+ incurred by, or claims asserted against, such Contributor by reason
176
+ of your accepting any such warranty or additional liability.
177
+
178
+ END OF TERMS AND CONDITIONS
179
+
180
+ Copyright 2019 NVIDIA CORPORATION. All rights reserved.
181
+ APPENDIX: How to apply the Apache License to your work.
182
+
183
+ To apply the Apache License to your work, attach the following
184
+ boilerplate notice, with the fields enclosed by brackets "[]"
185
+ replaced with your own identifying information. (Don't include
186
+ the brackets!) The text should be enclosed in the appropriate
187
+ comment syntax for the file format. We also recommend that a
188
+ file or class name and description of purpose be included on the
189
+ same "printed page" as the copyright notice for easier
190
+ identification within third-party archives.
191
+
192
+ Copyright [yyyy] [name of copyright owner]
193
+
194
+ Licensed under the Apache License, Version 2.0 (the "License");
195
+ you may not use this file except in compliance with the License.
196
+ You may obtain a copy of the License at
197
+
198
+ http://www.apache.org/licenses/LICENSE-2.0
199
+
200
+ Unless required by applicable law or agreed to in writing, software
201
+ distributed under the License is distributed on an "AS IS" BASIS,
202
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
203
+ See the License for the specific language governing permissions and
204
+ limitations under the License.
docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/README.md ADDED
@@ -0,0 +1,898 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # BERT for PyTorch
2
+
3
+ This folder contains scripts to pre-train and fine-tune the BERT model, and to run inference on the fine-tuned BERT model, on the Intel® Gaudi® AI Accelerator to achieve state-of-the-art accuracy. To obtain model performance data, refer to the [Habana Model Performance Data page](https://developer.habana.ai/resources/habana-training-models/#performance)
4
+
5
+ For more information about training deep learning models using Gaudi, visit [developer.habana.ai](https://developer.habana.ai/resources/).
6
+
7
+ **Note**: BERT is enabled on both Gaudi and Gaudi2.
8
+ ## Table of Contents
9
+ - [Model References](../../../README.md)
10
+ - [Model Overview](#model-overview)
11
+ - [Setup](#setup)
12
+ - [Training and Examples](#training-and-examples)
13
+ - [Inference and Examples](#inference-and-examples)
14
+ - [Pre-trained Model](#pre-trained-model)
15
+ - [Supported Configurations](#supported-configurations)
16
+ - [Changelog](#changelog)
17
+ - [Known Issues](#known-issues)
18
+
19
+ ## Model Overview
20
+ Bidirectional Encoder Representations from Transformers (BERT) is a technique for natural language processing (NLP) pre-training developed by Google.
21
+ The original English-language BERT model comes with two pre-trained general types: (1) the BERTBASE model, a 12-layer, 768-hidden, 12-heads, 110M parameter neural network architecture, and (2) the BERTLARGE model, a 24-layer, 1024-hidden, 16-heads, 340M parameter neural network architecture; both of which were trained on the BooksCorpus with 800M words, and a version of the English Wikipedia with 2,500M words.
22
+ The base training and modeling scripts for pre-training are based on a clone of https://github.com/NVIDIA/DeepLearningExamples.git and fine-tuning is based on https://github.com/huggingface/transformers.git.
23
+
24
+ The scripts included in this release are as follows:
25
+ - BERT Large pre-training for BF16 mixed precision for Wikipedia BookCorpus and Wiki dataset in Lazy mode.
26
+ - BERT Large finetuning for BF16 mixed precision for Wikipedia BookCorpus and SQUAD dataset in Lazy mode.
27
+ - Multi-card (1 server = 8 cards) support for BERT Large pre-training and finetuning with BF16 mixed precision in Lazy mode.
28
+ - Multi-server (4 servers = 32 cards) support for BERT Large pre-training with BF16 mixed precision in Lazy mode.
29
+ - BERT pre-training 1.2B parameters using ZeroRedundancyOptimizer with BF16 mixed precision in Lazy mode.
30
+
31
+
32
+ Additional environment variables are used in training scripts in order to achieve optimal results for each workload.
33
+
34
+ ### Pre-Training
35
+ - Located in: `Model-References/PyTorch/nlp/bert/`
36
+ - Suited for datasets:
37
+ - `wiki`, `bookswiki`(combination of BooksCorpus and Wiki datasets)
38
+ - Uses optimizer: **LAMB** ("Layer-wise Adaptive Moments optimizer for Batch training").
39
+ - Consists of two tasks:
40
+ - Task 1 - **Masked Language Model** - where given a sentence, a randomly chosen word is guessed.
41
+ - Task 2 - **Next Sentence Prediction** - where the model guesses whether sentence B comes after sentence A.
42
+ - The resulting (trained) model weights are language-specific (here: english) and has to be further "fitted" to do a specific task (with fine-tuning).
43
+ - Heavy-weight: the training takes several hours or days.
44
+
45
+ BERT training script supports pre-training of dataset on BERT large for both FP32 and BF16 mixed precision data type using **Lazy mode**.
46
+
47
+ ### Finetuning
48
+ - Located in: `Model-References/PyTorch/nlp/bert/`
49
+ - Suited for dataset:
50
+ - `SQUAD`(Stanford Question Answering Dataset)
51
+ - Uses optimizer: **Fused ADAM**.
52
+ - Light-weight: the finetuning takes several minutes.
53
+
54
+ BERT finetuning script supports fine-tuning of SQUAD dataset on BERT large for both FP32 and BF16 mixed precision data type using **Lazy mode**.
55
+
56
+ ## Setup
57
+ Please follow the instructions provided in the [Gaudi Installation
58
+ Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html) to set up the
59
+ environment including the `$PYTHON` environment variable.
60
+ The guide will walk you through the process of setting up your system to run the model on Gaudi.
61
+
62
+ ### Clone Habana Model-References
63
+ In the docker container, clone this repository and switch to the branch that
64
+ matches your SynapseAI version. You can run the
65
+ [`hl-smi`](https://docs.habana.ai/en/latest/Management_and_Monitoring/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options)
66
+ utility to determine the SynapseAI version.
67
+
68
+ ```bash
69
+ git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References
70
+ ```
71
+
72
+ ### Install Model Requirements
73
+ 1. In the docker container, go to the BERT directory
74
+ ```bash
75
+ cd Model-References/PyTorch/nlp/bert
76
+ ```
77
+ 2. Install the required packages using pip:
78
+ ```bash
79
+ $PYTHON -m pip install -r requirements.txt
80
+ ```
81
+ ### Vocab File
82
+ Download the Vocab file located [here](https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip).
83
+
84
+ ### Download Dataset
85
+
86
+ #### Pre-Training:
87
+
88
+ `Model-References/PyTorch/nlp/bert/data` provides scripts to download, extract and pre-process [Wikipedia](https://dumps.wikimedia.org/) and [BookCorpus](http://yknzhu.wixsite.com/mbweb) datasets.
89
+
90
+ Go to the `data` folder and run the data preparation script.
91
+ ```
92
+ cd Model-References/PyTorch/nlp/bert/data
93
+ ```
94
+ It is highly recommended to download Wiki dataset alone using the following command.
95
+ ```
96
+ bash create_datasets_from_start.sh
97
+ ```
98
+ Wiki and BookCorpus datasets can be downloaded by running the script as follows.
99
+ ```
100
+ bash create_datasets_from_start.sh wiki_books
101
+ ```
102
+ Note that the pre-training dataset is huge and takes several hours to download. BookCorpus may have access and download constraints. The final accuracy may vary depending on the dataset and its size.
103
+ The script creates formatted dataset for Phase 1 and Phase 2 of the pre-training.
104
+
105
+ #### Finetuning:
106
+ This section provides steps to extract and pre-process Squad Dataset(V1.1).
107
+
108
+ 1. Go to `squad` folder.
109
+ ```
110
+ cd Model-References/PyTorch/nlp/bert/data/squad
111
+ ```
112
+ 2. Download Squad dataset.
113
+ ```
114
+ bash squad_download.sh
115
+ ```
116
+
117
+ ### Packing the Data
118
+ Habana supports using a [Data packing technique](https://github.com/HabanaAI/Gaudi-tutorials/blob/main/TensorFlow/DataPackingMLperfBERT/Data_Packing_Process_for_MLPERF_BERT.ipynb),
119
+ called Non-Negative Least Squares Histogram. Here, instead of padding with zero,
120
+ several short sequences are packed into one multi-sequence of size `max_seq_len`.
121
+ Thus, this removes most of the padding, which can lead to a speedup of up to 2&times;
122
+ in time-to-train (TTT). This packing technique can be applied on other datasets
123
+ with high variability in samples length.
124
+
125
+ Please note that for each NLP dataset with sequential data samples, the speedup
126
+ with data packing is determined by the ratio of `max_seq_len` to
127
+ `average_seq_len` in that particular dataset. The larger the ratio, the higher
128
+ the speedup.
129
+
130
+ To pack the dataset, in docker run:
131
+ ```bash
132
+ cd /root/Model-References/PyTorch/nlp/bert
133
+
134
+ $PYTHON pack_pretraining_data_pytorch.py --input_dir <dataset_path_phase1> --output_dir <packed_dataset_path_phase1> --max_sequence_length 128 --max_predictions_per_sequence 20
135
+
136
+ $PYTHON pack_pretraining_data_pytorch.py --input_dir <dataset_path_phase2> --output_dir <packed_dataset_path_phase2> --max_sequence_length 512 --max_predictions_per_sequence 80
137
+ ```
138
+ **Note:** This will generate json at the path <output_dir>/../<tail_dir>_metadata.json with meta data info like: "avg_seq_per_sample" etc. This json will be
139
+ used as an input to run_pretraining.py to extract "avg_seq_per_sample" in case of packed dataset mode.
140
+
141
+
142
+ ## Training and Examples
143
+
144
+ Please create a log directory to store `dllogger.json` and specify its location for `--json_summary` attribute.
145
+
146
+ ### Single Card and Multi-Card Pre-Training Examples
147
+ **Run training on 1 HPU:**
148
+
149
+ - Using packed data: lazy mode, 1 HPU, BF16 mixed precision, batch size 64 for Phase 1 and batch size 8 for Phase 2:
150
+
151
+ ```bash
152
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \
153
+ --autocast --config_file=./bert_config.json \
154
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
155
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \
156
+ --use_fused_lamb \
157
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \
158
+ --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \
159
+ --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128
160
+ ```
161
+
162
+ ```bash
163
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \
164
+ --autocast --config_file=./bert_config.json \
165
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
166
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \
167
+ --use_fused_lamb \
168
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \
169
+ --train_batch_size=4096 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \
170
+ --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \
171
+ --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2
172
+ ```
173
+
174
+ - Using packed data: Eager mode with torch.compile enabled, 1 HPU, BF16 mixed precision, batch size 64 for Phase 1 on **Gaudi2**:
175
+ ```bash
176
+ export PT_HPU_LAZY_MODE=0
177
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \
178
+ --autocast --config_file=./bert_config.json \
179
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
180
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \
181
+ --use_fused_lamb --use_torch_compile \
182
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \
183
+ --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \
184
+ --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128
185
+ ```
186
+
187
+
188
+ - Using packed data: lazy mode, 1 HPU, BF16 mixed precision, batch size 64 for Phase 1 and batch size 16 for Phase 2 on **Gaudi2**:
189
+
190
+ ```bash
191
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \
192
+ --autocast --config_file=./bert_config.json \
193
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
194
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \
195
+ --use_fused_lamb \
196
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \
197
+ --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \
198
+ --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128
199
+ ```
200
+
201
+ ```bash
202
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \
203
+ --autocast --config_file=./bert_config.json \
204
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
205
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \
206
+ --use_fused_lamb \
207
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \
208
+ --train_batch_size=8192 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \
209
+ --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \
210
+ --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2
211
+ ```
212
+
213
+ - Lazy mode, 1 HPU, unpacked data, BF16 mixed precision, batch size 64 for Phase1 and batch size 8 for Phase2:
214
+
215
+ ```bash
216
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \
217
+ --autocast --config_file=./bert_config.json \
218
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
219
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints --use_fused_lamb \
220
+ --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_128/books_wiki_en_corpus \
221
+ --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \
222
+ --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 \
223
+ --enable_packed_data_mode False
224
+ ```
225
+
226
+
227
+ ```bash
228
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \
229
+ --autocast --config_file=./bert_config.json \
230
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
231
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints --use_fused_lamb \
232
+ --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_512_max_pred_80_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5/books_wiki_en_corpus \
233
+ --train_batch_size=4096 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \
234
+ --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \
235
+ --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2 \
236
+ --enable_packed_data_mode False
237
+ ```
238
+
239
+ - Lazy mode, 1 HPU, unpacked data, FP32 precision, batch size 32 for Phase 1 and batch size 4 for Phase 2:
240
+
241
+ ```bash
242
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \
243
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
244
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints --use_fused_lamb \
245
+ --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_128/books_wiki_en_corpus \
246
+ --train_batch_size=512 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \
247
+ --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=32 \
248
+ --enable_packed_data_mode False
249
+ ```
250
+
251
+ ```bash
252
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \
253
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
254
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints --use_fused_lamb \
255
+ --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_512/books_wiki_en_corpus \
256
+ --train_batch_size=128 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \
257
+ --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \
258
+ --gradient_accumulation_steps=64 --resume_from_checkpoint --phase1_end_step=7038 --phase2 \
259
+ --enable_packed_data_mode False
260
+ ```
261
+
262
+ **Run training on 8 HPUs:**
263
+
264
+ To run multi-card demo, make sure the host machine has 512 GB of RAM installed. Modify the docker run command to pass 8 Gaudi cards to the docker container. This ensures the docker has access to all the 8 cards required for multi-card demo.
265
+
266
+ **NOTE:** mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Scaling_Guide/DDP_Based_Scaling.html#mpirun-configuration).
267
+
268
+ - Using packed data: lazy mode, 8 HPUs, BF16 mixed precision, per chip batch size of 64 for Phase 1 and 8 for Phase 2:
269
+
270
+ ```bash
271
+ export MASTER_ADDR="localhost"
272
+ export MASTER_PORT="12345"
273
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
274
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json --use_habana \
275
+ --allreduce_post_accumulation --allreduce_post_accumulation_fp16 --json-summary=/tmp/log_directory/dllogger.json \
276
+ --output_dir=/tmp/results/checkpoints --use_fused_lamb \
277
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \
278
+ --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \
279
+ --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128
280
+ ```
281
+
282
+ ```bash
283
+ export MASTER_ADDR="localhost"
284
+ export MASTER_PORT="12345"
285
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
286
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json --use_habana \
287
+ --allreduce_post_accumulation --allreduce_post_accumulation_fp16 --json-summary=/tmp/log_directory/dllogger.json \
288
+ --output_dir=/tmp/results/checkpoints --use_fused_lamb \
289
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \
290
+ --train_batch_size=4096 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \
291
+ --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \
292
+ --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2
293
+ ```
294
+
295
+ - Using packed data: lazy mode, 8 HPUs, BF16 mixed precision, per chip batch size of 64 for Phase 1 and 16 for Phase 2 on **Gaudi2**:
296
+
297
+ ```bash
298
+ export MASTER_ADDR="localhost"
299
+ export MASTER_PORT="12345"
300
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
301
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json --use_habana \
302
+ --allreduce_post_accumulation --allreduce_post_accumulation_fp16 --json-summary=/tmp/log_directory/dllogger.json \
303
+ --output_dir=/tmp/results/checkpoints --use_fused_lamb \
304
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \
305
+ --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \
306
+ --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128
307
+ ```
308
+
309
+ ```bash
310
+ export MASTER_ADDR="localhost"
311
+ export MASTER_PORT="12345"
312
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
313
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json --use_habana \
314
+ --allreduce_post_accumulation --allreduce_post_accumulation_fp16 --json-summary=/tmp/log_directory/dllogger.json \
315
+ --output_dir=/tmp/results/checkpoints --use_fused_lamb \
316
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \
317
+ --train_batch_size=8192 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \
318
+ --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \
319
+ --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2
320
+ ```
321
+
322
+ - Eager mode with torch.compile enabled, 8 HPUs, packed data, BF16 mixed precision, per chip batch size of 64 for Phase 1 on **Gaudi2**:
323
+
324
+ ```bash
325
+ export PT_HPU_LAZY_MODE=0
326
+ export MASTER_ADDR="localhost"
327
+ export MASTER_PORT="12345"
328
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
329
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \
330
+ --autocast --use_torch_compile \
331
+ --config_file=./bert_config.json --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
332
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/BERT_PRETRAINING/results/checkpoints --use_fused_lamb \
333
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \
334
+ --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --warmup_proportion=0.2843 \
335
+ --max_steps=7038 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128
336
+ ```
337
+
338
+ - Lazy mode, 8 HPUs, unpacked data, BF16 mixed precision, per chip batch size of 64 for Phase 1 and 8 for Phase 2:
339
+ ```bash
340
+ export MASTER_ADDR="localhost"
341
+ export MASTER_PORT="12345"
342
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
343
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \
344
+ --autocast --use_lazy_mode=True \
345
+ --config_file=./bert_config.json --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
346
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/BERT_PRETRAINING/results/checkpoints --use_fused_lamb \
347
+ --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_128/books_wiki_en_corpus \
348
+ --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --warmup_proportion=0.2843 \
349
+ --max_steps=7038 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 \
350
+ --enable_packed_data_mode False
351
+ ```
352
+
353
+ ```bash
354
+ export MASTER_ADDR="localhost"
355
+ export MASTER_PORT="12345"
356
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
357
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \
358
+ --autocast --use_lazy_mode=True \
359
+ --config_file=./bert_config.json --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
360
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/BERT_PRETRAINING/results/checkpoints --use_fused_lamb \
361
+ --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_512/books_wiki_en_corpus \
362
+ --train_batch_size=4096 --max_seq_length=512 --max_predictions_per_seq=80 --warmup_proportion=0.128 \
363
+ --max_steps=5 --num_steps_per_checkpoint=200 --learning_rate=0.004 --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2 \
364
+ --enable_packed_data_mode False
365
+ ```
366
+
367
+ - Lazy mode, 8 HPUs, unpacked data, FP32 precision, per chip batch size of 32 for Phase 1 and 4 for Phase 2:
368
+
369
+ ```bash
370
+ export MASTER_ADDR="localhost"
371
+ export MASTER_PORT="12345"
372
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
373
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \
374
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
375
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \
376
+ --use_fused_lamb --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_128/books_wiki_en_corpus \
377
+ --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=3 --warmup_proportion=0.2843 \
378
+ --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=256 \
379
+ --enable_packed_data_mode False
380
+ ```
381
+
382
+ ```bash
383
+ export MASTER_ADDR="localhost"
384
+ export MASTER_PORT="12345"
385
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
386
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \
387
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 --json-summary=/tmp/log_directory/dllogger.json \
388
+ --output_dir=/tmp/results/checkpoints --use_fused_lamb \
389
+ --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_512/books_wiki_en_corpus \
390
+ --train_batch_size=4096 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 --warmup_proportion=0.128 \
391
+ --num_steps_per_checkpoint=200 --learning_rate=0.004 --gradient_accumulation_steps=512 \
392
+ --resume_from_checkpoint --phase1_end_step=7038 --phase2 \
393
+ --enable_packed_data_mode False
394
+ ```
395
+
396
+
397
+ ### Single Card and Multi-Card Finetuning Examples
398
+ **Run training on 1 HPU:**
399
+ - Lazy mode, 1 HPU, BF16 mixed precision, batch size 24 for train and batch size 8 for test:
400
+
401
+ ```bash
402
+ $PYTHON run_squad.py --do_train --bert_model=bert-large-uncased \
403
+ --config_file=./bert_config.json \
404
+ --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \
405
+ --json-summary=/tmp/log_directory/dllogger.json \
406
+ --train_batch_size=24 --predict_batch_size=8 --seed=1 --max_seq_length=384 \
407
+ --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \
408
+ --init_checkpoint=<path-to-checkpoint> \
409
+ --vocab_file=<path-to-vocab> \
410
+ --train_file=data/squad/v1.1/train-v1.1.json \
411
+ --skip_cache --do_predict \
412
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
413
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20
414
+ ```
415
+
416
+ - Lazy mode, 1 HPU, FP32 precision, batch size 12 for train and batch size 8 for test:
417
+
418
+ ```bash
419
+ $PYTHON run_squad.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \
420
+ --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \
421
+ --json-summary=/tmp/log_directory/dllogger.json \
422
+ --train_batch_size=12 --predict_batch_size=8 --seed=1 --max_seq_length=384 \
423
+ --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \
424
+ --init_checkpoint=<path-to-checkpoint> \
425
+ --vocab_file=<path-to-vocab> \
426
+ --train_file=data/squad/v1.1/train-v1.1.json \
427
+ --skip_cache --do_predict \
428
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
429
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20
430
+ ```
431
+
432
+ - Eager mode with torch.compile enabled, 1 HPU, FP32 precision, batch size 12 for train and batch size 8 for test:
433
+
434
+ ```bash
435
+ export PT_HPU_LAZY_MODE=0
436
+ $PYTHON run_squad.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \
437
+ --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \
438
+ --json-summary=/tmp/log_directory/dllogger.json --use_torch_compile \
439
+ --train_batch_size=12 --predict_batch_size=8 --seed=1 --max_seq_length=384 \
440
+ --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \
441
+ --init_checkpoint=<path-to-checkpoint> \
442
+ --vocab_file=<path-to-vocab> \
443
+ --train_file=data/squad/v1.1/train-v1.1.json \
444
+ --skip_cache --do_predict \
445
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
446
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20
447
+ ```
448
+
449
+ **Run training on 8 HPUs:**
450
+
451
+ To run multi-card demo, make sure the host machine has 512 GB of RAM installed. Modify the docker run command to pass 8 Gaudi cards to the docker container. This ensures the docker has access to all the 8 cards required for multi-card demo.
452
+
453
+ **NOTE:** mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Scaling_Guide/DDP_Based_Scaling.html#mpirun-configuration).
454
+
455
+ - Lazy mode, 8 HPUs, BF16 mixed precision, per chip batch size of 24 for train and 8 for test:
456
+ ```bash
457
+ export MASTER_ADDR="localhost"
458
+ export MASTER_PORT="12345"
459
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
460
+ $PYTHON run_squad.py --do_train --bert_model=bert-large-uncased \
461
+ --config_file=./bert_config.json \
462
+ --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \
463
+ --json-summary=/tmp/log_directory/dllogger.json \
464
+ --train_batch_size=24 --predict_batch_size=8 --seed=1 --max_seq_length=384 \
465
+ --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \
466
+ --init_checkpoint=<path-to-checkpoint> \
467
+ --vocab_file=<path-to-vocab> \
468
+ --train_file=data/squad/v1.1/train-v1.1.json \
469
+ --skip_cache --do_predict \
470
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
471
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20
472
+ ```
473
+
474
+ - Lazy mode, 8 HPUs, FP32 precision, per chip batch size of 12 for train and 8 for test:
475
+
476
+ ```bash
477
+ export MASTER_ADDR="localhost"
478
+ export MASTER_PORT="12345"
479
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
480
+ $PYTHON run_squad.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \
481
+ --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \
482
+ --json-summary=/tmp/log_directory/dllogger.json \
483
+ --train_batch_size=12 --predict_batch_size=8 --seed=1 --max_seq_length=384 \
484
+ --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \
485
+ --init_checkpoint=<path-to-checkpoint> \
486
+ --vocab_file=<path-to-vocab> \
487
+ --train_file=data/squad/v1.1/train-v1.1.json \
488
+ --skip_cache --do_predict \
489
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
490
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20
491
+ ```
492
+
493
+ - Eager mode with torch.compile enabled, 8 HPUs, BF16 mixed precision, per chip batch size of 24 for train and 8 for test:
494
+ ```bash
495
+ export PT_HPU_LAZY_MODE=0
496
+ export MASTER_ADDR="localhost"
497
+ export MASTER_PORT="12345"
498
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
499
+ $PYTHON run_squad.py --do_train --bert_model=bert-large-uncased \
500
+ --config_file=./bert_config.json --use_torch_compile \
501
+ --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \
502
+ --json-summary=/tmp/log_directory/dllogger.json \
503
+ --train_batch_size=24 --predict_batch_size=8 --seed=1 --max_seq_length=384 \
504
+ --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \
505
+ --init_checkpoint=<path-to-checkpoint> \
506
+ --vocab_file=<path-to-vocab> \
507
+ --train_file=data/squad/v1.1/train-v1.1.json \
508
+ --skip_cache --do_predict \
509
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
510
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20
511
+ ```
512
+
513
+ - Habana provides the pretraining checkpoints for most of the models. The user can simply feed the data from [BERT checkpoint](https://developer.habana.ai/catalog/bert-pretraining-for-pytorch/) to provide the path-to-checkpoint for --init_checkpoint when you run the above model.
514
+
515
+ ### Multi-Server Training Examples
516
+ To run multi-server demo, make sure the host machine has 512 GB of RAM installed.
517
+ Also ensure you followed the [Gaudi Installation
518
+ Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html)
519
+ to install and set up docker, so that the docker has access to all the 8 cards
520
+ required for multi-node demo. Multi-server configuration for BERT PT training up to
521
+ 4 servers, each with 8 Gaudi cards, have been verified.
522
+
523
+ Before execution of the multi-server scripts, make sure all network interfaces are up. You can change the state of each network interface managed by the habanalabs driver using the following command:
524
+ ```
525
+ sudo ip link set <interface_name> up
526
+ ```
527
+ To identify if a specific network interface is managed by the habanalabs driver type, run:
528
+ ```
529
+ sudo ethtool -i <interface_name>
530
+ ```
531
+ #### Docker ssh Port Setup for Multi-Server Training
532
+
533
+ By default, the Habana docker uses `port 22` for ssh. The default port configured in the script is `port 3022`. Run the following commands to configure the selected port number, `port 3022` in the example below.
534
+
535
+ ```bash
536
+ sed -i 's/#Port 22/Port 3022/g' /etc/ssh/sshd_config
537
+ sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
538
+ service ssh restart
539
+ ```
540
+ #### Set up password-less ssh
541
+ To set up password-less ssh between all connected servers used in scale-out training, follow the below steps:
542
+
543
+ 1. Run the following in all the nodes' docker sessions:
544
+ ```bash
545
+ mkdir ~/.ssh
546
+ cd ~/.ssh
547
+ ssh-keygen -t rsa -b 4096
548
+ ```
549
+ a. Copy id_rsa.pub contents from every node's docker to every other node's docker's ~/.ssh/authorized_keys (all public keys need to be in all hosts' authorized_keys):
550
+ ```bash
551
+ cat id_rsa.pub > authorized_keys
552
+ vi authorized_keys
553
+ ```
554
+ b. Copy the contents from inside to other systems.
555
+
556
+ c. Paste all hosts' public keys in all hosts' “authorized_keys” file.
557
+
558
+ 2. On each system, add all hosts (including itself) to known_hosts. The IP addresses used below are just for illustration:
559
+ ```bash
560
+ ssh-keyscan -p 3022 -H 10.10.100.101 >> ~/.ssh/known_hosts
561
+ ssh-keyscan -p 3022 -H 10.10.100.102 >> ~/.ssh/known_hosts
562
+ ssh-keyscan -p 3022 -H 10.10.100.103 >> ~/.ssh/known_hosts
563
+ ssh-keyscan -p 3022 -H 10.10.100.104 >> ~/.ssh/known_hosts
564
+ ```
565
+ 3. Install the Python packages required for the BERT pre-training model:
566
+ ```
567
+ pip install -r Model-References/PyTorch/nlp/bert/requirements.txt
568
+ ```
569
+
570
+ **Run training on 32 HPUs:**
571
+
572
+ **NOTE:**
573
+ - mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Scaling_Guide/DDP_Based_Scaling.html#mpirun-configuration).
574
+ - `$MPI_ROOT` environment variable is set automatically during Setup. See [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/GAUDI_Installation_Guide.html) for details.
575
+
576
+ - Using packed data: lazy mode, 32 HPUs, BF16 mixed precision, per chip batch size 64 for Phase 1 and batch size 8 for Phase 2:
577
+ ```bash
578
+ export MASTER_ADDR="10.10.100.101"
579
+ export MASTER_PORT="12345"
580
+ mpirun --allow-run-as-root --mca plm_rsh_args "-p 3022" --bind-to core -n 32 --map-by ppr:4:socket:PE=6 \
581
+ --rank-by core --report-bindings --prefix --mca btl_tcp_if_include 10.10.100.101/16 \
582
+ $MPI_ROOT -H 10.10.100.101:16,10.10.100.102:16,10.10.100.103:16,10.10.100.104:16 -x LD_LIBRARY_PATH \
583
+ -x HABANA_LOGS -x PYTHONPATH -x MASTER_ADDR \
584
+ -x MASTER_PORT \
585
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json \
586
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
587
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \
588
+ --use_fused_lamb --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \
589
+ --train_batch_size=2048 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \
590
+ --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 \
591
+ --gradient_accumulation_steps=32
592
+ ```
593
+
594
+ ```bash
595
+ export MASTER_ADDR="10.10.100.101"
596
+ export MASTER_PORT="12345"
597
+ mpirun --allow-run-as-root --mca plm_rsh_args "-p 3022" --bind-to core -n 32 --map-by ppr:4:socket:PE=6 \
598
+ --rank-by core --report-bindings --prefix --mca btl_tcp_if_include 10.10.100.101/16 \
599
+ $MPI_ROOT -H 10.10.100.101:16,10.10.100.102:16,10.10.100.103:16,10.10.100.104:16 -x LD_LIBRARY_PATH \
600
+ -x HABANA_LOGS -x PYTHONPATH -x MASTER_ADDR \
601
+ -x MASTER_PORT \
602
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json \
603
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
604
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \
605
+ --use_fused_lamb --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \
606
+ --train_batch_size=1024 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 --gradient_accumulation_steps=128 \
607
+ --resume_from_checkpoint --phase1_end_step=7038 --phase2
608
+ ```
609
+
610
+ - Lazy mode, 32 HPUs, unpacked data, BF16 mixed precision, batch size 64 for Phase 1 and batch size 8 for Phase 2:
611
+
612
+ ```bash
613
+ export MASTER_ADDR="10.10.100.101"
614
+ export MASTER_PORT="12345"
615
+ mpirun --allow-run-as-root --mca plm_rsh_args -p3022 --bind-to core -n 32 --map-by ppr:4:socket:PE=6 \
616
+ --rank-by core --report-bindings --prefix --mca btl_tcp_if_include 10.10.100.101/16 \
617
+ $MPI_ROOT -H 10.10.100.101:16,10.10.100.102:16,10.10.100.103:16,10.10.100.104:16 \
618
+ -x LD_LIBRARY_PATH -x HABANA_LOGS -x PYTHONPATH -x MASTER_ADDR -x MASTER_PORT -x https_proxy -x http_proxy \
619
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \
620
+ --autocast --config_file=./bert_config.json \
621
+ --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
622
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \
623
+ --use_fused_lamb --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_128/books_wiki_en_corpus \
624
+ --train_batch_size=2048 --max_seq_length=128 --max_predictions_per_seq=20 \
625
+ --max_steps=7038 --warmup_proportion=0.2843 \
626
+ --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=32 \
627
+ --enable_packed_data_mode False
628
+ ```
629
+
630
+ ```bash
631
+ export MASTER_ADDR="10.10.100.101"
632
+ export MASTER_PORT="12345"
633
+ mpirun --allow-run-as-root --mca plm_rsh_args -p3022 --bind-to core -n 32 --map-by ppr:4:socket:PE=6 \
634
+ --rank-by core --report-bindings --prefix --mca btl_tcp_if_include 10.10.100.101/16 \
635
+ $MPI_ROOT -H 10.10.100.101:16,10.10.100.102:16,10.10.100.103:16,10.10.100.104:16 -x LD_LIBRARY_PATH \
636
+ -x HABANA_LOGS -x PYTHONPATH -x MASTER_ADDR -x MASTER_PORT \
637
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast \
638
+ --config_file=./bert_config.json --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
639
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \
640
+ --use_fused_lamb --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_512/books_wiki_en_corpus \
641
+ --train_batch_size=1024 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \
642
+ --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \
643
+ --gradient_accumulation_steps=128 --resume_from_checkpoint --phase1_end_step=7038 --phase2 \
644
+ --enable_packed_data_mode False
645
+ ```
646
+
647
+ ### BERT Pre-Training with ZeroRedundancyOptimizer
648
+
649
+ BERT training script supports pre-training of BERT 1.2B parameters using ZeroRedundancyOptimizer with BF16 mixed precision data type in **Lazy mode**.
650
+
651
+ - Lazy mode, 8 HPUs, BF16 mixed precision, per chip batch size 8 for Phase 1 and batch size 2 for Phase 2:
652
+
653
+ ```bash
654
+ export MASTER_ADDR="localhost"
655
+ export MASTER_PORT="12345"
656
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
657
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --use_lazy_mode=True \
658
+ --config_file=./bert_config_1.2B.json --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
659
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/BERT_PRETRAINING/results/checkpoints --use_fused_lamb \
660
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \
661
+ --train_batch_size=1024 --max_seq_length=128 --max_predictions_per_seq=20 --warmup_proportion=0.2843 \
662
+ --max_steps=7038 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 \
663
+ --use_zero_optimizer True
664
+
665
+ ```
666
+
667
+ ```bash
668
+ export MASTER_ADDR="localhost"
669
+ export MASTER_PORT="12345"
670
+ mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \
671
+ $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --use_lazy_mode=True \
672
+ --config_file=./bert_config_1.2B.json --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \
673
+ --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/BERT_PRETRAINING/results/checkpoints --use_fused_lamb \
674
+ --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \
675
+ --train_batch_size=1024 --max_seq_length=512 --max_predictions_per_seq=80 --warmup_proportion=0.128 \
676
+ --max_steps=1563 --num_steps_per_checkpoint=200 --learning_rate=0.004 --gradient_accumulation_steps=512 \
677
+ --resume_from_checkpoint --phase1_end_step=7038 --phase2 --use_zero_optimizer True
678
+
679
+ ```
680
+ ## Inference and Examples
681
+ **Run inference on 1 HPU:**
682
+ - Lazy mode, 1 HPU, BF16 mixed precision, batch size 24:
683
+
684
+ ```bash
685
+ $PYTHON run_squad.py --bert_model=bert-large-uncased --autocast \
686
+ --config_file=./bert_config.json \
687
+ --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \
688
+ --json-summary=/tmp/log_directory/dllogger.json \
689
+ --predict_batch_size=24 \
690
+ --init_checkpoint=<path-to-checkpoint> \
691
+ --vocab_file=<path-to-vocab> \
692
+ --do_predict \
693
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
694
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py
695
+ ```
696
+
697
+ - HPU graphs, 1 HPU, BF16 mixed precision, batch size 24:
698
+
699
+ ```bash
700
+ $PYTHON run_squad.py --bert_model=bert-large-uncased --autocast --use_hpu_graphs \
701
+ --config_file=./bert_config.json \
702
+ --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \
703
+ --json-summary=/tmp/log_directory/dllogger.json \
704
+ --predict_batch_size=24 \
705
+ --init_checkpoint=<path-to-checkpoint> \
706
+ --vocab_file=<path-to-vocab> \
707
+ --do_predict \
708
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
709
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py
710
+ ```
711
+
712
+ - Lazy mode, 1 HPU, FP16 mixed precision, batch size 24:
713
+
714
+ ```bash
715
+ $PYTHON run_squad.py --bert_model=bert-large-uncased --autocast \
716
+ --config_file=./bert_config.json \
717
+ --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \
718
+ --json-summary=/tmp/log_directory/dllogger.json \
719
+ --predict_batch_size=24 \
720
+ --init_checkpoint=<path-to-checkpoint> \
721
+ --vocab_file=<path-to-vocab> \
722
+ --do_predict --fp16 \
723
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
724
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py
725
+ ```
726
+
727
+ - HPU graphs, 1 HPU, FP16 mixed precision, batch size 24:
728
+
729
+ ```bash
730
+ $PYTHON run_squad.py --bert_model=bert-large-uncased --autocast --use_hpu_graphs \
731
+ --config_file=./bert_config.json \
732
+ --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \
733
+ --json-summary=/tmp/log_directory/dllogger.json \
734
+ --predict_batch_size=24 \
735
+ --init_checkpoint=<path-to-checkpoint> \
736
+ --vocab_file=<path-to-vocab> \
737
+ --do_predict --fp16 \
738
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
739
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py
740
+ ```
741
+
742
+ **Run inference on 1 HPU with torch.compile:**
743
+ - 1 HPU, BF16 mixed precision, batch size 24:
744
+
745
+ ```bash
746
+ $PYTHON run_squad.py --bert_model=bert-large-uncased --autocast \
747
+ --config_file=./bert_config.json \
748
+ --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \
749
+ --json-summary=/tmp/log_directory/dllogger.json \
750
+ --predict_batch_size=24 \
751
+ --init_checkpoint=<path-to-checkpoint> \
752
+ --vocab_file=<path-to-vocab> \
753
+ --do_predict --use_torch_compile \
754
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
755
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py
756
+ ```
757
+
758
+ - 1 HPU, FP16 mixed precision, batch size 24:
759
+
760
+ ```bash
761
+ $PYTHON run_squad.py --bert_model=bert-large-uncased --autocast \
762
+ --config_file=./bert_config.json \
763
+ --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \
764
+ --json-summary=/tmp/log_directory/dllogger.json \
765
+ --predict_batch_size=24 \
766
+ --init_checkpoint=<path-to-checkpoint> \
767
+ --vocab_file=<path-to-vocab> \
768
+ --do_predict --use_torch_compile --fp16 \
769
+ --predict_file=data/squad/v1.1/dev-v1.1.json \
770
+ --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py
771
+ ```
772
+
773
+ When not using torch.compile, it is recommended to use the ["HPU graph"](https://docs.habana.ai/en/latest/PyTorch/Inference_on_Gaudi/Inference_using_HPU_Graphs/Inference_using_HPU_Graphs.html) model type to minimize the host time spent in the `forward()` call.
774
+
775
+ ## Pre-trained Model and Checkpoint
776
+ PyTorch BERT is trained on Intel Gaudi AI Accelerators and the saved model & checkpoints are provided. You can use it for fine-tuning or transfer learning tasks with your own datasets. To download the saved model file, please refer to [Habana Catalog](https://developer.habana.ai/catalog/bert-pretraining-for-pytorch/) to obtain the URL.
777
+
778
+
779
+ ## Supported Configurations
780
+
781
+ | Validated on | SynapseAI Version | PyTorch Version | Mode |
782
+ |--------|-------------------|-----------------|----------------|
783
+ | Gaudi | 1.14.0 | 2.1.1 | Training |
784
+ | Gaudi | 1.14.0 | 2.1.1 | Inference |
785
+ | Gaudi2 | 1.14.0 | 2.1.1 | Training |
786
+ | Gaudi2 | 1.14.0 | 2.1.1 | Inference |
787
+
788
+ ## Changelog
789
+ ### 1.14.0
790
+ 1. Added support for dynamic shapes in BERT Pretraining
791
+
792
+ ### 1.13.0
793
+ 1. Added tensorboard logging.
794
+ 2. Added support for torch.compile inference.
795
+ 3. Added support for FP16 through autocast.
796
+ 4. Aligned profiler invocation between training and inference loops.
797
+ 5. Added support for dynamic shapes in BERT Finetuning
798
+ 6. Added torch.compile support - performance improvement feature for PyTorch eager mode for
799
+ BERT Pretraining. Supported only for phase1.
800
+ 7. Added torch.compile support - performance improvement feature for PyTorch eager mode for
801
+ BERT Finetuning.
802
+
803
+ ### 1.12.0
804
+ 1. Removed HMP; switched to Autocast.
805
+ 2. Eager mode support is deprecated.
806
+
807
+ ### 1.11.0
808
+ 1. Dynamic Shapes will be enabled by default in future releases. It is currently enabled in BERT Pretraining Model
809
+ training script as a temporary solution.
810
+
811
+ ### 1.10.0
812
+ 1. Support added for cached dataset for finetuning.
813
+
814
+ ### 1.9.0
815
+ 1. Enabled usage of PyTorch autocast
816
+ 2. Enabled BERT fine-tuning (run_squad.py) with the SQuAD dataset (training and inference).
817
+
818
+ ### 1.6.0
819
+ 1. ZeroRedundancyOptimizer support is added and tested with the BERT 1.2B parameter config.
820
+
821
+ ### 1.5.0
822
+ 1. Packed dataset mode is set as default execution mode
823
+ 2. Deprecated the flags `enable_packed_data_mode` and `avg_seq_per_pack` and added support for automatic detection of those parameters based on dataset metadata file.
824
+ 3. Changes related to Saving and Loading checkpoint were removed.
825
+ 4. Removed changes related to padding index and flatten.
826
+ 5. Fixed throughput calculation for packed dataset.
827
+ 6. Demo scripts were removed and references to custom demo script were replaced by community entry points in README
828
+ 7. Reduced the number of distributed barrier calls to once per gradient accumulation steps
829
+ 8. Simplified the distributed Initialization.
830
+ 9. Added support for training on **Gaudi2** supporting up to 8 cards
831
+
832
+ ### 1.4.0
833
+ 1. Lazy mode is set as the default execution mode; for eager mode, set `use-lazy-mode` as False.
834
+ 2. Pretraining with packed dataset is supported
835
+
836
+
837
+ ### 1.3.0
838
+ 1. Single worker thread changes are removed.
839
+ 2. Loss computation brought back to the training script.
840
+ 3. Removed setting the embedding padding index as 0 explicitly.
841
+ 4. Removed the select op implementation using index select and squeeze and retained the default code.
842
+ 5. Permute and view is replaced as flatten.
843
+ 6. Change `python` or `python3` to `$PYTHON` to execute correct version based on environment setup.
844
+
845
+ ### 1.2.0
846
+ 1. Enabled HCCL flow for distributed training.
847
+ 2. Removed changes related to data type conversions for input_ids, segment ids, position_ids and input_mask.
848
+ 3. Removed changes related to position ids from training script.
849
+ 4. Removed changes related to no pinned memory and skip last batch.
850
+
851
+
852
+ ### Training Script Modifications
853
+ The following changes have been added to training (run_pretraining.py and run_squad.py) and modeling (modeling.py) scripts.
854
+
855
+ 1. Added support for Habana devices:
856
+
857
+ a. Load Habana specific library.
858
+
859
+ b. Support required for cpu to work.
860
+
861
+ c. Required environment variables are defined for habana device.
862
+
863
+ d. Added Habana BF16 Mixed precision support.
864
+
865
+ e. Added Python version of the LAMB optimizer, used as the default (from lamb.py).
866
+
867
+ f. Support for distributed training on Habana device.
868
+
869
+ g. Added changes to support Lazy mode with required mark_step().
870
+
871
+ h. Added changes to calculate the performance per step and report through dllogger.
872
+
873
+ i. Using conventional torch layernorm, linear and activation functions.
874
+
875
+ j. Changes for dynamic loading of HCCL library.
876
+
877
+ k. Added support for FusedAdamW and FusedClipNorm in run_squad.py.
878
+
879
+ l. optimizer_grouped_parameters config has changed for weight_decay from 0.01 to 0.0.
880
+
881
+
882
+ 2. To improve performance:
883
+
884
+ a. Added support for Fused LAMB optimizer in run_pretraining.py.
885
+
886
+ b. Bucket size set to 230MB for better performance in distributed training.
887
+
888
+ c. Added support to use distributed all_reduce instead of default Distributed Data Parallel in pre-training.
889
+
890
+ d. Added support for lowering print frequency of loss and associated this with log_freq.
891
+
892
+ e. Added support for Fused ADAMW optimizer and FusedClipNorm in run_squad.py.
893
+
894
+
895
+ ## Known Issues
896
+ 1. Placing mark_step() arbitrarily may lead to undefined behaviour. Recommend to keep mark_step() as shown in provided scripts.
897
+ 2. BERT 1.2B parameter model is restricted to showcase the PyTorch ZeroRedundancyOptimizer feature and not for model convergence.
898
+ 3. Only scripts and configurations mentioned in this README are supported and verified.
docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/bert_config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "attention_probs_dropout_prob": 0.1,
3
+ "hidden_act": "gelu",
4
+ "hidden_dropout_prob": 0.1,
5
+ "hidden_size": 1024,
6
+ "initializer_range": 0.02,
7
+ "intermediate_size": 4096,
8
+ "max_position_embeddings": 512,
9
+ "num_attention_heads": 16,
10
+ "num_hidden_layers": 24,
11
+ "type_vocab_size": 2,
12
+ "vocab_size": 30522
13
+ }
docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/bert_config_1.2B.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "attention_probs_dropout_prob": 0.1,
3
+ "hidden_act": "gelu",
4
+ "hidden_dropout_prob": 0.1,
5
+ "hidden_size": 1536,
6
+ "initializer_range": 0.02,
7
+ "intermediate_size": 6144,
8
+ "max_position_embeddings": 512,
9
+ "num_attention_heads": 16,
10
+ "num_hidden_layers": 40,
11
+ "type_vocab_size": 2,
12
+ "vocab_size": 30522
13
+ }