applied-ai-018 committed · commit 088918c (verified) · 1 parent: 3028fe4

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.

Files changed (50):
  1. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/LICENSE +203 -0
  2. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/README.md +141 -0
  3. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/data/scripts/gen_synth_data.py +64 -0
  4. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/__init__.py +5 -0
  5. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/cli_lora_add.py +187 -0
  6. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/cli_lora_pti.py +1349 -0
  7. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/cli_pt_to_safetensors.py +85 -0
  8. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/cli_svd.py +146 -0
  9. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/dataset.py +311 -0
  10. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/lora.py +1119 -0
  11. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/lora_manager.py +144 -0
  12. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/preprocess_files.py +327 -0
  13. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/safe_open.py +68 -0
  14. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/to_ckpt_v2.py +232 -0
  15. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/utils.py +218 -0
  16. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/xformers_utils.py +70 -0
  17. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/requirements.txt +15 -0
  18. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/setup.py +28 -0
  19. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/stability_ft.sh +49 -0
  20. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/stability_ft_cpu.sh +45 -0
  21. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/stability_ft_tc.sh +51 -0
  22. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/configs/stable-diffusion/v2-inference.yaml +68 -0
  23. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/configs/stable-diffusion/v2-inpainting-inference.yaml +158 -0
  24. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/configs/stable-diffusion/v2-midas-inference.yaml +74 -0
  25. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/configs/stable-diffusion/x4-upscaling.yaml +76 -0
  26. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/data/__init__.py +0 -0
  27. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/data/util.py +24 -0
  28. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/autoencoder.py +219 -0
  29. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/__init__.py +0 -0
  30. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddim.py +158 -0
  31. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddpm.py +1795 -0
  32. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/__init__.py +1 -0
  33. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/dpm_solver.py +1154 -0
  34. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/sampler.py +87 -0
  35. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpmpp_2m.py +78 -0
  36. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/plms.py +244 -0
  37. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/sampler.py +19 -0
  38. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/sampling_util.py +22 -0
  39. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/attention.py +352 -0
  40. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/diffusionmodules/__init__.py +0 -0
  41. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/diffusionmodules/openaimodel.py +786 -0
  42. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/diffusionmodules/util.py +270 -0
  43. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/ema.py +80 -0
  44. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/encoders/__init__.py +0 -0
  45. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/encoders/modules.py +213 -0
  46. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/__init__.py +0 -0
  47. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/api.py +170 -0
  48. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/__init__.py +0 -0
  49. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/base_model.py +16 -0
  50. docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/blocks.py +342 -0
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/LICENSE ADDED
@@ -0,0 +1,203 @@
1
+ Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
2
+
3
+ Apache License
4
+ Version 2.0, January 2004
5
+ http://www.apache.org/licenses/
6
+
7
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8
+
9
+ 1. Definitions.
10
+
11
+ "License" shall mean the terms and conditions for use, reproduction,
12
+ and distribution as defined by Sections 1 through 9 of this document.
13
+
14
+ "Licensor" shall mean the copyright owner or entity authorized by
15
+ the copyright owner that is granting the License.
16
+
17
+ "Legal Entity" shall mean the union of the acting entity and all
18
+ other entities that control, are controlled by, or are under common
19
+ control with that entity. For the purposes of this definition,
20
+ "control" means (i) the power, direct or indirect, to cause the
21
+ direction or management of such entity, whether by contract or
22
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
23
+ outstanding shares, or (iii) beneficial ownership of such entity.
24
+
25
+ "You" (or "Your") shall mean an individual or Legal Entity
26
+ exercising permissions granted by this License.
27
+
28
+ "Source" form shall mean the preferred form for making modifications,
29
+ including but not limited to software source code, documentation
30
+ source, and configuration files.
31
+
32
+ "Object" form shall mean any form resulting from mechanical
33
+ transformation or translation of a Source form, including but
34
+ not limited to compiled object code, generated documentation,
35
+ and conversions to other media types.
36
+
37
+ "Work" shall mean the work of authorship, whether in Source or
38
+ Object form, made available under the License, as indicated by a
39
+ copyright notice that is included in or attached to the work
40
+ (an example is provided in the Appendix below).
41
+
42
+ "Derivative Works" shall mean any work, whether in Source or Object
43
+ form, that is based on (or derived from) the Work and for which the
44
+ editorial revisions, annotations, elaborations, or other modifications
45
+ represent, as a whole, an original work of authorship. For the purposes
46
+ of this License, Derivative Works shall not include works that remain
47
+ separable from, or merely link (or bind by name) to the interfaces of,
48
+ the Work and Derivative Works thereof.
49
+
50
+ "Contribution" shall mean any work of authorship, including
51
+ the original version of the Work and any modifications or additions
52
+ to that Work or Derivative Works thereof, that is intentionally
53
+ submitted to Licensor for inclusion in the Work by the copyright owner
54
+ or by an individual or Legal Entity authorized to submit on behalf of
55
+ the copyright owner. For the purposes of this definition, "submitted"
56
+ means any form of electronic, verbal, or written communication sent
57
+ to the Licensor or its representatives, including but not limited to
58
+ communication on electronic mailing lists, source code control systems,
59
+ and issue tracking systems that are managed by, or on behalf of, the
60
+ Licensor for the purpose of discussing and improving the Work, but
61
+ excluding communication that is conspicuously marked or otherwise
62
+ designated in writing by the copyright owner as "Not a Contribution."
63
+
64
+ "Contributor" shall mean Licensor and any individual or Legal Entity
65
+ on behalf of whom a Contribution has been received by Licensor and
66
+ subsequently incorporated within the Work.
67
+
68
+ 2. Grant of Copyright License. Subject to the terms and conditions of
69
+ this License, each Contributor hereby grants to You a perpetual,
70
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71
+ copyright license to reproduce, prepare Derivative Works of,
72
+ publicly display, publicly perform, sublicense, and distribute the
73
+ Work and such Derivative Works in Source or Object form.
74
+
75
+ 3. Grant of Patent License. Subject to the terms and conditions of
76
+ this License, each Contributor hereby grants to You a perpetual,
77
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78
+ (except as stated in this section) patent license to make, have made,
79
+ use, offer to sell, sell, import, and otherwise transfer the Work,
80
+ where such license applies only to those patent claims licensable
81
+ by such Contributor that are necessarily infringed by their
82
+ Contribution(s) alone or by combination of their Contribution(s)
83
+ with the Work to which such Contribution(s) was submitted. If You
84
+ institute patent litigation against any entity (including a
85
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
86
+ or a Contribution incorporated within the Work constitutes direct
87
+ or contributory patent infringement, then any patent licenses
88
+ granted to You under this License for that Work shall terminate
89
+ as of the date such litigation is filed.
90
+
91
+ 4. Redistribution. You may reproduce and distribute copies of the
92
+ Work or Derivative Works thereof in any medium, with or without
93
+ modifications, and in Source or Object form, provided that You
94
+ meet the following conditions:
95
+
96
+ (a) You must give any other recipients of the Work or
97
+ Derivative Works a copy of this License; and
98
+
99
+ (b) You must cause any modified files to carry prominent notices
100
+ stating that You changed the files; and
101
+
102
+ (c) You must retain, in the Source form of any Derivative Works
103
+ that You distribute, all copyright, patent, trademark, and
104
+ attribution notices from the Source form of the Work,
105
+ excluding those notices that do not pertain to any part of
106
+ the Derivative Works; and
107
+
108
+ (d) If the Work includes a "NOTICE" text file as part of its
109
+ distribution, then any Derivative Works that You distribute must
110
+ include a readable copy of the attribution notices contained
111
+ within such NOTICE file, excluding those notices that do not
112
+ pertain to any part of the Derivative Works, in at least one
113
+ of the following places: within a NOTICE text file distributed
114
+ as part of the Derivative Works; within the Source form or
115
+ documentation, if provided along with the Derivative Works; or,
116
+ within a display generated by the Derivative Works, if and
117
+ wherever such third-party notices normally appear. The contents
118
+ of the NOTICE file are for informational purposes only and
119
+ do not modify the License. You may add Your own attribution
120
+ notices within Derivative Works that You distribute, alongside
121
+ or as an addendum to the NOTICE text from the Work, provided
122
+ that such additional attribution notices cannot be construed
123
+ as modifying the License.
124
+
125
+ You may add Your own copyright statement to Your modifications and
126
+ may provide additional or different license terms and conditions
127
+ for use, reproduction, or distribution of Your modifications, or
128
+ for any such Derivative Works as a whole, provided Your use,
129
+ reproduction, and distribution of the Work otherwise complies with
130
+ the conditions stated in this License.
131
+
132
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
133
+ any Contribution intentionally submitted for inclusion in the Work
134
+ by You to the Licensor shall be under the terms and conditions of
135
+ this License, without any additional terms or conditions.
136
+ Notwithstanding the above, nothing herein shall supersede or modify
137
+ the terms of any separate license agreement you may have executed
138
+ with Licensor regarding such Contributions.
139
+
140
+ 6. Trademarks. This License does not grant permission to use the trade
141
+ names, trademarks, service marks, or product names of the Licensor,
142
+ except as required for reasonable and customary use in describing the
143
+ origin of the Work and reproducing the content of the NOTICE file.
144
+
145
+ 7. Disclaimer of Warranty. Unless required by applicable law or
146
+ agreed to in writing, Licensor provides the Work (and each
147
+ Contributor provides its Contributions) on an "AS IS" BASIS,
148
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149
+ implied, including, without limitation, any warranties or conditions
150
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151
+ PARTICULAR PURPOSE. You are solely responsible for determining the
152
+ appropriateness of using or redistributing the Work and assume any
153
+ risks associated with Your exercise of permissions under this License.
154
+
155
+ 8. Limitation of Liability. In no event and under no legal theory,
156
+ whether in tort (including negligence), contract, or otherwise,
157
+ unless required by applicable law (such as deliberate and grossly
158
+ negligent acts) or agreed to in writing, shall any Contributor be
159
+ liable to You for damages, including any direct, indirect, special,
160
+ incidental, or consequential damages of any character arising as a
161
+ result of this License or out of the use or inability to use the
162
+ Work (including but not limited to damages for loss of goodwill,
163
+ work stoppage, computer failure or malfunction, or any and all
164
+ other commercial damages or losses), even if such Contributor
165
+ has been advised of the possibility of such damages.
166
+
167
+ 9. Accepting Warranty or Additional Liability. While redistributing
168
+ the Work or Derivative Works thereof, You may choose to offer,
169
+ and charge a fee for, acceptance of support, warranty, indemnity,
170
+ or other liability obligations and/or rights consistent with this
171
+ License. However, in accepting such obligations, You may act only
172
+ on Your own behalf and on Your sole responsibility, not on behalf
173
+ of any other Contributor, and only if You agree to indemnify,
174
+ defend, and hold each Contributor harmless for any liability
175
+ incurred by, or claims asserted against, such Contributor by reason
176
+ of your accepting any such warranty or additional liability.
177
+
178
+ END OF TERMS AND CONDITIONS
179
+
180
+ APPENDIX: How to apply the Apache License to your work.
181
+
182
+ To apply the Apache License to your work, attach the following
183
+ boilerplate notice, with the fields enclosed by brackets "[]"
184
+ replaced with your own identifying information. (Don't include
185
+ the brackets!) The text should be enclosed in the appropriate
186
+ comment syntax for the file format. We also recommend that a
187
+ file or class name and description of purpose be included on the
188
+ same "printed page" as the copyright notice for easier
189
+ identification within third-party archives.
190
+
191
+ Copyright [yyyy] [name of copyright owner]
192
+
193
+ Licensed under the Apache License, Version 2.0 (the "License");
194
+ you may not use this file except in compliance with the License.
195
+ You may obtain a copy of the License at
196
+
197
+ http://www.apache.org/licenses/LICENSE-2.0
198
+
199
+ Unless required by applicable law or agreed to in writing, software
200
+ distributed under the License is distributed on an "AS IS" BASIS,
201
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202
+ See the License for the specific language governing permissions and
203
+ limitations under the License.
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/README.md ADDED
@@ -0,0 +1,141 @@
1
+ # Stable Diffusion 2.1 FineTuning with Low-Rank Adaptation of Large Language Models for PyTorch
2
+ This directory provides scripts to fine-tune the Stable Diffusion 2.1 model, a latent text-to-image diffusion model. The scripts are tested and maintained by Habana.
3
+ For more information on training and inference of deep learning models using Gaudi, refer to [developer.habana.ai](https://developer.habana.ai/resources/).
4
+
5
+ - [Model-References](../../../README.md)
6
+ - [Model Overview](#model-overview)
7
+ - [Setup](#setup)
8
+ - [Training and Examples](#training)
9
+ - [Supported Configuration](#supported-configuration)
10
+ - [Changelog](#changelog)
11
+
12
+ ## Model Overview
13
+
14
+ This implementation fine-tunes the Stable Diffusion model (stabilityai/stable-diffusion-2-1-base) with Low-Rank Adaptation (LoRA), a highly parameter-efficient fine-tuning method. The pipeline fine-tunes the CLIP text encoder, the UNet, and a learned placeholder token to obtain better results.
15
+
16
+ More details about LoRA and its use with diffusion models can be found in this [blog post](https://huggingface.co/blog/lora) and these [examples](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image#training-with-lora); a minimal sketch of the low-rank update follows below.
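+
+ The core idea can be sketched in a few lines of PyTorch. The snippet below is a minimal, illustrative LoRA wrapper around a linear layer (it is not the implementation in `lora_diffusion/lora.py`): the original weight is frozen and only the low-rank factors are trained, so the effective weight becomes `W + scale * B @ A`.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class LoRALinear(nn.Module):
+     """Illustrative LoRA wrapper: y = base(x) + scale * B(A(x))."""
+
+     def __init__(self, base: nn.Linear, rank: int = 16, scale: float = 1.0):
+         super().__init__()
+         self.base = base
+         for p in self.base.parameters():      # freeze the original projection
+             p.requires_grad_(False)
+         self.lora_down = nn.Linear(base.in_features, rank, bias=False)   # A
+         self.lora_up = nn.Linear(rank, base.out_features, bias=False)    # B
+         nn.init.normal_(self.lora_down.weight, std=1.0 / rank)
+         nn.init.zeros_(self.lora_up.weight)   # start as a no-op
+         self.scale = scale
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return self.base(x) + self.scale * self.lora_up(self.lora_down(x))
+
+ # Only the low-rank factors are trainable, matching the spirit of --lora_rank=16 below.
+ layer = LoRALinear(nn.Linear(768, 768), rank=16)
+ trainable = [p for p in layer.parameters() if p.requires_grad]
+ ```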
17
+
18
+ ### How to use
19
+ Users acknowledge and understand that the models referenced by Habana are merely examples of models that can be run on Gaudi.
20
+ Users bear sole liability and responsibility to follow and comply with any third party licenses pertaining to such models,
21
+ and Habana Labs disclaims any warranty or liability with respect to users' use of, or compliance with, such third party licenses.
22
+
23
+ ## Setup
24
+ Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html) to set up the environment including the `$PYTHON` environment variable. To achieve the best performance, please follow the methods outlined in the [Optimizing Training Platform guide](https://docs.habana.ai/en/latest/PyTorch/Model_Optimization_PyTorch/Optimization_in_Training_Platform.html).
25
+ The guides will walk you through the process of setting up your system to run the model on Gaudi.
26
+
27
+ ### Clone Habana Model-References
28
+ In the docker container, clone this repository and switch to the branch that matches your SynapseAI version.
29
+ You can run the [`hl-smi`](https://docs.habana.ai/en/latest/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options) utility to determine the SynapseAI version.
30
+ ```bash
31
+ git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References
32
+ cd Model-References/PyTorch/generative_models/stable-diffusion-finetuning
33
+ ```
34
+ ### Install Model Requirements
35
+ 1. In the docker container, go to the model directory:
36
+ ```bash
37
+ cd Model-References/PyTorch/generative_models/stable-diffusion-finetuning
38
+ ```
39
+
40
+ 2. Install the required packages using pip:
41
+ ```bash
42
+ pip install -r requirements.txt
43
+ pip install .
44
+ ```
45
+
46
+ ## Training
47
+ ### Model Checkpoint
48
+
49
+ The fine-tuning script will internally download checkpoints from https://huggingface.co/stabilityai/stable-diffusion-2-1-base (see the snippet below for pre-downloading them).
50
+
51
+ Users acknowledge and understand that by downloading the checkpoint referenced herein they will be required to comply
52
+ with third party licenses and rights pertaining to the checkpoint, and users will be solely liable and responsible
53
+ for complying with any applicable licenses. Habana Labs disclaims any warranty or liability with respect to users' use
54
+ or compliance with such third party licenses.
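+
+ If you want to fetch and cache the checkpoint ahead of time (for example while the machine still has network access), a minimal sketch using the `diffusers` API is shown below; `from_pretrained` stores the files in the local Hugging Face cache, which the fine-tuning script will reuse.
+
+ ```python
+ # Pre-download the base checkpoint into the local Hugging Face cache.
+ from diffusers import StableDiffusionPipeline
+
+ StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
+ ```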
55
+
56
+ ### Dataset Preparation
57
+ For fine-tuning, a synthetic dataset is used; the generated directory (./yellow_rect by default) can later be passed as INSTANCE_DIR in the training command below.
58
+
59
+ 1. In the docker container, go to the model directory:
60
+ ```bash
61
+ cd Model-References/PyTorch/generative_models/stable-diffusion-finetuning
62
+ ```
63
+
64
+ 2. Generate synthetic dataset:
65
+ ```bash
66
+ python data/scripts/gen_synth_data.py
67
+ ```
68
+
69
+ ### 1. Fine-tuning Stable diffusion with LoRA CLI
70
+
71
+ #### Single Card Training Examples
72
+ **Run training on 1 HPU:**
73
+
74
+ ```bash
75
+ export MODEL_NAME="stabilityai/stable-diffusion-2-1-base"
76
+ export INSTANCE_DIR=<path-to-instances>
77
+ export OUTPUT_DIR=<path-to-output>
78
+
79
+ lora_pti \
80
+ --pretrained_model_name_or_path=$MODEL_NAME \
81
+ --instance_data_dir=$INSTANCE_DIR \
82
+ --output_dir=$OUTPUT_DIR \
83
+ --use_face_segmentation_condition \
84
+ --resolution=512 \
85
+ --train_batch_size=7 \
86
+ --gradient_accumulation_steps=1 \
87
+ --learning_rate_unet=5e-5 \
88
+ --learning_rate_ti=2e-3 \
89
+ --color_jitter \
90
+ --lr_scheduler="linear" --lr_scheduler_lora="linear" \
91
+ --lr_warmup_steps=0 \
92
+ --placeholder_tokens="<s1>|<s2>" \
93
+ --use_template="object" \
94
+ --save_steps=50 \
95
+ --max_train_steps_ti=500 \
96
+ --max_train_steps_tuning=1000 \
97
+ --perform_inversion=True \
98
+ --clip_ti_decay \
99
+ --weight_decay_ti=0.000 \
100
+ --weight_decay_lora=0.001 \
101
+ --continue_inversion \
102
+ --continue_inversion_lr=1e-3 \
103
+ --device="hpu" \
104
+ --lora_rank=16 \
105
+ --use_lazy_mode=True \
106
+ --use_fused_adamw=True \
107
+ --print_freq=50 \
108
+ --use_fused_clip_norm=True
109
+ ```
110
+
111
+ Refer to the [reference model discussion](https://github.com/cloneofsimo/lora/discussions/121) to see what these parameters mean. Once a run has written checkpoints to `$OUTPUT_DIR`, they can be used for inference as sketched below.
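+
+ The sketch below assumes the `patch_pipe(pipe, path)` and `tune_lora_scale(module, scale)` helpers exported by this package and a checkpoint written during your run (for example one of the `step_inv_<step>.safetensors` files saved every `--save_steps`); adjust the path, device and prompt to your setup.
+
+ ```python
+ from diffusers import StableDiffusionPipeline
+ from lora_diffusion import patch_pipe, tune_lora_scale
+
+ pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
+ # Apply the learned LoRA weights and textual-inversion embeddings.
+ patch_pipe(pipe, "<path-to-output>/step_inv_500.safetensors")
+ tune_lora_scale(pipe.unet, 1.0)
+ tune_lora_scale(pipe.text_encoder, 1.0)
+
+ image = pipe("a photo of <s1><s2>", num_inference_steps=50).images[0]
+ image.save("lora_sample.png")
+ ```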
112
+
113
+
114
+ ## Supported Configuration
115
+ | Validated on | SynapseAI Version | PyTorch Version | Mode |
116
+ |---------|-------------------|-----------------|------------|
117
+ | Gaudi | 1.11.0 | 2.0.1 | Training |
118
+ | Gaudi2 | 1.14.0 | 2.1.1 | Training |
119
+
120
+ ## Changelog
121
+
122
+ ### Script Modifications
123
+ ### 1.13.0
124
+ * Modified training script to support diffusers version 0.21.4.
125
+
126
+ ### 1.12.0
127
+ * Dynamic Shapes will be enabled by default in future releases. It is currently disabled in the training script.
128
+
129
+ ### 1.11.0
130
+ * Dynamic Shapes will be enabled by default in future releases. It is currently enabled in the training script as a temporary solution.
131
+
132
+ ### 1.10.0
133
+ * Modified README
134
+ * Enabled PyTorch autocast on Gaudi
135
+ * Added additional logging
136
+ * Added support for HPU
137
+ * Added FusedAdamW and FusedClipNorm
138
+ * Added Tensorboard logging
139
+ * Added device trace and memory stats reporting
140
+ * Added print frequency change
141
+ * Enabled HPU graph execution for host optimization
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/data/scripts/gen_synth_data.py ADDED
@@ -0,0 +1,64 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ from PIL import Image, ImageDraw
5
+ import numpy as np
6
+ import os
7
+ import random
8
+
9
+ def create_image_with_yellow_square_face(idx=0,path='./',dirname='yellow_rect',min_size=400,max_size=2000,min_prop=0.25,max_prop=0.9):
10
+ # Generate a random image size between min_size and max_size
11
+ X,Y = np.random.randint((min_size,min_size),(max_size,max_size))
12
+
13
+ # Create a white background image
14
+ img = Image.new('RGB', (X, Y), (255, 255, 255))
15
+ # Generate a random mask size as a fraction (min_prop..max_prop) of the image size
16
+ mask_x = random.randint(int(X*min_prop), int(X*max_prop))
17
+ mask_y = random.randint(int(Y*min_prop), int(Y*max_prop))
18
+ # Generate random top-left point for the mask (x and y coordinates)
19
+ mask_start_x = random.randint(0, X - mask_x)
20
+ mask_start_y = random.randint(0, Y - mask_y)
21
+
22
+ # Create an ImageDraw object
23
+ draw = ImageDraw.Draw(img)
24
+ # Draw the mask first
25
+ mask = Image.new('L', (X, Y), 1)
26
+ draw_mask=ImageDraw.Draw(mask)
27
+ draw_mask.rectangle([mask_start_x, mask_start_y, mask_start_x+mask_x, mask_start_y+mask_y], fill=255)
28
+ data_path=os.path.join(path,dirname)
29
+
30
+ if not os.path.exists(data_path):
31
+ os.makedirs(data_path)
32
+ mask.save(os.path.join(data_path,f'{idx}.mask.png'))
33
+
34
+ # Draw a yellow square (mask) on the image
35
+ draw.rectangle([mask_start_x, mask_start_y, mask_start_x+mask_x, mask_start_y+mask_y], fill=(255,255,0))
36
+
37
+ # Draw two eyes on the square
38
+ eye_size = min(mask_x // 10,mask_y//10)
39
+ eye_y = mask_start_y + mask_y // 4
40
+ for i in range(2):
41
+ eye_x = mask_start_x + (i+1) * mask_x // 3 - eye_size // 2
42
+ draw.ellipse([eye_x, eye_y, eye_x+eye_size, eye_y+eye_size], fill=(0,0,0))
43
+
44
+ # Draw a smile on the square
45
+ smile_size = mask_x // 2
46
+ smile_y = mask_start_y + 3 * mask_y // 5
47
+ smile_height = mask_y // 5
48
+ smile_start_x = mask_start_x + mask_x // 4
49
+ draw.arc([smile_start_x, smile_y, smile_start_x+smile_size, smile_y+smile_height], start=0, end=180, fill=(0,0,0))
50
+
51
+ # Save the image to a file
52
+ img.save(os.path.join(data_path,f'image_{idx}.png'))
53
+
54
+ # Return the mask coordinates
55
+ return mask_start_x, mask_start_y
56
+
57
+ if __name__ == '__main__':
58
+ base_path = "./"
59
+ rel_dir_path = "yellow_rect"
60
+
61
+ # Generate dataset of 10 images with corresponding masks
62
+ for i in range(10):
63
+ create_image_with_yellow_square_face(i, path=base_path, dirname=rel_dir_path)
64
+ print("The synthetic dataset is generated at ", os.path.abspath(os.path.join(base_path, rel_dir_path)))
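+
+ # A minimal sketch of how to sanity-check one generated pair (illustrative only;
+ # the file names follow the pattern used above: image_<i>.png and <i>.mask.png):
+ #
+ #   from PIL import Image
+ #   img = Image.open("yellow_rect/image_0.png")
+ #   msk = Image.open("yellow_rect/0.mask.png")
+ #   assert img.size == msk.size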
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ from .lora import *
2
+ from .dataset import *
3
+ from .utils import *
4
+ from .preprocess_files import *
5
+ from .lora_manager import *
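+
+ # The wildcard imports above re-export the package API used by the CLI modules and
+ # the training script, e.g. patch_pipe, tune_lora_scale, collapse_lora, save_all,
+ # inject_trainable_lora and the dataset/preprocessing helpers.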
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/cli_lora_add.py ADDED
@@ -0,0 +1,187 @@
1
+ from typing import Literal, Union, Dict
2
+ import os
3
+ import shutil
4
+ import fire
5
+ from diffusers import StableDiffusionPipeline
6
+ from safetensors.torch import safe_open, save_file
7
+
8
+ import torch
9
+ from .lora import (
10
+ tune_lora_scale,
11
+ patch_pipe,
12
+ collapse_lora,
13
+ monkeypatch_remove_lora,
14
+ )
15
+ from .lora_manager import lora_join
16
+ from .to_ckpt_v2 import convert_to_ckpt
17
+
18
+
19
+ def _text_lora_path(path: str) -> str:
20
+ assert path.endswith(".pt"), "Only .pt files are supported"
21
+ return ".".join(path.split(".")[:-1] + ["text_encoder", "pt"])
22
+
23
+
24
+ def add(
25
+ path_1: str,
26
+ path_2: str,
27
+ output_path: str,
28
+ alpha_1: float = 0.5,
29
+ alpha_2: float = 0.5,
30
+ mode: Literal[
31
+ "lpl",
32
+ "upl",
33
+ "upl-ckpt-v2",
+ "ljl",
34
+ ] = "lpl",
35
+ with_text_lora: bool = False,
36
+ ):
37
+ print("Lora Add, mode " + mode)
38
+ if mode == "lpl":
39
+ if path_1.endswith(".pt") and path_2.endswith(".pt"):
40
+ for _path_1, _path_2, opt in [(path_1, path_2, "unet")] + (
41
+ [(_text_lora_path(path_1), _text_lora_path(path_2), "text_encoder")]
42
+ if with_text_lora
43
+ else []
44
+ ):
45
+ print("Loading", _path_1, _path_2)
46
+ out_list = []
47
+ if opt == "text_encoder":
48
+ if not os.path.exists(_path_1):
49
+ print(f"No text encoder found in {_path_1}, skipping...")
50
+ continue
51
+ if not os.path.exists(_path_2):
52
+ print(f"No text encoder found in {_path_2}, skipping...")
53
+ continue
54
+
55
+ l1 = torch.load(_path_1)
56
+ l2 = torch.load(_path_2)
57
+
58
+ l1pairs = zip(l1[::2], l1[1::2])
59
+ l2pairs = zip(l2[::2], l2[1::2])
60
+
61
+ for (x1, y1), (x2, y2) in zip(l1pairs, l2pairs):
62
+ # print("Merging", x1.shape, y1.shape, x2.shape, y2.shape)
63
+ x1.data = alpha_1 * x1.data + alpha_2 * x2.data
64
+ y1.data = alpha_1 * y1.data + alpha_2 * y2.data
65
+
66
+ out_list.append(x1)
67
+ out_list.append(y1)
68
+
69
+ if opt == "unet":
70
+
71
+ print("Saving merged UNET to", output_path)
72
+ torch.save(out_list, output_path)
73
+
74
+ elif opt == "text_encoder":
75
+ print("Saving merged text encoder to", _text_lora_path(output_path))
76
+ torch.save(
77
+ out_list,
78
+ _text_lora_path(output_path),
79
+ )
80
+
81
+ elif path_1.endswith(".safetensors") and path_2.endswith(".safetensors"):
82
+ safeloras_1 = safe_open(path_1, framework="pt", device="cpu")
83
+ safeloras_2 = safe_open(path_2, framework="pt", device="cpu")
84
+
85
+ metadata = dict(safeloras_1.metadata())
86
+ metadata.update(dict(safeloras_2.metadata()))
87
+
88
+ ret_tensor = {}
89
+
90
+ for keys in set(list(safeloras_1.keys()) + list(safeloras_2.keys())):
91
+ if keys.startswith("text_encoder") or keys.startswith("unet"):
92
+
93
+ tens1 = safeloras_1.get_tensor(keys)
94
+ tens2 = safeloras_2.get_tensor(keys)
95
+
96
+ tens = alpha_1 * tens1 + alpha_2 * tens2
97
+ ret_tensor[keys] = tens
98
+ else:
99
+ if keys in safeloras_1.keys():
100
+
101
+ tens1 = safeloras_1.get_tensor(keys)
102
+ else:
103
+ tens1 = safeloras_2.get_tensor(keys)
104
+
105
+ ret_tensor[keys] = tens1
106
+
107
+ save_file(ret_tensor, output_path, metadata)
108
+
109
+ elif mode == "upl":
110
+
111
+ print(
112
+ f"Merging UNET/CLIP from {path_1} with LoRA from {path_2} to {output_path}. Merging ratio : {alpha_1}."
113
+ )
114
+
115
+ loaded_pipeline = StableDiffusionPipeline.from_pretrained(
116
+ path_1,
117
+ ).to("cpu")
118
+
119
+ patch_pipe(loaded_pipeline, path_2)
120
+
121
+ collapse_lora(loaded_pipeline.unet, alpha_1)
122
+ collapse_lora(loaded_pipeline.text_encoder, alpha_1)
123
+
124
+ monkeypatch_remove_lora(loaded_pipeline.unet)
125
+ monkeypatch_remove_lora(loaded_pipeline.text_encoder)
126
+
127
+ loaded_pipeline.save_pretrained(output_path)
128
+
129
+ elif mode == "upl-ckpt-v2":
130
+
131
+ assert output_path.endswith(".ckpt"), "Only .ckpt files are supported"
132
+ name = os.path.basename(output_path)[0:-5]
133
+
134
+ print(
135
+ f"You will be using {name} as the token in A1111 webui. Make sure {name} is a unique enough token."
136
+ )
137
+
138
+ loaded_pipeline = StableDiffusionPipeline.from_pretrained(
139
+ path_1,
140
+ ).to("cpu")
141
+
142
+ tok_dict = patch_pipe(loaded_pipeline, path_2, patch_ti=False)
143
+
144
+ collapse_lora(loaded_pipeline.unet, alpha_1)
145
+ collapse_lora(loaded_pipeline.text_encoder, alpha_1)
146
+
147
+ monkeypatch_remove_lora(loaded_pipeline.unet)
148
+ monkeypatch_remove_lora(loaded_pipeline.text_encoder)
149
+
150
+ _tmp_output = output_path + ".tmp"
151
+
152
+ loaded_pipeline.save_pretrained(_tmp_output)
153
+ convert_to_ckpt(_tmp_output, output_path, as_half=True)
154
+ # remove the tmp_output folder
155
+ shutil.rmtree(_tmp_output)
156
+
157
+ keys = sorted(tok_dict.keys())
158
+ tok_catted = torch.stack([tok_dict[k] for k in keys])
159
+ ret = {
160
+ "string_to_token": {"*": torch.tensor(265)},
161
+ "string_to_param": {"*": tok_catted},
162
+ "name": name,
163
+ }
164
+
165
+ torch.save(ret, output_path[:-5] + ".pt")
166
+ print(
167
+ f"Textual embedding saved as {output_path[:-5]}.pt, put it in the embedding folder and use it as {name} in A1111 repo, "
168
+ )
169
+ elif mode == "ljl":
170
+ print("Using Join mode : alpha will not have an effect here.")
171
+ assert path_1.endswith(".safetensors") and path_2.endswith(
172
+ ".safetensors"
173
+ ), "Only .safetensors files are supported"
174
+
175
+ safeloras_1 = safe_open(path_1, framework="pt", device="cpu")
176
+ safeloras_2 = safe_open(path_2, framework="pt", device="cpu")
177
+
178
+ total_tensor, total_metadata, _, _ = lora_join([safeloras_1, safeloras_2])
179
+ save_file(total_tensor, output_path, total_metadata)
180
+
181
+ else:
182
+ print("Unknown mode", mode)
183
+ raise ValueError(f"Unknown mode {mode}")
184
+
185
+
186
+ def main():
187
+ fire.Fire(add)
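+
+ # Minimal usage sketch (paths are placeholders): merge two LoRA checkpoints
+ # produced by this package, weighting them 0.7 / 0.3 with the default "lpl" mode:
+ #
+ #   from lora_diffusion.cli_lora_add import add
+ #   add("loraA.safetensors", "loraB.safetensors", "merged.safetensors",
+ #       alpha_1=0.7, alpha_2=0.3, mode="lpl")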
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/cli_lora_pti.py ADDED
@@ -0,0 +1,1349 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ # Bootstrapped from:
5
+ # https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py
6
+
7
+ import argparse
8
+ import hashlib
9
+ import inspect
10
+ import itertools
11
+ import math
12
+ import os
13
+ import random
14
+ import re
15
+ from pathlib import Path
16
+ from typing import Optional, List, Literal
17
+ import numpy as np
18
+
19
+ import torch
20
+ import torch.nn.functional as F
21
+ import torch.optim as optim
22
+ import torch.utils.checkpoint
23
+ from diffusers import (
24
+ AutoencoderKL,
25
+ DDPMScheduler,
26
+ StableDiffusionPipeline,
27
+ UNet2DConditionModel,
28
+ )
29
+ from diffusers.optimization import get_scheduler
30
+ from huggingface_hub import HfFolder, Repository, whoami
31
+ from PIL import Image
32
+ from torch.utils.data import Dataset
33
+ from torchvision import transforms
34
+ from torchvision.transforms.functional import pil_to_tensor
35
+ from tqdm.auto import tqdm
36
+ from transformers import CLIPTextModel, CLIPTokenizer
37
+ import wandb
38
+ import fire
39
+
40
+ from lora_diffusion import (
41
+ PivotalTuningDatasetCapation,
42
+ extract_lora_ups_down,
43
+ inject_trainable_lora,
44
+ inject_trainable_lora_extended,
45
+ inspect_lora,
46
+ save_lora_weight,
47
+ save_all,
48
+ prepare_clip_model_sets,
49
+ evaluate_pipe,
50
+ UNET_EXTENDED_TARGET_REPLACE,
51
+ )
52
+
53
+ import sys
54
+ try:
55
+ sys.path.append(os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../common/")))
56
+ from tools.synapse_profiler_api import SynapseProfilerApi, TraceType
57
+ except ImportError:
58
+ pass
59
+
60
+ def get_vae_encode_output(vae, data, dtype, device):
61
+ print("use_torch_compile : vae :Model is compiled")
62
+ def vae_encode(vae, data, dtype, device):
63
+ return vae.encode(data.to(dtype).to(device)).latent_dist.sample()
64
+ vae_encode_compiled = torch.compile(vae_encode, backend="aot_hpu_training_backend")
65
+ return vae_encode_compiled(vae, data, dtype, device)
66
+
67
+ def get_models(
68
+ pretrained_model_name_or_path,
69
+ pretrained_vae_name_or_path,
70
+ revision,
71
+ placeholder_tokens: List[str],
72
+ initializer_tokens: List[str],
73
+ device="cuda:0",
74
+ ):
75
+
76
+ tokenizer = CLIPTokenizer.from_pretrained(
77
+ pretrained_model_name_or_path,
78
+ subfolder="tokenizer",
79
+ revision=revision,
80
+ )
81
+
82
+ text_encoder = CLIPTextModel.from_pretrained(
83
+ pretrained_model_name_or_path,
84
+ subfolder="text_encoder",
85
+ revision=revision,
86
+ )
87
+
88
+ placeholder_token_ids = []
89
+
90
+ for token, init_tok in zip(placeholder_tokens, initializer_tokens):
91
+ num_added_tokens = tokenizer.add_tokens(token)
92
+ if num_added_tokens == 0:
93
+ raise ValueError(
94
+ f"The tokenizer already contains the token {token}. Please pass a different"
95
+ " `placeholder_token` that is not already in the tokenizer."
96
+ )
97
+
98
+ placeholder_token_id = tokenizer.convert_tokens_to_ids(token)
99
+
100
+ placeholder_token_ids.append(placeholder_token_id)
101
+
102
+ # Load models and create wrapper for stable diffusion
103
+
104
+ text_encoder.resize_token_embeddings(len(tokenizer))
105
+ token_embeds = text_encoder.get_input_embeddings().weight.data
106
+ if init_tok.startswith("<rand"):
107
+ # <rand-"sigma">, e.g. <rand-0.5>
108
+ sigma_val = float(re.findall(r"<rand-(.*)>", init_tok)[0])
109
+
110
+ token_embeds[placeholder_token_id] = (
111
+ torch.randn_like(token_embeds[0]) * sigma_val
112
+ )
113
+ print(
114
+ f"Initialized {token} with random noise (sigma={sigma_val}), empirically {token_embeds[placeholder_token_id].mean().item():.3f} +- {token_embeds[placeholder_token_id].std().item():.3f}"
115
+ )
116
+ print(f"Norm : {token_embeds[placeholder_token_id].norm():.4f}")
117
+
118
+ elif init_tok == "<zero>":
119
+ token_embeds[placeholder_token_id] = torch.zeros_like(token_embeds[0])
120
+ else:
121
+ token_ids = tokenizer.encode(init_tok, add_special_tokens=False)
122
+ # Check if initializer_token is a single token or a sequence of tokens
123
+ if len(token_ids) > 1:
124
+ raise ValueError("The initializer token must be a single token.")
125
+
126
+ initializer_token_id = token_ids[0]
127
+ token_embeds[placeholder_token_id] = token_embeds[initializer_token_id]
128
+
129
+ vae = AutoencoderKL.from_pretrained(
130
+ pretrained_vae_name_or_path or pretrained_model_name_or_path,
131
+ subfolder=None if pretrained_vae_name_or_path else "vae",
132
+ revision=None if pretrained_vae_name_or_path else revision,
133
+ )
134
+ unet = UNet2DConditionModel.from_pretrained(
135
+ pretrained_model_name_or_path,
136
+ subfolder="unet",
137
+ revision=revision,
138
+ )
139
+
140
+ return (
141
+ text_encoder.to(device),
142
+ vae.to(device),
143
+ unet.to(device),
144
+ tokenizer,
145
+ placeholder_token_ids,
146
+ )
147
+
148
+
149
+ @torch.no_grad()
150
+ def text2img_dataloader(
151
+ train_dataset,
152
+ train_batch_size,
153
+ tokenizer,
154
+ vae,
155
+ text_encoder,
156
+ cached_latents: bool = False,
157
+ use_torch_compile:bool = False,
158
+ ):
159
+
160
+ if cached_latents:
161
+ cached_latents_dataset = []
162
+ for idx in tqdm(range(len(train_dataset))):
163
+ batch = train_dataset[idx]
164
+ # print(batch)
165
+ latents = None
166
+ if use_torch_compile:
167
+ latents = get_vae_encode_output(vae, batch["instance_images"].unsqueeze(0), vae.dtype, vae.device)
168
+ else:
169
+ latents = vae.encode(
170
+ batch["instance_images"].unsqueeze(0).to(dtype=vae.dtype).to(vae.device)
171
+ ).latent_dist.sample()
172
+ latents = latents * 0.18215
173
+ batch["instance_images"] = latents.squeeze(0)
174
+ cached_latents_dataset.append(batch)
175
+
176
+ def collate_fn(examples):
177
+ input_ids = [example["instance_prompt_ids"] for example in examples]
178
+ pixel_values = [example["instance_images"] for example in examples]
179
+ pixel_values = torch.stack(pixel_values)
180
+ pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
181
+
182
+ input_ids = tokenizer.pad(
183
+ {"input_ids": input_ids},
184
+ padding="max_length",
185
+ max_length=tokenizer.model_max_length,
186
+ return_tensors="pt",
187
+ ).input_ids
188
+
189
+ batch = {
190
+ "input_ids": input_ids,
191
+ "pixel_values": pixel_values,
192
+ }
193
+
194
+ if examples[0].get("mask", None) is not None:
195
+ batch["mask"] = torch.stack([example["mask"] for example in examples])
196
+
197
+ return batch
198
+
199
+ if cached_latents:
200
+
201
+ train_dataloader = torch.utils.data.DataLoader(
202
+ cached_latents_dataset,
203
+ batch_size=train_batch_size,
204
+ shuffle=True,
205
+ collate_fn=collate_fn,
206
+ )
207
+
208
+ print("PTI : Using cached latent.")
209
+
210
+ else:
211
+ train_dataloader = torch.utils.data.DataLoader(
212
+ train_dataset,
213
+ batch_size=train_batch_size,
214
+ shuffle=True,
215
+ collate_fn=collate_fn,
216
+ )
217
+
218
+ return train_dataloader
219
+
220
+
221
+ def inpainting_dataloader(
222
+ train_dataset, train_batch_size, tokenizer, vae, text_encoder
223
+ ):
224
+ def collate_fn(examples):
225
+ input_ids = [example["instance_prompt_ids"] for example in examples]
226
+ pixel_values = [example["instance_images"] for example in examples]
227
+ mask_values = [example["instance_masks"] for example in examples]
228
+ masked_image_values = [
229
+ example["instance_masked_images"] for example in examples
230
+ ]
231
+
232
+ # Concat class and instance examples for prior preservation.
233
+ # We do this to avoid doing two forward passes.
234
+ if examples[0].get("class_prompt_ids", None) is not None:
235
+ input_ids += [example["class_prompt_ids"] for example in examples]
236
+ pixel_values += [example["class_images"] for example in examples]
237
+ mask_values += [example["class_masks"] for example in examples]
238
+ masked_image_values += [
239
+ example["class_masked_images"] for example in examples
240
+ ]
241
+
242
+ pixel_values = (
243
+ torch.stack(pixel_values).to(memory_format=torch.contiguous_format).float()
244
+ )
245
+ mask_values = (
246
+ torch.stack(mask_values).to(memory_format=torch.contiguous_format).float()
247
+ )
248
+ masked_image_values = (
249
+ torch.stack(masked_image_values)
250
+ .to(memory_format=torch.contiguous_format)
251
+ .float()
252
+ )
253
+
254
+ input_ids = tokenizer.pad(
255
+ {"input_ids": input_ids},
256
+ padding="max_length",
257
+ max_length=tokenizer.model_max_length,
258
+ return_tensors="pt",
259
+ ).input_ids
260
+
261
+ batch = {
262
+ "input_ids": input_ids,
263
+ "pixel_values": pixel_values,
264
+ "mask_values": mask_values,
265
+ "masked_image_values": masked_image_values,
266
+ }
267
+
268
+ if examples[0].get("mask", None) is not None:
269
+ batch["mask"] = torch.stack([example["mask"] for example in examples])
270
+
271
+ return batch
272
+
273
+ train_dataloader = torch.utils.data.DataLoader(
274
+ train_dataset,
275
+ batch_size=train_batch_size,
276
+ shuffle=True,
277
+ collate_fn=collate_fn,
278
+ )
279
+
280
+ return train_dataloader
281
+
282
+
283
+ def loss_step(
284
+ batch,
285
+ unet,
286
+ vae,
287
+ text_encoder,
288
+ scheduler,
289
+ train_inpainting=False,
290
+ t_mutliplier=1.0,
291
+ mixed_precision=False,
292
+ mask_temperature=1.0,
293
+ cached_latents: bool = False,
294
+ use_torch_compile: bool = False,
295
+ ):
296
+ weight_dtype = torch.float32
297
+ if not cached_latents:
298
+ latents = None
299
+ if use_torch_compile:
300
+ latents = get_vae_encode_output(vae, batch["pixel_values"], weight_dtype, unet.device)
301
+ else:
302
+ latents = vae.encode(
303
+ batch["pixel_values"].to(dtype=weight_dtype).to(unet.device)
304
+ ).latent_dist.sample()
305
+ latents = latents * 0.18215
306
+
307
+ if train_inpainting:
308
+ masked_image_latents = None
309
+ if use_torch_compile:
310
+ masked_image_latents = get_vae_encode_output(vae, batch["masked_image_values"], weight_dtype, unet.device)
311
+ else:
312
+ masked_image_latents = vae.encode(
313
+ batch["masked_image_values"].to(dtype=weight_dtype).to(unet.device)
314
+ ).latent_dist.sample()
315
+ masked_image_latents = masked_image_latents * 0.18215
316
+ mask = F.interpolate(
317
+ batch["mask_values"].to(dtype=weight_dtype).to(unet.device),
318
+ scale_factor=1 / 8,
319
+ )
320
+ else:
321
+ latents = batch["pixel_values"]
322
+
323
+ if train_inpainting:
324
+ masked_image_latents = batch["masked_image_latents"]
325
+ mask = batch["mask_values"]
326
+
327
+ noise = torch.randn_like(latents)
328
+ bsz = latents.shape[0]
329
+ if use_torch_compile:
330
+ timesteps = torch.randint(
331
+ 0,
332
+ int(scheduler.config.num_train_timesteps * t_mutliplier),
333
+ (bsz,),
334
+ device="cpu",
335
+ )
336
+ timesteps = timesteps.to("hpu")
337
+ else:
338
+ timesteps = torch.randint(
339
+ 0,
340
+ int(scheduler.config.num_train_timesteps * t_mutliplier),
341
+ (bsz,),
342
+ device=latents.device,
343
+ )
344
+
345
+ timesteps = timesteps.long()
346
+
347
+ noisy_latents = scheduler.add_noise(latents, noise, timesteps)
348
+
349
+ if train_inpainting:
350
+ latent_model_input = torch.cat(
351
+ [noisy_latents, mask, masked_image_latents], dim=1
352
+ )
353
+ else:
354
+ latent_model_input = noisy_latents
355
+
356
+ if mixed_precision:
357
+ with torch.autocast(device_type=text_encoder.device.type,
358
+ dtype=torch.bfloat16 if text_encoder.device.type in ["hpu", "cpu"] else torch.float16,
359
+ enabled=mixed_precision):
360
+
361
+ encoder_hidden_states = text_encoder(
362
+ batch["input_ids"].to(text_encoder.device)
363
+ )[0]
364
+
365
+ model_pred = unet(
366
+ latent_model_input, timesteps, encoder_hidden_states, return_dict=False
367
+ )[0]
368
+ else:
369
+
370
+ encoder_hidden_states = text_encoder(
371
+ batch["input_ids"].to(text_encoder.device)
372
+ )[0]
373
+
374
+ model_pred = unet(latent_model_input, timesteps, encoder_hidden_states, return_dict=False)[0]
375
+
376
+ if scheduler.config.prediction_type == "epsilon":
377
+ target = noise
378
+ elif scheduler.config.prediction_type == "v_prediction":
379
+ target = scheduler.get_velocity(latents, noise, timesteps)
380
+ else:
381
+ raise ValueError(f"Unknown prediction type {scheduler.config.prediction_type}")
382
+
383
+ if batch.get("mask", None) is not None:
384
+
385
+ mask = (
386
+ batch["mask"]
387
+ .to(model_pred.device)
388
+ .reshape(
389
+ model_pred.shape[0], 1, model_pred.shape[2] * 8, model_pred.shape[3] * 8
390
+ )
391
+ )
392
+ # resize to match model_pred
393
+ mask = F.interpolate(
394
+ mask.float(),
395
+ size=model_pred.shape[-2:],
396
+ mode="nearest",
397
+ )
398
+
399
+ mask = (mask + 0.01).pow(mask_temperature)
400
+
401
+ mask = mask / mask.max()
402
+
403
+ model_pred = model_pred * mask
404
+
405
+ target = target * mask
406
+
407
+ loss = (
408
+ F.mse_loss(model_pred.float(), target.float(), reduction="none")
409
+ .mean([1, 2, 3])
410
+ .mean()
411
+ )
412
+
413
+ return loss
414
+
415
+
416
+ def train_inversion(
417
+ unet,
418
+ vae,
419
+ text_encoder,
420
+ dataloader,
421
+ num_steps: int,
422
+ scheduler,
423
+ index_no_updates,
424
+ optimizer,
425
+ save_steps: int,
426
+ placeholder_token_ids,
427
+ placeholder_tokens,
428
+ save_path: str,
429
+ tokenizer,
430
+ lr_scheduler,
431
+ test_image_path: str,
432
+ cached_latents: bool,
433
+ accum_iter: int = 1,
434
+ log_wandb: bool = False,
435
+ wandb_log_prompt_cnt: int = 10,
436
+ class_token: str = "person",
437
+ train_inpainting: bool = False,
438
+ mixed_precision: bool = False,
439
+ clip_ti_decay: bool = True,
440
+ htcore = None,
441
+ log_tb: bool = False,
442
+ writer = None,
443
+ use_synapse_profiler: bool = False,
444
+ use_pytorch_profiler: bool = False,
445
+ profiler_step: int = 3,
446
+ profile_ti: bool = False,
447
+ print_freq: int = 50,
448
+ use_torch_compile:bool = False,
449
+ ):
450
+
451
+ progress_bar = tqdm(range(num_steps),mininterval=0,miniters=10, smoothing=1)
452
+ progress_bar.set_description("Steps")
453
+ global_step = 0
454
+
455
+ if not profile_ti:
456
+ use_synapse_profiler = False
457
+ use_pytorch_profiler = False
458
+
459
+ # Original Emb for TI
460
+ orig_embeds_params = text_encoder.get_input_embeddings().weight.data.clone()
461
+
462
+ if log_wandb or log_tb:
463
+ preped_clip = prepare_clip_model_sets()
464
+
465
+ index_updates = ~index_no_updates
466
+ loss_sum = 0.0
467
+ loss_list_ti = []
468
+ pt_prof = None
469
+ syn_prof = None
470
+
471
+ if use_pytorch_profiler:
472
+ activities = [torch.profiler.ProfilerActivity.CPU]
473
+ if htcore:
474
+ activities.append(torch.profiler.ProfilerActivity.HPU)
475
+ pt_prof = torch.profiler.profile(
476
+ activities=activities,
477
+ schedule=torch.profiler.schedule(wait=0, warmup=0, active=1, repeat=1),
478
+ on_trace_ready=torch.profiler.tensorboard_trace_handler("./trace-log"),
479
+ record_shapes=True,
480
+ with_stack=True)
481
+ pt_prof.start()
482
+ if use_synapse_profiler:
483
+ syn_prof = SynapseProfilerApi()
484
+ if htcore:
485
+ torch.hpu.memory.reset_peak_memory_stats()
486
+ for epoch in range(math.ceil(num_steps / len(dataloader))):
487
+ unet.eval()
488
+ text_encoder.train()
489
+ for batch in dataloader:
490
+ if syn_prof and global_step == profiler_step:
491
+ syn_prof.profiler_start(TraceType.TraceDevice,0)
492
+ lr_scheduler.step()
493
+ logs={}
494
+ with torch.set_grad_enabled(True):
495
+ loss = (
496
+ loss_step(
497
+ batch,
498
+ unet,
499
+ vae,
500
+ text_encoder,
501
+ scheduler,
502
+ train_inpainting=train_inpainting,
503
+ mixed_precision=mixed_precision,
504
+ cached_latents=cached_latents,
505
+ use_torch_compile=use_torch_compile,
506
+ )
507
+ / accum_iter
508
+ )
509
+
510
+ loss.backward()
511
+ if htcore:
512
+ htcore.mark_step()
513
+ loss_list_ti.append(loss)
514
+ if global_step % accum_iter == 0:
515
+ # print gradient of text encoder embedding
516
+ embedding_grad_norm = text_encoder.get_input_embeddings().weight.grad[index_updates, :].detach().norm(dim=-1).mean().item()
517
+ #print(embedding_grad_norm)
518
+ logs['ti/embedding_grad_norm']=embedding_grad_norm
519
+ optimizer.step()
520
+ optimizer.zero_grad(set_to_none=True)
521
+ if htcore:
522
+ htcore.mark_step()
523
+
524
+ with torch.no_grad():
525
+
526
+ # normalize embeddings
527
+ if clip_ti_decay:
528
+ pre_norm = (
529
+ text_encoder.get_input_embeddings()
530
+ .weight[index_updates, :]
531
+ .norm(dim=-1, keepdim=True)
532
+ )
533
+
534
+ lambda_ = min(1.0, 100 * lr_scheduler.get_last_lr()[0])
535
+ text_encoder.get_input_embeddings().weight[
536
+ index_updates
537
+ ] = F.normalize(
538
+ text_encoder.get_input_embeddings().weight[
539
+ index_updates, :
540
+ ],
541
+ dim=-1,
542
+ ) * (
543
+ pre_norm + lambda_ * (0.4 - pre_norm)
544
+ )
545
+ #print(pre_norm)
546
+
547
+ current_norm = (
548
+ text_encoder.get_input_embeddings()
549
+ .weight[index_updates, :]
550
+ .norm(dim=-1)
551
+ )
552
+
553
+ text_encoder.get_input_embeddings().weight[
554
+ index_no_updates
555
+ ] = orig_embeds_params[index_no_updates]
556
+
557
+ if htcore:
558
+ htcore.mark_step()
559
+ #print(f"Current Norm : {current_norm}")
560
+ logs["ti/current_norm"]=current_norm.detach().mean().item()
561
+ logs['ti/pre_norm']=pre_norm.detach().mean().item()
562
+
563
+ if pt_prof and global_step == profiler_step:
564
+ pt_prof.step()
565
+ if syn_prof and global_step == profiler_step:
566
+ syn_prof.profiler_sync(0)
567
+ syn_prof.profiler_stop(TraceType.TraceDevice, 0)
568
+ syn_prof.profiler_get_trace_json(TraceType.TraceDevice, 0)
569
+ if(global_step % print_freq == 0):
570
+ progress_bar.update(print_freq)
571
+ logs.update({
572
+ "ti/step_loss": loss.detach().item(),
573
+ "ti/lr": lr_scheduler.get_last_lr()[0],
574
+ })
575
+ for loss_t in loss_list_ti:
576
+ loss_sum += loss_t.detach().item()
577
+ loss_list_ti.clear()
578
+ global_step += 1
579
+ #progress_bar.set_postfix(**logs)
580
+
581
+ if global_step % save_steps == 0:
582
+ save_all(
583
+ unet=unet,
584
+ text_encoder=text_encoder,
585
+ placeholder_token_ids=placeholder_token_ids,
586
+ placeholder_tokens=placeholder_tokens,
587
+ save_path=os.path.join(
588
+ save_path, f"step_inv_{global_step}.{'safetensors' if text_encoder.device.type != 'cpu' else 'pt'}"
589
+ ),
590
+ safe_form=True if text_encoder.device.type != "cpu" else False,
591
+ save_lora=False,
592
+ )
593
+ if log_wandb or log_tb:
594
+ with torch.no_grad():
595
+ pipe = StableDiffusionPipeline(
596
+ vae=vae,
597
+ text_encoder=text_encoder,
598
+ tokenizer=tokenizer,
599
+ unet=unet,
600
+ scheduler=scheduler,
601
+ safety_checker=None,
602
+ feature_extractor=None,
603
+ )
604
+
605
+ # open all images in test_image_path
606
+ images = []
607
+ for file in os.listdir(test_image_path):
608
+ if (
609
+ file.lower().endswith(".png")
610
+ or file.lower().endswith(".jpg")
611
+ or file.lower().endswith(".jpeg")
612
+ ):
613
+ images.append(
614
+ Image.open(os.path.join(test_image_path, file))
615
+ )
616
+
617
+ logs.update({"ti/loss": loss_sum / save_steps})
618
+ loss_sum = 0.0
619
+ evaluation_metrics,images = evaluate_pipe(
620
+ pipe,
621
+ target_images=images,
622
+ class_token=class_token,
623
+ learnt_token="".join(placeholder_tokens),
624
+ n_test=wandb_log_prompt_cnt,
625
+ n_step=50,
626
+ clip_model_sets=preped_clip
627
+ )
628
+ if log_wandb:
629
+ logs.update({ f'{prompt}': wandb.Image(img) for e,(img,prompt) in enumerate(images) })
630
+ elif log_tb:
631
+ logs.update({"ti/images": images})
632
+ logs.update({ f'ti/{k}':v for k,v in evaluation_metrics.items() })
633
+ # logs.update({ f'image_{e}': wandb.Image(img,caption=prompt) for e,(img,prompt) in enumerate(v) if k=='images' else f'ti/{k}':v for k,v in
634
+ # evaluate_pipe(
635
+ # pipe,
636
+ # target_images=images,
637
+ # class_token=class_token,
638
+ # learnt_token="".join(placeholder_tokens),
639
+ # n_test=wandb_log_prompt_cnt,
640
+ # n_step=50,
641
+ # clip_model_sets=preped_clip,
642
+ # ).items()}
643
+ # )
644
+
645
+ progress_bar.set_postfix(**logs)
646
+ progress_bar.refresh()
647
+ if log_wandb:
648
+ wandb.log(logs)
649
+ elif log_tb:
650
+ for key, value in logs.items():
651
+ if key == "ti/images":
652
+ for img, prompt in images:
653
+ writer.add_image(prompt, pil_to_tensor(img), global_step)
654
+ elif key == "ti/image_alignment_all" or key == "ti/text_alignment_all":
655
+ for i in range(len(value)):
656
+ writer.add_scalar(key+str(i), value[i], global_step)
657
+ else:
658
+ writer.add_scalar(key, value, global_step)
659
+ if global_step >= num_steps:
660
+ break
661
+ if pt_prof:
662
+ pt_prof.stop()
663
+
664
+ if htcore:
665
+ max_memory = torch.hpu.memory.max_memory_allocated() / 2 ** 30
666
+ print(f"Inversion Training Peak memory {(max_memory):.2f} GiB")
667
+
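The inversion loop above keeps the learned token embeddings from drifting by clamping their norm when clip_ti_decay is enabled. Below is a minimal, self-contained sketch of that step; the 0.4 target norm and the lambda ramp mirror the code above, everything else (tensor names, toy sizes) is illustrative.

```python
import torch
import torch.nn.functional as F

def clamp_ti_norm(embeddings: torch.Tensor, index_updates: torch.Tensor,
                  last_lr: float, target_norm: float = 0.4) -> None:
    # Rescale only the trainable rows toward target_norm; lambda_ ramps with the LR.
    with torch.no_grad():
        pre_norm = embeddings[index_updates, :].norm(dim=-1, keepdim=True)
        lambda_ = min(1.0, 100 * last_lr)
        embeddings[index_updates] = F.normalize(
            embeddings[index_updates, :], dim=-1
        ) * (pre_norm + lambda_ * (target_norm - pre_norm))

# Toy usage: two of four "token" embeddings are trainable.
emb = torch.randn(4, 8)
mask = torch.tensor([False, True, False, True])
clamp_ti_norm(emb, mask, last_lr=5e-4)
```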
668
+ def perform_tuning(
669
+ unet,
670
+ vae,
671
+ text_encoder,
672
+ dataloader,
673
+ num_steps,
674
+ scheduler,
675
+ optimizer,
676
+ save_steps: int,
677
+ placeholder_token_ids,
678
+ placeholder_tokens,
679
+ save_path,
680
+ lr_scheduler_lora,
681
+ lora_unet_target_modules,
682
+ lora_clip_target_modules,
683
+ mask_temperature,
684
+ out_name: str,
685
+ tokenizer,
686
+ test_image_path: str,
687
+ cached_latents: bool,
688
+ log_wandb: bool = False,
689
+ wandb_log_prompt_cnt: int = 10,
690
+ class_token: str = "person",
691
+ train_inpainting: bool = False,
692
+ htcore = None,
693
+ log_tb: bool = False,
694
+ writer = None,
695
+ use_synapse_profiler: bool = False,
696
+ use_pytorch_profiler: bool = False,
697
+ profiler_step: int = 3,
698
+ profile_tuning: bool = False,
699
+ print_freq: int =50,
700
+ use_fused_clip_norm: bool = True,
701
+ use_torch_compile:bool = False,
702
+ ):
703
+
704
+ progress_bar = tqdm(range(num_steps),mininterval=0,miniters=10, smoothing=1)
705
+ progress_bar.set_description("Steps")
706
+ global_step = 0
707
+
708
+ if not profile_tuning:
709
+ use_synapse_profiler = False
710
+ use_pytorch_profiler = False
711
+
712
+ weight_dtype = torch.float16
713
+ if htcore and not use_torch_compile:
714
+ htcore.hpu.ModuleCacher(max_graphs=5)(model=text_encoder,
715
+ inplace=True,
716
+ use_lfu=False,
717
+ verbose=False)
718
+ unet.train()
719
+ text_encoder.train()
720
+
721
+ if log_wandb or log_tb:
722
+ preped_clip = prepare_clip_model_sets()
723
+
724
+ loss_sum = 0.0
725
+ loss_list = []
726
+
727
+ pt_prof = None
728
+ syn_prof = None
729
+ if use_pytorch_profiler:
730
+ activities = [torch.profiler.ProfilerActivity.CPU]
731
+ if htcore:
732
+ activities.append(torch.profiler.ProfilerActivity.HPU)
733
+ pt_prof = torch.profiler.profile(
734
+ activities=activities,
735
+ schedule=torch.profiler.schedule(wait=0, warmup=0, active=1, repeat=1),
736
+ on_trace_ready=torch.profiler.tensorboard_trace_handler("./trace-log"),
737
+ record_shapes=True,
738
+ with_stack=True)
739
+ pt_prof.start()
740
+ if use_synapse_profiler:
741
+ syn_prof = SynapseProfilerApi()
742
+
743
+ if use_fused_clip_norm and htcore:
744
+ from habana_frameworks.torch.hpex.normalization import FusedClipNorm
745
+ fused_clip_norm = FusedClipNorm(itertools.chain(unet.parameters(), text_encoder.parameters()), 1.0)
746
+ if htcore:
747
+ torch.hpu.memory.reset_peak_memory_stats()
748
+ for epoch in range(math.ceil(num_steps / len(dataloader))):
749
+ for batch in dataloader:
750
+ if syn_prof and global_step == profiler_step:
751
+ syn_prof.profiler_start(TraceType.TraceDevice,0)
752
+ lr_scheduler_lora.step()
753
+
754
+ optimizer.zero_grad(set_to_none=True)
755
+
756
+ loss = loss_step(
757
+ batch,
758
+ unet,
759
+ vae,
760
+ text_encoder,
761
+ scheduler,
762
+ train_inpainting=train_inpainting,
763
+ t_mutliplier=0.8,
764
+ mixed_precision=True,
765
+ mask_temperature=mask_temperature,
766
+ cached_latents=cached_latents,
767
+ use_torch_compile=use_torch_compile,
768
+ )
769
+ loss_list.append(loss)
770
+
771
+ loss.backward()
772
+ if htcore:
773
+ htcore.mark_step()
774
+ if use_fused_clip_norm:
775
+ fused_clip_norm.clip_norm(itertools.chain(unet.parameters(), text_encoder.parameters()))
776
+ else:
777
+ torch.nn.utils.clip_grad_norm_(
778
+ itertools.chain(unet.parameters(), text_encoder.parameters()), 1.0
779
+ )
780
+ optimizer.step()
781
+ if htcore:
782
+ htcore.mark_step()
783
+ if(global_step % print_freq == 0):
784
+ progress_bar.update(print_freq)
785
+ logs = {
786
+ "tuning/step_loss": loss.detach().item(),
787
+ "tuning/lr": lr_scheduler_lora.get_last_lr()[0],
788
+ }
789
+ for loss_t in loss_list:
790
+ loss_sum += loss_t.detach().item()
791
+ loss_list.clear()
792
+
793
+ if pt_prof and global_step == profiler_step:
794
+ pt_prof.step()
795
+ if syn_prof and global_step == profiler_step:
796
+ syn_prof.profiler_sync(0)
797
+ syn_prof.profiler_stop(TraceType.TraceDevice, 0)
798
+ syn_prof.profiler_get_trace_json(TraceType.TraceDevice, 0)
799
+ #progress_bar.set_postfix(**logs)
800
+ global_step += 1
801
+
802
+ if global_step % save_steps == 0:
803
+ save_all(
804
+ unet,
805
+ text_encoder,
806
+ placeholder_token_ids=placeholder_token_ids,
807
+ placeholder_tokens=placeholder_tokens,
808
+ save_path=os.path.join(
809
+ save_path, f"step_{global_step}.{'safetensors' if text_encoder.device.type != 'cpu' else 'pt'}"
810
+ ),
811
+ safe_form=True if text_encoder.device.type != "cpu" else False,
812
+ target_replace_module_text=lora_clip_target_modules,
813
+ target_replace_module_unet=lora_unet_target_modules,
814
+ )
815
+ unet_moved = (
816
+ torch.tensor(list(itertools.chain(*inspect_lora(unet).values())))
817
+ .mean()
818
+ .item()
819
+ )
820
+
821
+ #print("LORA Unet Moved", unet_moved)
822
+ clip_moved = (
823
+ torch.tensor(
824
+ list(itertools.chain(*inspect_lora(text_encoder).values()))
825
+ )
826
+ .mean()
827
+ .item()
828
+ )
829
+
830
+ #print("LORA CLIP Moved", clip_moved)
831
+
832
+ if log_wandb or log_tb:
833
+ with torch.no_grad():
834
+ pipe = StableDiffusionPipeline(
835
+ vae=vae,
836
+ text_encoder=text_encoder,
837
+ tokenizer=tokenizer,
838
+ unet=unet,
839
+ scheduler=scheduler,
840
+ safety_checker=None,
841
+ feature_extractor=None,
842
+ )
843
+
844
+ # open all images in test_image_path
845
+ images = []
846
+ for file in os.listdir(test_image_path):
847
+ if file.endswith(".png") or file.endswith(".jpg"):
848
+ images.append(
849
+ Image.open(os.path.join(test_image_path, file))
850
+ )
851
+
852
+ #wandb.log({"loss": loss_sum / save_steps, "unet_lora_step":unet_moved,"clip_lora_moved":clip_moved})
853
+ logs.update({"tuning/loss": loss_sum / save_steps, "tuning/unet_lora_step":unet_moved,"tuning/clip_lora_moved":clip_moved})
854
+ loss_sum = 0.0
855
+ #wandb.log(
856
+ evaluation_metrics,images = evaluate_pipe(
857
+ pipe,
858
+ target_images=images,
859
+ class_token=class_token,
860
+ learnt_token="".join(placeholder_tokens),
861
+ n_test=wandb_log_prompt_cnt,
862
+ n_step=50,
863
+ clip_model_sets=preped_clip
864
+ )
865
+ if log_wandb:
866
+ logs.update({ f'{prompt}': wandb.Image(img) for e,(img,prompt) in enumerate(images) } )
867
+ elif log_tb:
868
+ logs.update({"tuning/images": images})
869
+ logs.update({ f'tuning/{k}':v for k,v in evaluation_metrics.items() })
870
+
871
+ # logs.update({ ( f'image_{e}': wandb.Image(img,caption=prompt) for e,(img,prompt) in enumerate(v) ) if k=='images' else f'tuning/{k}':v for k,v in evaluate_pipe(
872
+ # pipe,
873
+ # target_images=images,
874
+ # class_token=class_token,
875
+ # learnt_token="".join(placeholder_tokens),
876
+ # n_test=wandb_log_prompt_cnt,
877
+ # n_step=50,
878
+ # clip_model_sets=preped_clip,
879
+ # ).items()}
880
+ # )
881
+
882
+ progress_bar.set_postfix(**logs)
883
+ progress_bar.refresh()
884
+
885
+ if log_wandb:
886
+ wandb.log(logs)
887
+ elif log_tb:
888
+ for key, value in logs.items():
889
+ if key == "tuning/images":
890
+ for img, prompt in images:
891
+ writer.add_image(prompt, pil_to_tensor(img), global_step)
892
+ elif key == "tuning/image_alignment_all" or key == "tuning/text_alignment_all":
893
+ for i in range(len(value)):
894
+ writer.add_scalar(key+str(i), value[i], global_step)
895
+ else:
896
+ writer.add_scalar(key, value, global_step)
897
+
898
+ if global_step >= num_steps:
899
+ break
900
+
901
+ if htcore:
902
+ max_memory = torch.hpu.memory.max_memory_allocated() / 2 ** 30
903
+ print(f"Average Peak memory {(max_memory):.2f} GiB")
904
+ if pt_prof:
905
+ pt_prof.stop()
906
+ save_all(
907
+ unet,
908
+ text_encoder,
909
+ placeholder_token_ids=placeholder_token_ids,
910
+ placeholder_tokens=placeholder_tokens,
911
+ save_path=os.path.join(save_path, f"{out_name}.{'safetensors' if text_encoder.device.type != 'cpu' else 'pt'}"),
912
+ safe_form=True if text_encoder.device.type != "cpu" else False,
913
+ target_replace_module_text=lora_clip_target_modules,
914
+ target_replace_module_unet=lora_unet_target_modules,
915
+ )
916
+
917
+
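Gradient clipping in the tuning loop above switches between Habana's FusedClipNorm and the stock PyTorch utility. The sketch below shows that selection with the same 1.0 max-norm; the fused path requires habana_frameworks and is only indicated by a comment, while the fallback is plain PyTorch.

```python
import itertools
import torch
import torch.nn as nn

def clip_gradients(unet: nn.Module, text_encoder: nn.Module, fused_clip_norm=None):
    params = itertools.chain(unet.parameters(), text_encoder.parameters())
    if fused_clip_norm is not None:
        # A habana_frameworks.torch.hpex.normalization.FusedClipNorm instance,
        # constructed once over the same parameter chain as in perform_tuning.
        fused_clip_norm.clip_norm(params)
    else:
        torch.nn.utils.clip_grad_norm_(params, 1.0)

# Toy usage with the pure-PyTorch path.
a, b = nn.Linear(4, 4), nn.Linear(4, 4)
(a(torch.randn(2, 4)).sum() + b(torch.randn(2, 4)).sum()).backward()
clip_gradients(a, b)
```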
918
+ def train(
919
+ instance_data_dir: str,
920
+ pretrained_model_name_or_path: str,
921
+ output_dir: str,
922
+ train_text_encoder: bool = True,
923
+ pretrained_vae_name_or_path: str = None,
924
+ revision: Optional[str] = None,
925
+ perform_inversion: bool = True,
926
+ use_template: Literal[None, "object", "style"] = None,
927
+ train_inpainting: bool = False,
928
+ placeholder_tokens: str = "",
929
+ placeholder_token_at_data: Optional[str] = None,
930
+ initializer_tokens: Optional[str] = None,
931
+ seed: int = 42,
932
+ resolution: int = 512,
933
+ color_jitter: bool = True,
934
+ train_batch_size: int = 1,
935
+ sample_batch_size: int = 1,
936
+ max_train_steps_tuning: int = 1000,
937
+ max_train_steps_ti: int = 1000,
938
+ save_steps: int = 100,
939
+ gradient_accumulation_steps: int = 4,
940
+ gradient_checkpointing: bool = False,
941
+ lora_rank: int = 4,
942
+ lora_unet_target_modules={"CrossAttention", "Attention", "GEGLU"},
943
+ lora_clip_target_modules={"CLIPAttention"},
944
+ lora_dropout_p: float = 0.0,
945
+ lora_scale: float = 1.0,
946
+ use_extended_lora: bool = False,
947
+ clip_ti_decay: bool = True,
948
+ learning_rate_unet: float = 1e-4,
949
+ learning_rate_text: float = 1e-5,
950
+ learning_rate_ti: float = 5e-4,
951
+ continue_inversion: bool = False,
952
+ continue_inversion_lr: Optional[float] = None,
953
+ use_face_segmentation_condition: bool = False,
954
+ cached_latents: bool = True,
955
+ use_mask_captioned_data: bool = False,
956
+ mask_temperature: float = 1.0,
957
+ scale_lr: bool = False,
958
+ lr_scheduler: str = "linear",
959
+ lr_warmup_steps: int = 0,
960
+ lr_scheduler_lora: str = "linear",
961
+ lr_warmup_steps_lora: int = 0,
962
+ weight_decay_ti: float = 0.00,
963
+ weight_decay_lora: float = 0.001,
964
+ use_8bit_adam: bool = False,
965
+ device="cuda:0",
966
+ extra_args: Optional[dict] = None,
967
+ log_wandb: bool = False,
968
+ wandb_log_prompt_cnt: int = 10,
969
+ wandb_project_name: str = "new_pti_project",
970
+ wandb_entity: str = "new_pti_entity",
971
+ proxy_token: str = "person",
972
+ enable_xformers_memory_efficient_attention: bool = False,
973
+ out_name: str = "final_lora",
974
+ use_lazy_mode: bool = True,
975
+ use_fused_adamw: bool = True,
976
+ log_tb: bool = False,
977
+ log_dir: str = "/tmp/runs/",
978
+ use_synapse_profiler: bool = False,
979
+ use_pytorch_profiler: bool = False,
980
+ profiler_step: int = 3,
981
+ profile_ti: bool = False,
982
+ profile_tuning: bool = False,
983
+ print_freq: int = 50,
984
+ use_fused_clip_norm: bool = True,
985
+ use_torch_compile: bool = False,
986
+ ):
987
+ htcore = None
988
+ writer = None
989
+ if use_torch_compile:
990
+ if device != "hpu":
991
+ use_lazy_mode = False
992
+ assert not use_lazy_mode, "use_torch_compile and use_lazy_mode cannot be used together"
993
+
994
+ if device == "hpu":
995
+ if use_lazy_mode:
996
+ assert os.getenv('PT_HPU_LAZY_MODE') == '1' or os.getenv('PT_HPU_LAZY_MODE') is None, f"use_lazy_mode == True, but PT_HPU_LAZY_MODE={os.getenv('PT_HPU_LAZY_MODE')}. To run in lazy mode, set PT_HPU_LAZY_MODE to 1"
997
+ import habana_frameworks.torch.core as htcore
998
+ elif use_torch_compile:
999
+ assert os.getenv('PT_HPU_LAZY_MODE')== '0', f"args.use_torch_compile == True, but PT_HPU_LAZY_MODE={os.getenv('PT_HPU_LAZY_MODE')}. For torch.compile mode, set PT_HPU_LAZY_MODE to 0"
1000
+ import habana_frameworks.torch.core as htcore
1001
+ else:
1002
+ import habana_frameworks.torch.core
1003
+ # Disable hpu dynamic shape
1004
+ try:
1005
+ import habana_frameworks.torch.hpu as hthpu
1006
+ hthpu.disable_dynamic_shape()
1007
+ except ImportError:
1008
+ print("habana_frameworks could not be loaded")
1009
+ else:
1010
+ use_lazy_mode = False
1011
+ use_fused_adamw = False
1012
+ use_fused_clip_norm = False
1013
+
1014
+ torch.manual_seed(seed)
1015
+
1016
+ if log_wandb:
1017
+ wandb.init(
1018
+ project=wandb_project_name,
1019
+ entity=wandb_entity,
1020
+ name=f"steps_{max_train_steps_ti}_lr_{learning_rate_ti}_{instance_data_dir.split('/')[-1]}",
1021
+ reinit=True,
1022
+ config={
1023
+ **(extra_args if extra_args is not None else {}),
1024
+ },
1025
+ )
1026
+ elif log_tb:
1027
+ from torch.utils.tensorboard import SummaryWriter
1028
+ from datetime import datetime
1029
+ writer = SummaryWriter(log_dir=(log_dir+datetime.now().strftime("%b%d_%H-%M-%S")))
1030
+
1031
+ if output_dir is not None:
1032
+ os.makedirs(output_dir, exist_ok=True)
1033
+ # print(placeholder_tokens, initializer_tokens)
1034
+ if len(placeholder_tokens) == 0:
1035
+ placeholder_tokens = []
1036
+ print("PTI : Placeholder Tokens not given, using null token")
1037
+ else:
1038
+ placeholder_tokens = placeholder_tokens.split("|")
1039
+
1040
+ assert (
1041
+ sorted(placeholder_tokens) == placeholder_tokens
1042
+ ), f"Placeholder tokens should be sorted. Use something like {'|'.join(sorted(placeholder_tokens))}'"
1043
+
1044
+ if initializer_tokens is None:
1045
+ print("PTI : Initializer Tokens not given, doing random inits")
1046
+ initializer_tokens = ["<rand-0.017>"] * len(placeholder_tokens)
1047
+ else:
1048
+ initializer_tokens = initializer_tokens.split("|")
1049
+
1050
+ assert len(initializer_tokens) == len(
1051
+ placeholder_tokens
1052
+ ), "Unequal Initializer token for Placeholder tokens."
1053
+
1054
+ if proxy_token is not None:
1055
+ class_token = proxy_token
1056
+ class_token = "".join(initializer_tokens)
1057
+
1058
+ if placeholder_token_at_data is not None:
1059
+ tok, pat = placeholder_token_at_data.split("|")
1060
+ token_map = {tok: pat}
1061
+
1062
+ else:
1063
+ token_map = {"DUMMY": "".join(placeholder_tokens)}
1064
+
1065
+ print("PTI : Placeholder Tokens", placeholder_tokens)
1066
+ print("PTI : Initializer Tokens", initializer_tokens)
1067
+
1068
+ # get the models
1069
+ text_encoder, vae, unet, tokenizer, placeholder_token_ids = get_models(
1070
+ pretrained_model_name_or_path,
1071
+ pretrained_vae_name_or_path,
1072
+ revision,
1073
+ placeholder_tokens,
1074
+ initializer_tokens,
1075
+ device=device,
1076
+ )
1077
+
1078
+ if use_torch_compile:
1079
+ if device == "hpu":
1080
+ print("use_torch_compile : text_encoder :Model is compiled")
1081
+ text_encoder = torch.compile(text_encoder, backend="aot_hpu_training_backend")
1082
+ print("use_torch_compile : unet :Model is compiled")
1083
+ unet = torch.compile(unet, backend="aot_hpu_training_backend")
1084
+
1085
+ noise_scheduler = DDPMScheduler.from_config(
1086
+ pretrained_model_name_or_path, subfolder="scheduler"
1087
+ )
1088
+
1089
+ if gradient_checkpointing:
1090
+ unet.enable_gradient_checkpointing()
1091
+
1092
+ if enable_xformers_memory_efficient_attention:
1093
+ from diffusers.utils.import_utils import is_xformers_available
1094
+
1095
+ if is_xformers_available():
1096
+ unet.enable_xformers_memory_efficient_attention()
1097
+ else:
1098
+ raise ValueError(
1099
+ "xformers is not available. Make sure it is installed correctly"
1100
+ )
1101
+
1102
+ if scale_lr:
1103
+ unet_lr = learning_rate_unet * gradient_accumulation_steps * train_batch_size
1104
+ text_encoder_lr = (
1105
+ learning_rate_text * gradient_accumulation_steps * train_batch_size
1106
+ )
1107
+ ti_lr = learning_rate_ti * gradient_accumulation_steps * train_batch_size
1108
+ else:
1109
+ unet_lr = learning_rate_unet
1110
+ text_encoder_lr = learning_rate_text
1111
+ ti_lr = learning_rate_ti
1112
+
1113
+ train_dataset = PivotalTuningDatasetCapation(
1114
+ instance_data_root=instance_data_dir,
1115
+ token_map=token_map,
1116
+ use_template=use_template,
1117
+ tokenizer=tokenizer,
1118
+ size=resolution,
1119
+ color_jitter=color_jitter,
1120
+ use_face_segmentation_condition=use_face_segmentation_condition,
1121
+ use_mask_captioned_data=use_mask_captioned_data,
1122
+ train_inpainting=train_inpainting,
1123
+ )
1124
+
1125
+ train_dataset.blur_amount = 200
1126
+
1127
+ if train_inpainting:
1128
+ assert not cached_latents, "Cached latents not supported for inpainting"
1129
+
1130
+ train_dataloader = inpainting_dataloader(
1131
+ train_dataset, train_batch_size, tokenizer, vae, text_encoder
1132
+ )
1133
+ else:
1134
+ train_dataloader = text2img_dataloader(
1135
+ train_dataset,
1136
+ train_batch_size,
1137
+ tokenizer,
1138
+ vae,
1139
+ text_encoder,
1140
+ cached_latents=cached_latents,
1141
+ use_torch_compile=use_torch_compile
1142
+ )
1143
+
1144
+ index_no_updates = torch.arange(len(tokenizer)) != -1
1145
+ if htcore and not use_torch_compile:
1146
+ htcore.hpu.ModuleCacher(max_graphs=5)(model=unet,
1147
+ inplace=True,
1148
+ use_lfu=False,
1149
+ verbose=False)
1150
+
1151
+ for tok_id in placeholder_token_ids:
1152
+ index_no_updates[tok_id] = False
1153
+
1154
+ unet.requires_grad_(False)
1155
+ vae.requires_grad_(False)
1156
+
1157
+ params_to_freeze = itertools.chain(
1158
+ text_encoder.text_model.encoder.parameters(),
1159
+ text_encoder.text_model.final_layer_norm.parameters(),
1160
+ text_encoder.text_model.embeddings.position_embedding.parameters(),
1161
+ )
1162
+ for param in params_to_freeze:
1163
+ param.requires_grad = False
1164
+
1165
+ #if cached_latents:
1166
+ #vae = None
1167
+ # STEP 1 : Perform Inversion
1168
+ if perform_inversion:
1169
+ ti_optimizer = None
1170
+ if use_fused_adamw and htcore:
1171
+ from habana_frameworks.torch.hpex.optimizers import FusedAdamW
1172
+ ti_optimizer = FusedAdamW(text_encoder.get_input_embeddings().parameters(), lr=ti_lr, eps=1e-08, weight_decay=weight_decay_ti)
1173
+ else:
1174
+ ti_optimizer = optim.AdamW(
1175
+ text_encoder.get_input_embeddings().parameters(), #weight[~index_no_updates]],
1176
+ lr=ti_lr,
1177
+ betas=(0.9, 0.999),
1178
+ eps=1e-08,
1179
+ weight_decay=weight_decay_ti,
1180
+ )
1181
+
1182
+ lr_scheduler = get_scheduler(
1183
+ lr_scheduler,
1184
+ optimizer=ti_optimizer,
1185
+ num_warmup_steps=lr_warmup_steps,
1186
+ num_training_steps=max_train_steps_ti,
1187
+ )
1188
+
1189
+ train_inversion(
1190
+ unet,
1191
+ vae,
1192
+ text_encoder,
1193
+ train_dataloader,
1194
+ max_train_steps_ti,
1195
+ cached_latents=cached_latents,
1196
+ accum_iter=gradient_accumulation_steps,
1197
+ scheduler=noise_scheduler,
1198
+ index_no_updates=index_no_updates,
1199
+ optimizer=ti_optimizer,
1200
+ lr_scheduler=lr_scheduler,
1201
+ save_steps=save_steps,
1202
+ placeholder_tokens=placeholder_tokens,
1203
+ placeholder_token_ids=placeholder_token_ids,
1204
+ save_path=output_dir,
1205
+ test_image_path=instance_data_dir,
1206
+ log_wandb=log_wandb,
1207
+ wandb_log_prompt_cnt=wandb_log_prompt_cnt,
1208
+ class_token=class_token,
1209
+ train_inpainting=train_inpainting,
1210
+ mixed_precision=False,
1211
+ tokenizer=tokenizer,
1212
+ clip_ti_decay=clip_ti_decay,
1213
+ htcore=htcore,
1214
+ log_tb=log_tb,
1215
+ writer=writer,
1216
+ use_synapse_profiler=use_synapse_profiler,
1217
+ use_pytorch_profiler=use_pytorch_profiler,
1218
+ profiler_step=profiler_step,
1219
+ profile_ti=profile_ti,
1220
+ print_freq=print_freq,
1221
+ use_torch_compile=use_torch_compile,
1222
+ )
1223
+
1224
+ del ti_optimizer
1225
+
1226
+ # Next perform Tuning with LoRA:
1227
+ if not use_extended_lora:
1228
+ unet_lora_params, _ = inject_trainable_lora(
1229
+ unet,
1230
+ r=lora_rank,
1231
+ target_replace_module=lora_unet_target_modules,
1232
+ dropout_p=lora_dropout_p,
1233
+ scale=lora_scale,
1234
+ )
1235
+ else:
1236
+ print("PTI : USING EXTENDED UNET!!!")
1237
+ lora_unet_target_modules = (
1238
+ lora_unet_target_modules | UNET_EXTENDED_TARGET_REPLACE
1239
+ )
1240
+ print("PTI : Will replace modules: ", lora_unet_target_modules)
1241
+
1242
+ unet_lora_params, _ = inject_trainable_lora_extended(
1243
+ unet, r=lora_rank, target_replace_module=lora_unet_target_modules
1244
+ )
1245
+ print(f"PTI : has {len(unet_lora_params)} lora")
1246
+
1247
+ print("PTI : Before training:")
1248
+ inspect_lora(unet)
1249
+
1250
+ params_to_optimize = [
1251
+ {"params": itertools.chain(*unet_lora_params), "lr": unet_lr},
1252
+ ]
1253
+
1254
+ text_encoder.requires_grad_(False)
1255
+
1256
+ if continue_inversion:
1257
+ params_to_optimize += [
1258
+ {
1259
+ "params": text_encoder.get_input_embeddings().parameters(), #.weight[~index_no_updates]],
1260
+ "lr": continue_inversion_lr
1261
+ if continue_inversion_lr is not None
1262
+ else ti_lr,
1263
+ }
1264
+ ]
1265
+ text_encoder.requires_grad_(True)
1266
+ params_to_freeze = itertools.chain(
1267
+ text_encoder.text_model.encoder.parameters(),
1268
+ text_encoder.text_model.final_layer_norm.parameters(),
1269
+ text_encoder.text_model.embeddings.position_embedding.parameters(),
1270
+ )
1271
+ for param in params_to_freeze:
1272
+ param.requires_grad = False
1273
+ else:
1274
+ text_encoder.requires_grad_(False)
1275
+ if train_text_encoder:
1276
+ text_encoder_lora_params, _ = inject_trainable_lora(
1277
+ text_encoder,
1278
+ target_replace_module=lora_clip_target_modules,
1279
+ r=lora_rank,
1280
+ )
1281
+ params_to_optimize += [
1282
+ {
1283
+ "params": itertools.chain(*text_encoder_lora_params),
1284
+ "lr": text_encoder_lr,
1285
+ }
1286
+ ]
1287
+ inspect_lora(text_encoder)
1288
+
1289
+ lora_optimizers = None
1290
+ if use_fused_adamw and htcore:
1291
+ from habana_frameworks.torch.hpex.optimizers import FusedAdamW
1292
+ lora_optimizers = FusedAdamW(params_to_optimize, eps=1e-08, weight_decay=weight_decay_lora)
1293
+ else:
1294
+ lora_optimizers = optim.AdamW(params_to_optimize, weight_decay=weight_decay_lora)
1295
+
1296
+ unet.train()
1297
+ if train_text_encoder:
1298
+ text_encoder.train()
1299
+
1300
+ train_dataset.blur_amount = 70
1301
+
1302
+ lr_scheduler_lora = get_scheduler(
1303
+ lr_scheduler_lora,
1304
+ optimizer=lora_optimizers,
1305
+ num_warmup_steps=lr_warmup_steps_lora,
1306
+ num_training_steps=max_train_steps_tuning,
1307
+ )
1308
+
1309
+ perform_tuning(
1310
+ unet,
1311
+ vae,
1312
+ text_encoder,
1313
+ train_dataloader,
1314
+ max_train_steps_tuning,
1315
+ cached_latents=cached_latents,
1316
+ scheduler=noise_scheduler,
1317
+ optimizer=lora_optimizers,
1318
+ save_steps=save_steps,
1319
+ placeholder_tokens=placeholder_tokens,
1320
+ placeholder_token_ids=placeholder_token_ids,
1321
+ save_path=output_dir,
1322
+ lr_scheduler_lora=lr_scheduler_lora,
1323
+ lora_unet_target_modules=lora_unet_target_modules,
1324
+ lora_clip_target_modules=lora_clip_target_modules,
1325
+ mask_temperature=mask_temperature,
1326
+ tokenizer=tokenizer,
1327
+ out_name=out_name,
1328
+ test_image_path=instance_data_dir,
1329
+ log_wandb=log_wandb,
1330
+ wandb_log_prompt_cnt=wandb_log_prompt_cnt,
1331
+ class_token=class_token,
1332
+ train_inpainting=train_inpainting,
1333
+ htcore=htcore,
1334
+ log_tb=log_tb,
1335
+ writer=writer,
1336
+ use_synapse_profiler=use_synapse_profiler,
1337
+ use_pytorch_profiler=use_pytorch_profiler,
1338
+ profiler_step=profiler_step,
1339
+ profile_tuning=profile_tuning,
1340
+ print_freq=print_freq,
1341
+ use_fused_clip_norm=use_fused_clip_norm,
1342
+ use_torch_compile=use_torch_compile,
1343
+ )
1344
+ if writer:
1345
+ writer.close()
1346
+
1347
+
1348
+ def main():
1349
+ fire.Fire(train)
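Since the entry point just wraps train() with fire.Fire, the same routine can also be driven programmatically. A hypothetical invocation is shown below; all paths, the model id, and the step counts are illustrative and not taken from the repository's scripts.

```python
from lora_diffusion.cli_lora_pti import train

train(
    instance_data_dir="./data/my_subject",          # folder of training images
    pretrained_model_name_or_path="stabilityai/stable-diffusion-2-1-base",
    output_dir="./output",
    placeholder_tokens="<s1>|<s2>",                  # must be sorted, '|'-separated
    use_template="object",
    max_train_steps_ti=500,
    max_train_steps_tuning=500,
    save_steps=100,
    device="hpu",                                    # or "cuda:0" / "cpu"
)
```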
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/cli_pt_to_safetensors.py ADDED
@@ -0,0 +1,85 @@
1
+ import os
2
+
3
+ import fire
4
+ import torch
5
+ from lora_diffusion import (
6
+ DEFAULT_TARGET_REPLACE,
7
+ TEXT_ENCODER_DEFAULT_TARGET_REPLACE,
8
+ UNET_DEFAULT_TARGET_REPLACE,
9
+ convert_loras_to_safeloras_with_embeds,
10
+ safetensors_available,
11
+ )
12
+
13
+ _target_by_name = {
14
+ "unet": UNET_DEFAULT_TARGET_REPLACE,
15
+ "text_encoder": TEXT_ENCODER_DEFAULT_TARGET_REPLACE,
16
+ }
17
+
18
+
19
+ def convert(*paths, outpath, overwrite=False, **settings):
20
+ """
21
+ Converts one or more PyTorch LoRA and/or Textual Embedding files
22
+ into a single safetensors file.
23
+
24
+ Pass all the input paths as arguments. Whether they are Textual Embedding
25
+ or Lora models will be auto-detected.
26
+
27
+ For Lora models, their name will be taken from the path, i.e.
28
+ "lora_weight.pt" => unet
29
+ "lora_weight.text_encoder.pt" => text_encoder
30
+
31
+ You can also set target_modules and/or rank by providing an argument prefixed
32
+ by the name.
33
+
34
+ So a complete example might be something like:
35
+
36
+ ```
37
+ python -m lora_diffusion.cli_pt_to_safetensors lora_weight.* --outpath lora_weight.safetensor --unet.rank 8
38
+ ```
39
+ """
40
+ modelmap = {}
41
+ embeds = {}
42
+
43
+ if os.path.exists(outpath) and not overwrite:
44
+ raise ValueError(
45
+ f"Output path {outpath} already exists, and overwrite is not True"
46
+ )
47
+
48
+ for path in paths:
49
+ data = torch.load(path)
50
+
51
+ if isinstance(data, dict):
52
+ print(f"Loading textual inversion embeds {data.keys()} from {path}")
53
+ embeds.update(data)
54
+
55
+ else:
56
+ name_parts = os.path.split(path)[1].split(".")
57
+ name = name_parts[-2] if len(name_parts) > 2 else "unet"
58
+
59
+ model_settings = {
60
+ "target_modules": _target_by_name.get(name, DEFAULT_TARGET_REPLACE),
61
+ "rank": 4,
62
+ }
63
+
64
+ prefix = f"{name}."
65
+
66
+ arg_settings = { k[len(prefix) :]: v for k, v in settings.items() if k.startswith(prefix) }
67
+ model_settings = { **model_settings, **arg_settings }
68
+
69
+ print(f"Loading Lora for {name} from {path} with settings {model_settings}")
70
+
71
+ modelmap[name] = (
72
+ path,
73
+ model_settings["target_modules"],
74
+ model_settings["rank"],
75
+ )
76
+
77
+ convert_loras_to_safeloras_with_embeds(modelmap, embeds, outpath)
78
+
79
+
80
+ def main():
81
+ fire.Fire(convert)
82
+
83
+
84
+ if __name__ == "__main__":
85
+ main()
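The convert() helper above resolves per-model settings from keyword arguments prefixed with the model name (for example unet.rank). A small stand-alone sketch of that prefix matching follows; the concrete values are illustrative.

```python
# Mirrors the arg_settings construction in convert(): keys prefixed with
# "<name>." override the defaults for that model only.
defaults = {"target_modules": {"CrossAttention", "Attention", "GEGLU"}, "rank": 4}
settings = {"unet.rank": 8, "text_encoder.rank": 2}  # as passed on the CLI

name = "unet"
prefix = f"{name}."
arg_settings = {k[len(prefix):]: v for k, v in settings.items() if k.startswith(prefix)}
model_settings = {**defaults, **arg_settings}
assert model_settings["rank"] == 8
```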
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/cli_svd.py ADDED
@@ -0,0 +1,146 @@
1
+ import fire
2
+ from diffusers import StableDiffusionPipeline
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+ from .lora import (
7
+ save_all,
8
+ _find_modules,
9
+ LoraInjectedConv2d,
10
+ LoraInjectedLinear,
11
+ inject_trainable_lora,
12
+ inject_trainable_lora_extended,
13
+ )
14
+
15
+
16
+ def _iter_lora(model):
17
+ for module in model.modules():
18
+ if isinstance(module, LoraInjectedConv2d) or isinstance(
19
+ module, LoraInjectedLinear
20
+ ):
21
+ yield module
22
+
23
+
24
+ def overwrite_base(base_model, tuned_model, rank, clamp_quantile):
25
+ device = base_model.device
26
+ dtype = base_model.dtype
27
+
28
+ for lor_base, lor_tune in zip(_iter_lora(base_model), _iter_lora(tuned_model)):
29
+
30
+ if isinstance(lor_base, LoraInjectedLinear):
31
+ residual = lor_tune.linear.weight.data - lor_base.linear.weight.data
32
+ # SVD on residual
33
+ print("Distill Linear shape ", residual.shape)
34
+ residual = residual.float()
35
+ U, S, Vh = torch.linalg.svd(residual)
36
+ U = U[:, :rank]
37
+ S = S[:rank]
38
+ U = U @ torch.diag(S)
39
+
40
+ Vh = Vh[:rank, :]
41
+
42
+ dist = torch.cat([U.flatten(), Vh.flatten()])
43
+ hi_val = torch.quantile(dist, clamp_quantile)
44
+ low_val = -hi_val
45
+
46
+ U = U.clamp(low_val, hi_val)
47
+ Vh = Vh.clamp(low_val, hi_val)
48
+
49
+ assert lor_base.lora_up.weight.shape == U.shape
50
+ assert lor_base.lora_down.weight.shape == Vh.shape
51
+
52
+ lor_base.lora_up.weight.data = U.to(device=device, dtype=dtype)
53
+ lor_base.lora_down.weight.data = Vh.to(device=device, dtype=dtype)
54
+
55
+ if isinstance(lor_base, LoraInjectedConv2d):
56
+ residual = lor_tune.conv.weight.data - lor_base.conv.weight.data
57
+ print("Distill Conv shape ", residual.shape)
58
+
59
+ residual = residual.float()
60
+ residual = residual.flatten(start_dim=1)
61
+
62
+ # SVD on residual
63
+ U, S, Vh = torch.linalg.svd(residual)
64
+ U = U[:, :rank]
65
+ S = S[:rank]
66
+ U = U @ torch.diag(S)
67
+
68
+ Vh = Vh[:rank, :]
69
+
70
+ dist = torch.cat([U.flatten(), Vh.flatten()])
71
+ hi_val = torch.quantile(dist, clamp_quantile)
72
+ low_val = -hi_val
73
+
74
+ U = U.clamp(low_val, hi_val)
75
+ Vh = Vh.clamp(low_val, hi_val)
76
+
77
+ # U is (out_channels, rank) with 1x1 conv. So,
78
+ U = U.reshape(U.shape[0], U.shape[1], 1, 1)
79
+ # V is (rank, in_channels * kernel_size1 * kernel_size2)
80
+ # now reshape:
81
+ Vh = Vh.reshape(
82
+ Vh.shape[0],
83
+ lor_base.conv.in_channels,
84
+ lor_base.conv.kernel_size[0],
85
+ lor_base.conv.kernel_size[1],
86
+ )
87
+
88
+ assert lor_base.lora_up.weight.shape == U.shape
89
+ assert lor_base.lora_down.weight.shape == Vh.shape
90
+
91
+ lor_base.lora_up.weight.data = U.to(device=device, dtype=dtype)
92
+ lor_base.lora_down.weight.data = Vh.to(device=device, dtype=dtype)
93
+
94
+
95
+ def svd_distill(
96
+ target_model: str,
97
+ base_model: str,
98
+ rank: int = 4,
99
+ clamp_quantile: float = 0.99,
100
+ device: str = "cuda:0",
101
+ save_path: str = "svd_distill.safetensors",
102
+ ):
103
+ pipe_base = StableDiffusionPipeline.from_pretrained(
104
+ base_model, torch_dtype=torch.float16
105
+ ).to(device)
106
+
107
+ pipe_tuned = StableDiffusionPipeline.from_pretrained(
108
+ target_model, torch_dtype=torch.float16
109
+ ).to(device)
110
+
111
+ # Inject unet
112
+ _ = inject_trainable_lora_extended(pipe_base.unet, r=rank)
113
+ _ = inject_trainable_lora_extended(pipe_tuned.unet, r=rank)
114
+
115
+ overwrite_base(
116
+ pipe_base.unet, pipe_tuned.unet, rank=rank, clamp_quantile=clamp_quantile
117
+ )
118
+
119
+ # Inject text encoder
120
+ _ = inject_trainable_lora(
121
+ pipe_base.text_encoder, r=rank, target_replace_module={"CLIPAttention"}
122
+ )
123
+ _ = inject_trainable_lora(
124
+ pipe_tuned.text_encoder, r=rank, target_replace_module={"CLIPAttention"}
125
+ )
126
+
127
+ overwrite_base(
128
+ pipe_base.text_encoder,
129
+ pipe_tuned.text_encoder,
130
+ rank=rank,
131
+ clamp_quantile=clamp_quantile,
132
+ )
133
+
134
+ save_all(
135
+ unet=pipe_base.unet,
136
+ text_encoder=pipe_base.text_encoder,
137
+ placeholder_token_ids=None,
138
+ placeholder_tokens=None,
139
+ save_path=save_path,
140
+ save_lora=True,
141
+ save_ti=False,
142
+ )
143
+
144
+
145
+ def main():
146
+ fire.Fire(svd_distill)
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/dataset.py ADDED
@@ -0,0 +1,311 @@
1
+ import random
2
+ from pathlib import Path
3
+ from typing import Dict, List, Optional, Tuple, Union
4
+
5
+ from PIL import Image
6
+ from torch import zeros_like
7
+ from torch.utils.data import Dataset
8
+ from torchvision import transforms
9
+ import glob
10
+ from .preprocess_files import face_mask_google_mediapipe
11
+
12
+ OBJECT_TEMPLATE = [
13
+ "a photo of a {}",
14
+ "a rendering of a {}",
15
+ "a cropped photo of the {}",
16
+ "the photo of a {}",
17
+ "a photo of a clean {}",
18
+ "a photo of a dirty {}",
19
+ "a dark photo of the {}",
20
+ "a photo of my {}",
21
+ "a photo of the cool {}",
22
+ "a close-up photo of a {}",
23
+ "a bright photo of the {}",
24
+ "a cropped photo of a {}",
25
+ "a photo of the {}",
26
+ "a good photo of the {}",
27
+ "a photo of one {}",
28
+ "a close-up photo of the {}",
29
+ "a rendition of the {}",
30
+ "a photo of the clean {}",
31
+ "a rendition of a {}",
32
+ "a photo of a nice {}",
33
+ "a good photo of a {}",
34
+ "a photo of the nice {}",
35
+ "a photo of the small {}",
36
+ "a photo of the weird {}",
37
+ "a photo of the large {}",
38
+ "a photo of a cool {}",
39
+ "a photo of a small {}",
40
+ ]
41
+
42
+ STYLE_TEMPLATE = [
43
+ "a painting in the style of {}",
44
+ "a rendering in the style of {}",
45
+ "a cropped painting in the style of {}",
46
+ "the painting in the style of {}",
47
+ "a clean painting in the style of {}",
48
+ "a dirty painting in the style of {}",
49
+ "a dark painting in the style of {}",
50
+ "a picture in the style of {}",
51
+ "a cool painting in the style of {}",
52
+ "a close-up painting in the style of {}",
53
+ "a bright painting in the style of {}",
54
+ "a cropped painting in the style of {}",
55
+ "a good painting in the style of {}",
56
+ "a close-up painting in the style of {}",
57
+ "a rendition in the style of {}",
58
+ "a nice painting in the style of {}",
59
+ "a small painting in the style of {}",
60
+ "a weird painting in the style of {}",
61
+ "a large painting in the style of {}",
62
+ ]
63
+
64
+ NULL_TEMPLATE = ["{}"]
65
+
66
+ TEMPLATE_MAP = {
67
+ "object": OBJECT_TEMPLATE,
68
+ "style": STYLE_TEMPLATE,
69
+ "null": NULL_TEMPLATE,
70
+ }
71
+
72
+
73
+ def _randomset(lis):
74
+ ret = []
75
+ for i in range(len(lis)):
76
+ if random.random() < 0.5:
77
+ ret.append(lis[i])
78
+ return ret
79
+
80
+
81
+ def _shuffle(lis):
82
+
83
+ return random.sample(lis, len(lis))
84
+
85
+
86
+ def _get_cutout_holes(
87
+ height,
88
+ width,
89
+ min_holes=8,
90
+ max_holes=32,
91
+ min_height=16,
92
+ max_height=128,
93
+ min_width=16,
94
+ max_width=128,
95
+ ):
96
+ holes = []
97
+ for _n in range(random.randint(min_holes, max_holes)):
98
+ hole_height = random.randint(min_height, max_height)
99
+ hole_width = random.randint(min_width, max_width)
100
+ y1 = random.randint(0, height - hole_height)
101
+ x1 = random.randint(0, width - hole_width)
102
+ y2 = y1 + hole_height
103
+ x2 = x1 + hole_width
104
+ holes.append((x1, y1, x2, y2))
105
+ return holes
106
+
107
+
108
+ def _generate_random_mask(image):
109
+ mask = zeros_like(image[:1])
110
+ holes = _get_cutout_holes(mask.shape[1], mask.shape[2])
111
+ for (x1, y1, x2, y2) in holes:
112
+ mask[:, y1:y2, x1:x2] = 1.0
113
+ if random.uniform(0, 1) < 0.25:
114
+ mask.fill_(1.0)
115
+ masked_image = image * (mask < 0.5)
116
+ return mask, masked_image
117
+
118
+
119
+ class PivotalTuningDatasetCapation(Dataset):
120
+ """
121
+ A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
122
+ It pre-processes the images and tokenizes the prompts.
123
+ """
124
+
125
+ def __init__(
126
+ self,
127
+ instance_data_root,
128
+ tokenizer,
129
+ token_map: Optional[dict] = None,
130
+ use_template: Optional[str] = None,
131
+ size=512,
132
+ h_flip=True,
133
+ color_jitter=False,
134
+ resize=True,
135
+ use_mask_captioned_data=False,
136
+ use_face_segmentation_condition=False,
137
+ train_inpainting=False,
138
+ blur_amount: int = 70,
139
+ ):
140
+ self.size = size
141
+ self.tokenizer = tokenizer
142
+ self.resize = resize
143
+ self.train_inpainting = train_inpainting
144
+
145
+ instance_data_root = Path(instance_data_root)
146
+ if not instance_data_root.exists():
147
+ raise ValueError("Instance images root doesn't exists.")
148
+
149
+ self.instance_images_path = []
150
+ self.mask_path = []
151
+
152
+ assert not (
153
+ use_mask_captioned_data and use_template
154
+ ), "Can't use both mask caption data and template."
155
+
156
+ # Prepare the instance images
157
+ if use_mask_captioned_data:
158
+ src_imgs = glob.glob(str(instance_data_root) + "/*src.jpg")
159
+ for f in src_imgs:
160
+ idx = int(str(Path(f).stem).split(".")[0])
161
+ mask_path = f"{instance_data_root}/{idx}.mask.png"
162
+
163
+ if Path(mask_path).exists():
164
+ self.instance_images_path.append(f)
165
+ self.mask_path.append(mask_path)
166
+ else:
167
+ print(f"Mask not found for {f}")
168
+
169
+ self.captions = open(f"{instance_data_root}/caption.txt").readlines()
170
+
171
+ else:
172
+ possibily_src_images = (
173
+ glob.glob(str(instance_data_root) + "/*.jpg")
174
+ + glob.glob(str(instance_data_root) + "/*.png")
175
+ + glob.glob(str(instance_data_root) + "/*.jpeg")
176
+ )
177
+ possibily_src_images = (
178
+ set(possibily_src_images)
179
+ - set(glob.glob(str(instance_data_root) + "/*mask.png"))
180
+ - set([str(instance_data_root) + "/caption.txt"])
181
+ )
182
+
183
+ self.instance_images_path = list(set(possibily_src_images))
184
+ self.captions = [
185
+ x.split("/")[-1].split(".")[0] for x in self.instance_images_path
186
+ ]
187
+
188
+ assert (
189
+ len(self.instance_images_path) > 0
190
+ ), "No images found in the instance data root."
191
+
192
+ self.instance_images_path = sorted(self.instance_images_path)
193
+
194
+ self.use_mask = use_face_segmentation_condition or use_mask_captioned_data
195
+ self.use_mask_captioned_data = use_mask_captioned_data
196
+
197
+ if use_face_segmentation_condition:
198
+
199
+ for idx in range(len(self.instance_images_path)):
200
+ targ = f"{instance_data_root}/{idx}.mask.png"
201
+ # see if the mask exists
202
+ if not Path(targ).exists():
203
+ print(f"Mask not found for {targ}")
204
+
205
+ print(
206
+ "Warning : this will pre-process all the images in the instance data root."
207
+ )
208
+
209
+ if len(self.mask_path) > 0:
210
+ print(
211
+ "Warning : masks already exists, but will be overwritten."
212
+ )
213
+
214
+ masks = face_mask_google_mediapipe(
215
+ [
216
+ Image.open(f).convert("RGB")
217
+ for f in self.instance_images_path
218
+ ]
219
+ )
220
+ for idx, mask in enumerate(masks):
221
+ mask.save(f"{instance_data_root}/{idx}.mask.png")
222
+
223
+ break
224
+
225
+ for idx in range(len(self.instance_images_path)):
226
+ self.mask_path.append(f"{instance_data_root}/{idx}.mask.png")
227
+
228
+ self.num_instance_images = len(self.instance_images_path)
229
+ self.token_map = token_map
230
+
231
+ self.use_template = use_template
232
+ if use_template is not None:
233
+ self.templates = TEMPLATE_MAP[use_template]
234
+
235
+ self._length = self.num_instance_images
236
+
237
+ self.h_flip = h_flip
238
+ self.image_transforms = transforms.Compose(
239
+ [
240
+ transforms.Resize(
241
+ size, interpolation=transforms.InterpolationMode.BILINEAR
242
+ )
243
+ if resize
244
+ else transforms.Lambda(lambda x: x),
245
+ transforms.ColorJitter(0.1, 0.1)
246
+ if color_jitter
247
+ else transforms.Lambda(lambda x: x),
248
+ transforms.CenterCrop(size),
249
+ transforms.ToTensor(),
250
+ transforms.Normalize([0.5], [0.5]),
251
+ ]
252
+ )
253
+
254
+ self.blur_amount = blur_amount
255
+
256
+ def __len__(self):
257
+ return self._length
258
+
259
+ def __getitem__(self, index):
260
+ example = {}
261
+ instance_image = Image.open(
262
+ self.instance_images_path[index % self.num_instance_images]
263
+ )
264
+ if not instance_image.mode == "RGB":
265
+ instance_image = instance_image.convert("RGB")
266
+ example["instance_images"] = self.image_transforms(instance_image)
267
+
268
+ if self.train_inpainting:
269
+ (
270
+ example["instance_masks"],
271
+ example["instance_masked_images"],
272
+ ) = _generate_random_mask(example["instance_images"])
273
+
274
+ if self.use_template:
275
+ assert self.token_map is not None
276
+ input_tok = list(self.token_map.values())[0]
277
+
278
+ text = random.choice(self.templates).format(input_tok)
279
+ else:
280
+ text = self.captions[index % self.num_instance_images].strip()
281
+
282
+ if self.token_map is not None:
283
+ for token, value in self.token_map.items():
284
+ text = text.replace(token, value)
285
+
286
+ print(text)
287
+
288
+ if self.use_mask:
289
+ example["mask"] = (
290
+ self.image_transforms(
291
+ Image.open(self.mask_path[index % self.num_instance_images])
292
+ )
293
+ * 0.5
294
+ + 1.0
295
+ )
296
+
297
+ if self.h_flip and random.random() > 0.5:
298
+ hflip = transforms.RandomHorizontalFlip(p=1)
299
+
300
+ example["instance_images"] = hflip(example["instance_images"])
301
+ if self.use_mask:
302
+ example["mask"] = hflip(example["mask"])
303
+
304
+ example["instance_prompt_ids"] = self.tokenizer(
305
+ text,
306
+ padding="do_not_pad",
307
+ truncation=True,
308
+ max_length=self.tokenizer.model_max_length,
309
+ ).input_ids
310
+
311
+ return example
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/lora.py ADDED
@@ -0,0 +1,1119 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ import json
5
+ import math
6
+ from itertools import groupby
7
+ from typing import Callable, Dict, List, Optional, Set, Tuple, Type, Union
8
+
9
+ import numpy as np
10
+ import PIL
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+
15
+ try:
16
+ from safetensors.torch import safe_open
17
+ from safetensors.torch import save_file as safe_save
18
+
19
+ safetensors_available = True
20
+ except ImportError:
21
+ from .safe_open import safe_open
22
+
23
+ def safe_save(
24
+ tensors: Dict[str, torch.Tensor],
25
+ filename: str,
26
+ metadata: Optional[Dict[str, str]] = None,
27
+ ) -> None:
28
+ raise EnvironmentError(
29
+ "Saving safetensors requires the safetensors library. Please install with pip or similar."
30
+ )
31
+
32
+ safetensors_available = False
33
+
34
+
35
+ class LoraInjectedLinear(nn.Module):
36
+ def __init__(
37
+ self, in_features, out_features, bias=False, r=4, dropout_p=0.1, scale=1.0
38
+ ):
39
+ super().__init__()
40
+
41
+ if r > min(in_features, out_features):
42
+ raise ValueError(
43
+ f"LoRA rank {r} must be less or equal than {min(in_features, out_features)}"
44
+ )
45
+ self.r = r
46
+ self.linear = nn.Linear(in_features, out_features, bias)
47
+ self.lora_down = nn.Linear(in_features, r, bias=False)
48
+ self.dropout = nn.Dropout(dropout_p)
49
+ self.lora_up = nn.Linear(r, out_features, bias=False)
50
+ self.scale = scale
51
+ self.selector = nn.Identity()
52
+
53
+ nn.init.normal_(self.lora_down.weight, std=1 / r)
54
+ nn.init.zeros_(self.lora_up.weight)
55
+
56
+ def forward(self, input, scale=None):
57
+ # Below change is needed to support diffusers>=0.21.0
58
+ if scale is not None:
59
+ self.scale = scale
60
+ return (
61
+ self.linear(input)
62
+ + self.dropout(self.lora_up(self.selector(self.lora_down(input))))
63
+ * self.scale
64
+ )
65
+
66
+ def realize_as_lora(self):
67
+ return self.lora_up.weight.data * self.scale, self.lora_down.weight.data
68
+
69
+ def set_selector_from_diag(self, diag: torch.Tensor):
70
+ # diag is a 1D tensor of size (r,)
71
+ assert diag.shape == (self.r,)
72
+ self.selector = nn.Linear(self.r, self.r, bias=False)
73
+ self.selector.weight.data = torch.diag(diag)
74
+ self.selector.weight.data = self.selector.weight.data.to(
75
+ self.lora_up.weight.device
76
+ ).to(self.lora_up.weight.dtype)
77
+
78
+
79
+ class LoraInjectedConv2d(nn.Module):
80
+ def __init__(
81
+ self,
82
+ in_channels: int,
83
+ out_channels: int,
84
+ kernel_size,
85
+ stride=1,
86
+ padding=0,
87
+ dilation=1,
88
+ groups: int = 1,
89
+ bias: bool = True,
90
+ r: int = 4,
91
+ dropout_p: float = 0.1,
92
+ scale: float = 1.0,
93
+ ):
94
+ super().__init__()
95
+ if r > min(in_channels, out_channels):
96
+ raise ValueError(
97
+ f"LoRA rank {r} must be less or equal than {min(in_channels, out_channels)}"
98
+ )
99
+ self.r = r
100
+ self.conv = nn.Conv2d(
101
+ in_channels=in_channels,
102
+ out_channels=out_channels,
103
+ kernel_size=kernel_size,
104
+ stride=stride,
105
+ padding=padding,
106
+ dilation=dilation,
107
+ groups=groups,
108
+ bias=bias,
109
+ )
110
+
111
+ self.lora_down = nn.Conv2d(
112
+ in_channels=in_channels,
113
+ out_channels=r,
114
+ kernel_size=kernel_size,
115
+ stride=stride,
116
+ padding=padding,
117
+ dilation=dilation,
118
+ groups=groups,
119
+ bias=False,
120
+ )
121
+ self.dropout = nn.Dropout(dropout_p)
122
+ self.lora_up = nn.Conv2d(
123
+ in_channels=r,
124
+ out_channels=out_channels,
125
+ kernel_size=1,
126
+ stride=1,
127
+ padding=0,
128
+ bias=False,
129
+ )
130
+ self.selector = nn.Identity()
131
+ self.scale = scale
132
+
133
+ nn.init.normal_(self.lora_down.weight, std=1 / r)
134
+ nn.init.zeros_(self.lora_up.weight)
135
+
136
+ def forward(self, input):
137
+ return (
138
+ self.conv(input)
139
+ + self.dropout(self.lora_up(self.selector(self.lora_down(input))))
140
+ * self.scale
141
+ )
142
+
143
+ def realize_as_lora(self):
144
+ return self.lora_up.weight.data * self.scale, self.lora_down.weight.data
145
+
146
+ def set_selector_from_diag(self, diag: torch.Tensor):
147
+ # diag is a 1D tensor of size (r,)
148
+ assert diag.shape == (self.r,)
149
+ self.selector = nn.Conv2d(
150
+ in_channels=self.r,
151
+ out_channels=self.r,
152
+ kernel_size=1,
153
+ stride=1,
154
+ padding=0,
155
+ bias=False,
156
+ )
157
+ self.selector.weight.data = torch.diag(diag)
158
+
159
+ # same device + dtype as lora_up
160
+ self.selector.weight.data = self.selector.weight.data.to(
161
+ self.lora_up.weight.device
162
+ ).to(self.lora_up.weight.dtype)
163
+
164
+
165
+ UNET_DEFAULT_TARGET_REPLACE = {"CrossAttention", "Attention", "GEGLU"}
166
+
167
+ UNET_EXTENDED_TARGET_REPLACE = {"ResnetBlock2D", "CrossAttention", "Attention", "GEGLU"}
168
+
169
+ TEXT_ENCODER_DEFAULT_TARGET_REPLACE = {"CLIPAttention"}
170
+
171
+ TEXT_ENCODER_EXTENDED_TARGET_REPLACE = {"CLIPAttention"}
172
+
173
+ DEFAULT_TARGET_REPLACE = UNET_DEFAULT_TARGET_REPLACE
174
+
175
+ EMBED_FLAG = "<embed>"
176
+
177
+
178
+ def _find_children(
179
+ model,
180
+ search_class: List[Type[nn.Module]] = [nn.Linear],
181
+ ):
182
+ """
183
+ Find all modules of a certain class (or union of classes).
184
+
185
+ Returns all matching modules, along with the parent of those moduless and the
186
+ names they are referenced by.
187
+ """
188
+ # For each target find every linear_class module that isn't a child of a LoraInjectedLinear
189
+ for parent in model.modules():
190
+ for name, module in parent.named_children():
191
+ if any([isinstance(module, _class) for _class in search_class]):
192
+ yield parent, name, module
193
+
194
+
195
+ def _find_modules_v2(
196
+ model,
197
+ ancestor_class: Optional[Set[str]] = None,
198
+ search_class: List[Type[nn.Module]] = [nn.Linear],
199
+ exclude_children_of: Optional[List[Type[nn.Module]]] = [
200
+ LoraInjectedLinear,
201
+ LoraInjectedConv2d,
202
+ ],
203
+ ):
204
+ """
205
+ Find all modules of a certain class (or union of classes) that are direct or
206
+ indirect descendants of other modules of a certain class (or union of classes).
207
+
208
+ Returns all matching modules, along with the parent of those moduless and the
209
+ names they are referenced by.
210
+ """
211
+
212
+ # Get the targets we should replace all linears under
213
+ if ancestor_class is not None:
214
+ ancestors = (
215
+ module
216
+ for module in model.modules()
217
+ if module.__class__.__name__ in ancestor_class
218
+ )
219
+ else:
220
+ # this, incase you want to naively iterate over all modules.
221
+ ancestors = [module for module in model.modules()]
222
+
223
+ # For each target find every linear_class module that isn't a child of a LoraInjectedLinear
224
+ for ancestor in ancestors:
225
+ for fullname, module in ancestor.named_modules():
226
+ if any([isinstance(module, _class) for _class in search_class]):
227
+ # Find the direct parent if this is a descendant, not a child, of target
228
+ *path, name = fullname.split(".")
229
+ parent = ancestor
230
+ while path:
231
+ parent = parent.get_submodule(path.pop(0))
232
+ # Skip this linear if it's a child of a LoraInjectedLinear
233
+ if exclude_children_of and any(
234
+ [isinstance(parent, _class) for _class in exclude_children_of]
235
+ ):
236
+ continue
237
+ # Otherwise, yield it
238
+ yield parent, name, module
239
+
240
+
241
+ def _find_modules_old(
242
+ model,
243
+ ancestor_class: Set[str] = DEFAULT_TARGET_REPLACE,
244
+ search_class: List[Type[nn.Module]] = [nn.Linear],
245
+ exclude_children_of: Optional[List[Type[nn.Module]]] = [LoraInjectedLinear],
246
+ ):
247
+ ret = []
248
+ for _module in model.modules():
249
+ if _module.__class__.__name__ in ancestor_class:
250
+
251
+ for name, _child_module in _module.named_modules():
252
+ if _child_module.__class__ in search_class:
253
+ ret.append((_module, name, _child_module))
254
+ print(ret)
255
+ return ret
256
+
257
+
258
+ _find_modules = _find_modules_v2
259
+
260
+
261
+ def inject_trainable_lora(
262
+ model: nn.Module,
263
+ target_replace_module: Set[str] = DEFAULT_TARGET_REPLACE,
264
+ r: int = 4,
265
+ loras=None, # path to lora .pt
266
+ verbose: bool = False,
267
+ dropout_p: float = 0.0,
268
+ scale: float = 1.0,
269
+ ):
270
+ """
271
+ inject lora into model, and returns lora parameter groups.
272
+ """
273
+
274
+ require_grad_params = []
275
+ names = []
276
+
277
+ if loras != None:
278
+ loras = torch.load(loras)
279
+
280
+ for _module, name, _child_module in _find_modules(
281
+ model, target_replace_module, search_class=[nn.Linear]
282
+ ):
283
+ weight = _child_module.weight
284
+ bias = _child_module.bias
285
+ if verbose:
286
+ print("LoRA Injection : injecting lora into ", name)
287
+ print("LoRA Injection : weight shape", weight.shape)
288
+ _tmp = LoraInjectedLinear(
289
+ _child_module.in_features,
290
+ _child_module.out_features,
291
+ _child_module.bias is not None,
292
+ r=r,
293
+ dropout_p=dropout_p,
294
+ scale=scale,
295
+ )
296
+ _tmp.linear.weight = weight
297
+ if bias is not None:
298
+ _tmp.linear.bias = bias
299
+
300
+ # switch the module
301
+ _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)
302
+ _module._modules[name] = _tmp
303
+
304
+ require_grad_params.append(_module._modules[name].lora_up.parameters())
305
+ require_grad_params.append(_module._modules[name].lora_down.parameters())
306
+
307
+ if loras != None:
308
+ _module._modules[name].lora_up.weight = loras.pop(0)
309
+ _module._modules[name].lora_down.weight = loras.pop(0)
310
+
311
+ _module._modules[name].lora_up.weight.requires_grad = True
312
+ _module._modules[name].lora_down.weight.requires_grad = True
313
+ names.append(name)
314
+
315
+ return require_grad_params, names
316
+
317
+
318
+ def inject_trainable_lora_extended(
319
+ model: nn.Module,
320
+ target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,
321
+ r: int = 4,
322
+ loras=None, # path to lora .pt
323
+ ):
324
+ """
325
+ inject lora into model, and returns lora parameter groups.
326
+ """
327
+
328
+ require_grad_params = []
329
+ names = []
330
+
331
+ if loras != None:
332
+ loras = torch.load(loras)
333
+
334
+ for _module, name, _child_module in _find_modules(
335
+ model, target_replace_module, search_class=[nn.Linear, nn.Conv2d]
336
+ ):
337
+ if _child_module.__class__ == nn.Linear:
338
+ weight = _child_module.weight
339
+ bias = _child_module.bias
340
+ _tmp = LoraInjectedLinear(
341
+ _child_module.in_features,
342
+ _child_module.out_features,
343
+ _child_module.bias is not None,
344
+ r=r,
345
+ )
346
+ _tmp.linear.weight = weight
347
+ if bias is not None:
348
+ _tmp.linear.bias = bias
349
+ elif _child_module.__class__ == nn.Conv2d:
350
+ weight = _child_module.weight
351
+ bias = _child_module.bias
352
+ _tmp = LoraInjectedConv2d(
353
+ _child_module.in_channels,
354
+ _child_module.out_channels,
355
+ _child_module.kernel_size,
356
+ _child_module.stride,
357
+ _child_module.padding,
358
+ _child_module.dilation,
359
+ _child_module.groups,
360
+ _child_module.bias is not None,
361
+ r=r,
362
+ )
363
+
364
+ _tmp.conv.weight = weight
365
+ if bias is not None:
366
+ _tmp.conv.bias = bias
367
+
368
+ # switch the module
369
+ _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)
370
+ if bias is not None:
371
+ _tmp.to(_child_module.bias.device).to(_child_module.bias.dtype)
372
+
373
+ _module._modules[name] = _tmp
374
+
375
+ require_grad_params.append(_module._modules[name].lora_up.parameters())
376
+ require_grad_params.append(_module._modules[name].lora_down.parameters())
377
+
378
+ if loras != None:
379
+ _module._modules[name].lora_up.weight = loras.pop(0)
380
+ _module._modules[name].lora_down.weight = loras.pop(0)
381
+
382
+ _module._modules[name].lora_up.weight.requires_grad = True
383
+ _module._modules[name].lora_down.weight.requires_grad = True
384
+ names.append(name)
385
+
386
+ return require_grad_params, names
387
+
388
+
389
+ def extract_lora_ups_down(model, target_replace_module=DEFAULT_TARGET_REPLACE):
390
+
391
+ loras = []
392
+
393
+ for _m, _n, _child_module in _find_modules(
394
+ model,
395
+ target_replace_module,
396
+ search_class=[LoraInjectedLinear, LoraInjectedConv2d],
397
+ ):
398
+ loras.append((_child_module.lora_up, _child_module.lora_down))
399
+
400
+ if len(loras) == 0:
401
+ raise ValueError("No lora injected.")
402
+
403
+ return loras
404
+
405
+
406
+ def extract_lora_as_tensor(
407
+ model, target_replace_module=DEFAULT_TARGET_REPLACE, as_fp16=True, as_bf16=True,
408
+ ):
409
+
410
+ loras = []
411
+
412
+ for _m, _n, _child_module in _find_modules(
413
+ model,
414
+ target_replace_module,
415
+ search_class=[LoraInjectedLinear, LoraInjectedConv2d],
416
+ ):
417
+ up, down = _child_module.realize_as_lora()
418
+ if model.device.type == 'hpu' and as_bf16:
419
+ up = up.to(torch.bfloat16)
420
+ down = down.to(torch.bfloat16)
421
+ elif as_fp16:
422
+ up = up.to(torch.float16)
423
+ down = down.to(torch.float16)
424
+
425
+ loras.append((up, down))
426
+
427
+ if len(loras) == 0:
428
+ raise ValueError("No lora injected.")
429
+
430
+ return loras
431
+
432
+
433
+ def save_lora_weight(
434
+ model,
435
+ path="./lora.pt",
436
+ target_replace_module=DEFAULT_TARGET_REPLACE,
437
+ ):
438
+ weights = []
439
+ for _up, _down in extract_lora_ups_down(
440
+ model, target_replace_module=target_replace_module
441
+ ):
442
+ weights.append(_up.weight.to("cpu").to(torch.float16))
443
+ weights.append(_down.weight.to("cpu").to(torch.float16))
444
+
445
+ torch.save(weights, path)
446
+
447
+
448
+ def save_lora_as_json(model, path="./lora.json"):
449
+ weights = []
450
+ for _up, _down in extract_lora_ups_down(model):
451
+ weights.append(_up.weight.detach().cpu().numpy().tolist())
452
+ weights.append(_down.weight.detach().cpu().numpy().tolist())
453
+
454
+ import json
455
+
456
+ with open(path, "w") as f:
457
+ json.dump(weights, f)
458
+
459
+
460
+ def save_safeloras_with_embeds(
461
+ modelmap: Dict[str, Tuple[nn.Module, Set[str]]] = {},
462
+ embeds: Dict[str, torch.Tensor] = {},
463
+ outpath="./lora.safetensors",
464
+ ):
465
+ """
466
+ Saves the Lora from multiple modules in a single safetensor file.
467
+
468
+ modelmap is a dictionary of {
469
+ "module name": (module, target_replace_module)
470
+ }
471
+ """
472
+ weights = {}
473
+ metadata = {}
474
+
475
+ for name, (model, target_replace_module) in modelmap.items():
476
+ metadata[name] = json.dumps(list(target_replace_module))
477
+
478
+ for i, (_up, _down) in enumerate(
479
+ extract_lora_as_tensor(model, target_replace_module)
480
+ ):
481
+ rank = _down.shape[0]
482
+
483
+ metadata[f"{name}:{i}:rank"] = str(rank)
484
+ weights[f"{name}:{i}:up"] = _up
485
+ weights[f"{name}:{i}:down"] = _down
486
+
487
+ for token, tensor in embeds.items():
488
+ metadata[token] = EMBED_FLAG
489
+ weights[token] = tensor
490
+
491
+ print(f"Saving weights to {outpath}")
492
+ safe_save(weights, outpath, metadata)
493
+
494
+
495
+ def save_safeloras(
496
+ modelmap: Dict[str, Tuple[nn.Module, Set[str]]] = {},
497
+ outpath="./lora.safetensors",
498
+ ):
499
+ return save_safeloras_with_embeds(modelmap=modelmap, outpath=outpath)
500
+
501
+
502
+ def convert_loras_to_safeloras_with_embeds(
503
+ modelmap: Dict[str, Tuple[str, Set[str], int]] = {},
504
+ embeds: Dict[str, torch.Tensor] = {},
505
+ outpath="./lora.safetensors",
506
+ ):
507
+ """
508
+ Converts the Lora from multiple pytorch .pt files into a single safetensor file.
509
+
510
+ modelmap is a dictionary of {
511
+ "module name": (pytorch_model_path, target_replace_module, rank)
512
+ }
513
+ """
514
+
515
+ weights = {}
516
+ metadata = {}
517
+
518
+ for name, (path, target_replace_module, r) in modelmap.items():
519
+ metadata[name] = json.dumps(list(target_replace_module))
520
+
521
+ lora = torch.load(path)
522
+ for i, weight in enumerate(lora):
523
+ is_up = i % 2 == 0
524
+ i = i // 2
525
+
526
+ if is_up:
527
+ metadata[f"{name}:{i}:rank"] = str(r)
528
+ weights[f"{name}:{i}:up"] = weight
529
+ else:
530
+ weights[f"{name}:{i}:down"] = weight
531
+
532
+ for token, tensor in embeds.items():
533
+ metadata[token] = EMBED_FLAG
534
+ weights[token] = tensor
535
+
536
+ print(f"Saving weights to {outpath}")
537
+ safe_save(weights, outpath, metadata)
538
+
539
+
540
+ def convert_loras_to_safeloras(
541
+ modelmap: Dict[str, Tuple[str, Set[str], int]] = {},
542
+ outpath="./lora.safetensors",
543
+ ):
544
+ convert_loras_to_safeloras_with_embeds(modelmap=modelmap, outpath=outpath)
545
+
546
+
547
+ def parse_safeloras(
548
+ safeloras,
549
+ ) -> Dict[str, Tuple[List[nn.parameter.Parameter], List[int], List[str]]]:
550
+ """
551
+ Converts a loaded safetensor file that contains a set of module Loras
552
+ into Parameters and other information
553
+
554
+ Output is a dictionary of {
555
+ "module name": (
556
+ [list of weights],
557
+ [list of ranks],
558
+ target_replacement_modules
559
+ )
560
+ }
561
+ """
562
+ loras = {}
563
+ metadata = safeloras.metadata()
564
+
565
+ get_name = lambda k: k.split(":")[0]
566
+
567
+ keys = list(safeloras.keys())
568
+ keys.sort(key=get_name)
569
+
570
+ for name, module_keys in groupby(keys, get_name):
571
+ info = metadata.get(name)
572
+
573
+ if not info:
574
+ raise ValueError(
575
+ f"Tensor {name} has no metadata - is this a Lora safetensor?"
576
+ )
577
+
578
+ # Skip Textual Inversion embeds
579
+ if info == EMBED_FLAG:
580
+ continue
581
+
582
+ # Handle Loras
583
+ # Extract the targets
584
+ target = json.loads(info)
585
+
586
+ # Build the result lists - Python needs us to preallocate lists to insert into them
587
+ module_keys = list(module_keys)
588
+ ranks = [4] * (len(module_keys) // 2)
589
+ weights = [None] * len(module_keys)
590
+
591
+ for key in module_keys:
592
+ # Split the model name and index out of the key
593
+ _, idx, direction = key.split(":")
594
+ idx = int(idx)
595
+
596
+ # Add the rank
597
+ ranks[idx] = int(metadata[f"{name}:{idx}:rank"])
598
+
599
+ # Insert the weight into the list
600
+ idx = idx * 2 + (1 if direction == "down" else 0)
601
+ weights[idx] = nn.parameter.Parameter(safeloras.get_tensor(key))
602
+
603
+ loras[name] = (weights, ranks, target)
604
+
605
+ return loras
606
+
607
+
608
+ def parse_safeloras_embeds(
609
+ safeloras,
610
+ ) -> Dict[str, torch.Tensor]:
611
+ """
612
+ Converts a loaded safetensor file that contains Textual Inversion embeds into
613
+ a dictionary of embed_token: Tensor
614
+ """
615
+ embeds = {}
616
+ metadata = safeloras.metadata()
617
+
618
+ for key in safeloras.keys():
619
+ # Only handle Textual Inversion embeds
620
+ meta = metadata.get(key)
621
+ if not meta or meta != EMBED_FLAG:
622
+ continue
623
+
624
+ embeds[key] = safeloras.get_tensor(key)
625
+
626
+ return embeds
627
+
628
+
629
+ def load_safeloras(path, device="cpu"):
630
+ safeloras = safe_open(path, framework="pt", device=device)
631
+ return parse_safeloras(safeloras)
632
+
633
+
634
+ def load_safeloras_embeds(path, device="cpu"):
635
+ safeloras = safe_open(path, framework="pt", device=device)
636
+ return parse_safeloras_embeds(safeloras)
637
+
638
+
639
+ def load_safeloras_both(path, device="cpu"):
640
+ safeloras = safe_open(path, framework="pt", device=device)
641
+ return parse_safeloras(safeloras), parse_safeloras_embeds(safeloras)
642
+
643
+
644
+ def collapse_lora(model, alpha=1.0):
645
+
646
+ for _module, name, _child_module in _find_modules(
647
+ model,
648
+ UNET_EXTENDED_TARGET_REPLACE | TEXT_ENCODER_EXTENDED_TARGET_REPLACE,
649
+ search_class=[LoraInjectedLinear, LoraInjectedConv2d],
650
+ ):
651
+
652
+ if isinstance(_child_module, LoraInjectedLinear):
653
+ print("Collapsing Lin Lora in", name)
654
+
655
+ _child_module.linear.weight = nn.Parameter(
656
+ _child_module.linear.weight.data
657
+ + alpha
658
+ * (
659
+ _child_module.lora_up.weight.data
660
+ @ _child_module.lora_down.weight.data
661
+ )
662
+ .type(_child_module.linear.weight.dtype)
663
+ .to(_child_module.linear.weight.device)
664
+ )
665
+
666
+ else:
667
+ print("Collapsing Conv Lora in", name)
668
+ _child_module.conv.weight = nn.Parameter(
669
+ _child_module.conv.weight.data
670
+ + alpha
671
+ * (
672
+ _child_module.lora_up.weight.data.flatten(start_dim=1)
673
+ @ _child_module.lora_down.weight.data.flatten(start_dim=1)
674
+ )
675
+ .reshape(_child_module.conv.weight.data.shape)
676
+ .type(_child_module.conv.weight.dtype)
677
+ .to(_child_module.conv.weight.device)
678
+ )
679
+
680
+
681
+ def monkeypatch_or_replace_lora(
682
+ model,
683
+ loras,
684
+ target_replace_module=DEFAULT_TARGET_REPLACE,
685
+ r: Union[int, List[int]] = 4,
686
+ ):
687
+ for _module, name, _child_module in _find_modules(
688
+ model, target_replace_module, search_class=[nn.Linear, LoraInjectedLinear]
689
+ ):
690
+ _source = (
691
+ _child_module.linear
692
+ if isinstance(_child_module, LoraInjectedLinear)
693
+ else _child_module
694
+ )
695
+
696
+ weight = _source.weight
697
+ bias = _source.bias
698
+ _tmp = LoraInjectedLinear(
699
+ _source.in_features,
700
+ _source.out_features,
701
+ _source.bias is not None,
702
+ r=r.pop(0) if isinstance(r, list) else r,
703
+ )
704
+ _tmp.linear.weight = weight
705
+
706
+ if bias is not None:
707
+ _tmp.linear.bias = bias
708
+
709
+ # switch the module
710
+ _module._modules[name] = _tmp
711
+
712
+ up_weight = loras.pop(0)
713
+ down_weight = loras.pop(0)
714
+
715
+ _module._modules[name].lora_up.weight = nn.Parameter(
716
+ up_weight.type(weight.dtype)
717
+ )
718
+ _module._modules[name].lora_down.weight = nn.Parameter(
719
+ down_weight.type(weight.dtype)
720
+ )
721
+
722
+ _module._modules[name].to(weight.device)
723
+
724
+
725
+ def monkeypatch_or_replace_lora_extended(
726
+ model,
727
+ loras,
728
+ target_replace_module=DEFAULT_TARGET_REPLACE,
729
+ r: Union[int, List[int]] = 4,
730
+ ):
731
+ for _module, name, _child_module in _find_modules(
732
+ model,
733
+ target_replace_module,
734
+ search_class=[nn.Linear, LoraInjectedLinear, nn.Conv2d, LoraInjectedConv2d],
735
+ ):
736
+
737
+ if (_child_module.__class__ == nn.Linear) or (
738
+ _child_module.__class__ == LoraInjectedLinear
739
+ ):
740
+ if len(loras[0].shape) != 2:
741
+ continue
742
+
743
+ _source = (
744
+ _child_module.linear
745
+ if isinstance(_child_module, LoraInjectedLinear)
746
+ else _child_module
747
+ )
748
+
749
+ weight = _source.weight
750
+ bias = _source.bias
751
+ _tmp = LoraInjectedLinear(
752
+ _source.in_features,
753
+ _source.out_features,
754
+ _source.bias is not None,
755
+ r=r.pop(0) if isinstance(r, list) else r,
756
+ )
757
+ _tmp.linear.weight = weight
758
+
759
+ if bias is not None:
760
+ _tmp.linear.bias = bias
761
+
762
+ elif (_child_module.__class__ == nn.Conv2d) or (
763
+ _child_module.__class__ == LoraInjectedConv2d
764
+ ):
765
+ if len(loras[0].shape) != 4:
766
+ continue
767
+ _source = (
768
+ _child_module.conv
769
+ if isinstance(_child_module, LoraInjectedConv2d)
770
+ else _child_module
771
+ )
772
+
773
+ weight = _source.weight
774
+ bias = _source.bias
775
+ _tmp = LoraInjectedConv2d(
776
+ _source.in_channels,
777
+ _source.out_channels,
778
+ _source.kernel_size,
779
+ _source.stride,
780
+ _source.padding,
781
+ _source.dilation,
782
+ _source.groups,
783
+ _source.bias is not None,
784
+ r=r.pop(0) if isinstance(r, list) else r,
785
+ )
786
+
787
+ _tmp.conv.weight = weight
788
+
789
+ if bias is not None:
790
+ _tmp.conv.bias = bias
791
+
792
+ # switch the module
793
+ _module._modules[name] = _tmp
794
+
795
+ up_weight = loras.pop(0)
796
+ down_weight = loras.pop(0)
797
+
798
+ _module._modules[name].lora_up.weight = nn.Parameter(
799
+ up_weight.type(weight.dtype)
800
+ )
801
+ _module._modules[name].lora_down.weight = nn.Parameter(
802
+ down_weight.type(weight.dtype)
803
+ )
804
+
805
+ _module._modules[name].to(weight.device)
806
+
807
+
808
+ def monkeypatch_or_replace_safeloras(models, safeloras):
809
+ loras = parse_safeloras(safeloras)
810
+
811
+ for name, (lora, ranks, target) in loras.items():
812
+ model = getattr(models, name, None)
813
+
814
+ if not model:
815
+ print(f"No model provided for {name}, contained in Lora")
816
+ continue
817
+
818
+ monkeypatch_or_replace_lora_extended(model, lora, target, ranks)
819
+
820
+
821
+ def monkeypatch_remove_lora(model):
822
+ for _module, name, _child_module in _find_modules(
823
+ model, search_class=[LoraInjectedLinear, LoraInjectedConv2d]
824
+ ):
825
+ if isinstance(_child_module, LoraInjectedLinear):
826
+ _source = _child_module.linear
827
+ weight, bias = _source.weight, _source.bias
828
+
829
+ _tmp = nn.Linear(
830
+ _source.in_features, _source.out_features, bias is not None
831
+ )
832
+
833
+ _tmp.weight = weight
834
+ if bias is not None:
835
+ _tmp.bias = bias
836
+
837
+ else:
838
+ _source = _child_module.conv
839
+ weight, bias = _source.weight, _source.bias
840
+
841
+ _tmp = nn.Conv2d(
842
+ in_channels=_source.in_channels,
843
+ out_channels=_source.out_channels,
844
+ kernel_size=_source.kernel_size,
845
+ stride=_source.stride,
846
+ padding=_source.padding,
847
+ dilation=_source.dilation,
848
+ groups=_source.groups,
849
+ bias=bias is not None,
850
+ )
851
+
852
+ _tmp.weight = weight
853
+ if bias is not None:
854
+ _tmp.bias = bias
855
+
856
+ _module._modules[name] = _tmp
857
+
858
+
859
+ def monkeypatch_add_lora(
860
+ model,
861
+ loras,
862
+ target_replace_module=DEFAULT_TARGET_REPLACE,
863
+ alpha: float = 1.0,
864
+ beta: float = 1.0,
865
+ ):
866
+ for _module, name, _child_module in _find_modules(
867
+ model, target_replace_module, search_class=[LoraInjectedLinear]
868
+ ):
869
+ weight = _child_module.linear.weight
870
+
871
+ up_weight = loras.pop(0)
872
+ down_weight = loras.pop(0)
873
+
874
+ _module._modules[name].lora_up.weight = nn.Parameter(
875
+ up_weight.type(weight.dtype).to(weight.device) * alpha
876
+ + _module._modules[name].lora_up.weight.to(weight.device) * beta
877
+ )
878
+ _module._modules[name].lora_down.weight = nn.Parameter(
879
+ down_weight.type(weight.dtype).to(weight.device) * alpha
880
+ + _module._modules[name].lora_down.weight.to(weight.device) * beta
881
+ )
882
+
883
+ _module._modules[name].to(weight.device)
884
+
885
+
886
+ def tune_lora_scale(model, alpha: float = 1.0):
887
+ for _module in model.modules():
888
+ if _module.__class__.__name__ in ["LoraInjectedLinear", "LoraInjectedConv2d"]:
889
+ _module.scale = alpha
890
+
891
+
892
+ def set_lora_diag(model, diag: torch.Tensor):
893
+ for _module in model.modules():
894
+ if _module.__class__.__name__ in ["LoraInjectedLinear", "LoraInjectedConv2d"]:
895
+ _module.set_selector_from_diag(diag)
896
+
897
+
898
+ def _text_lora_path(path: str) -> str:
899
+ assert path.endswith(".pt"), "Only .pt files are supported"
900
+ return ".".join(path.split(".")[:-1] + ["text_encoder", "pt"])
901
+
902
+
903
+ def _ti_lora_path(path: str) -> str:
904
+ assert path.endswith(".pt"), "Only .pt files are supported"
905
+ return ".".join(path.split(".")[:-1] + ["ti", "pt"])
906
+
907
+
908
+ def apply_learned_embed_in_clip(
909
+ learned_embeds,
910
+ text_encoder,
911
+ tokenizer,
912
+ token: Optional[Union[str, List[str]]] = None,
913
+ idempotent=False,
914
+ ):
915
+ if isinstance(token, str):
916
+ trained_tokens = [token]
917
+ elif isinstance(token, list):
918
+ assert len(learned_embeds.keys()) == len(
919
+ token
920
+ ), "The number of tokens and the number of embeds should be the same"
921
+ trained_tokens = token
922
+ else:
923
+ trained_tokens = list(learned_embeds.keys())
924
+
925
+ for token in trained_tokens:
926
+ print(token)
927
+ embeds = learned_embeds[token]
928
+
929
+ # cast to dtype of text_encoder
930
+ dtype = text_encoder.get_input_embeddings().weight.dtype
931
+ num_added_tokens = tokenizer.add_tokens(token)
932
+
933
+ i = 1
934
+ if not idempotent:
935
+ while num_added_tokens == 0:
936
+ print(f"The tokenizer already contains the token {token}.")
937
+ token = f"{token[:-1]}-{i}>"
938
+ print(f"Attempting to add the token {token}.")
939
+ num_added_tokens = tokenizer.add_tokens(token)
940
+ i += 1
941
+ elif num_added_tokens == 0 and idempotent:
942
+ print(f"The tokenizer already contains the token {token}.")
943
+ print(f"Replacing {token} embedding.")
944
+
945
+ # resize the token embeddings
946
+ text_encoder.resize_token_embeddings(len(tokenizer))
947
+
948
+ # get the id for the token and assign the embeds
949
+ token_id = tokenizer.convert_tokens_to_ids(token)
950
+ text_encoder.get_input_embeddings().weight.data[token_id] = embeds
951
+ return token
952
+
953
+
954
+ def load_learned_embed_in_clip(
955
+ learned_embeds_path,
956
+ text_encoder,
957
+ tokenizer,
958
+ token: Optional[Union[str, List[str]]] = None,
959
+ idempotent=False,
960
+ ):
961
+ learned_embeds = torch.load(learned_embeds_path)
962
+ apply_learned_embed_in_clip(
963
+ learned_embeds, text_encoder, tokenizer, token, idempotent
964
+ )
965
+
966
+
967
+ def patch_pipe(
968
+ pipe,
969
+ maybe_unet_path,
970
+ token: Optional[str] = None,
971
+ r: int = 4,
972
+ patch_unet=True,
973
+ patch_text=True,
974
+ patch_ti=True,
975
+ idempotent_token=True,
976
+ unet_target_replace_module=DEFAULT_TARGET_REPLACE,
977
+ text_target_replace_module=TEXT_ENCODER_DEFAULT_TARGET_REPLACE,
978
+ ):
979
+ if maybe_unet_path.endswith(".pt"):
980
+ # torch format
981
+
982
+ if maybe_unet_path.endswith(".ti.pt"):
983
+ unet_path = maybe_unet_path[:-6] + ".pt"
984
+ elif maybe_unet_path.endswith(".text_encoder.pt"):
985
+ unet_path = maybe_unet_path[:-16] + ".pt"
986
+ else:
987
+ unet_path = maybe_unet_path
988
+
989
+ ti_path = _ti_lora_path(unet_path)
990
+ text_path = _text_lora_path(unet_path)
991
+
992
+ if patch_unet:
993
+ print("LoRA : Patching Unet")
994
+ monkeypatch_or_replace_lora(
995
+ pipe.unet,
996
+ torch.load(unet_path),
997
+ r=r,
998
+ target_replace_module=unet_target_replace_module,
999
+ )
1000
+
1001
+ if patch_text:
1002
+ print("LoRA : Patching text encoder")
1003
+ monkeypatch_or_replace_lora(
1004
+ pipe.text_encoder,
1005
+ torch.load(text_path),
1006
+ target_replace_module=text_target_replace_module,
1007
+ r=r,
1008
+ )
1009
+ if patch_ti:
1010
+ print("LoRA : Patching token input")
1011
+ token = load_learned_embed_in_clip(
1012
+ ti_path,
1013
+ pipe.text_encoder,
1014
+ pipe.tokenizer,
1015
+ token=token,
1016
+ idempotent=idempotent_token,
1017
+ )
1018
+
1019
+ elif maybe_unet_path.endswith(".safetensors"):
1020
+ safeloras = safe_open(maybe_unet_path, framework="pt", device="cpu")
1021
+ monkeypatch_or_replace_safeloras(pipe, safeloras)
1022
+ tok_dict = parse_safeloras_embeds(safeloras)
1023
+ if patch_ti:
1024
+ apply_learned_embed_in_clip(
1025
+ tok_dict,
1026
+ pipe.text_encoder,
1027
+ pipe.tokenizer,
1028
+ token=token,
1029
+ idempotent=idempotent_token,
1030
+ )
1031
+ return tok_dict
1032
+
1033
+
1034
+ @torch.no_grad()
1035
+ def inspect_lora(model):
1036
+ moved = {}
1037
+
1038
+ for name, _module in model.named_modules():
1039
+ if _module.__class__.__name__ in ["LoraInjectedLinear", "LoraInjectedConv2d"]:
1040
+ ups = _module.lora_up.weight.data.clone()
1041
+ downs = _module.lora_down.weight.data.clone()
1042
+
1043
+ wght: torch.Tensor = ups.flatten(1) @ downs.flatten(1)
1044
+
1045
+ dist = wght.flatten().abs().mean().item()
1046
+ if name in moved:
1047
+ moved[name].append(dist)
1048
+ else:
1049
+ moved[name] = [dist]
1050
+
1051
+ return moved
1052
+
1053
+
1054
+ def save_all(
1055
+ unet,
1056
+ text_encoder,
1057
+ save_path,
1058
+ placeholder_token_ids=None,
1059
+ placeholder_tokens=None,
1060
+ save_lora=True,
1061
+ save_ti=True,
1062
+ target_replace_module_text=TEXT_ENCODER_DEFAULT_TARGET_REPLACE,
1063
+ target_replace_module_unet=DEFAULT_TARGET_REPLACE,
1064
+ safe_form=True,
1065
+ ):
1066
+ if not safe_form:
1067
+ # save ti
1068
+ if save_ti:
1069
+ ti_path = _ti_lora_path(save_path)
1070
+ learned_embeds_dict = {}
1071
+ for tok, tok_id in zip(placeholder_tokens, placeholder_token_ids):
1072
+ learned_embeds = text_encoder.get_input_embeddings().weight[tok_id]
1073
+ print(
1074
+ f"Current Learned Embeddings for {tok}, id {tok_id}: ",
1075
+ learned_embeds[:4],
1076
+ )
1077
+ learned_embeds_dict[tok] = learned_embeds.detach().cpu()
1078
+
1079
+ torch.save(learned_embeds_dict, ti_path)
1080
+ print("Ti saved to ", ti_path)
1081
+
1082
+ # save text encoder
1083
+ if save_lora:
1084
+
1085
+ save_lora_weight(
1086
+ unet, save_path, target_replace_module=target_replace_module_unet
1087
+ )
1088
+ print("Unet saved to ", save_path)
1089
+
1090
+ save_lora_weight(
1091
+ text_encoder,
1092
+ _text_lora_path(save_path),
1093
+ target_replace_module=target_replace_module_text,
1094
+ )
1095
+ print("Text Encoder saved to ", _text_lora_path(save_path))
1096
+
1097
+ else:
1098
+ assert save_path.endswith(
1099
+ ".safetensors"
1100
+ ), f"Save path : {save_path} should end with .safetensors"
1101
+
1102
+ loras = {}
1103
+ embeds = {}
1104
+
1105
+ if save_lora:
1106
+
1107
+ loras["unet"] = (unet, target_replace_module_unet)
1108
+ loras["text_encoder"] = (text_encoder, target_replace_module_text)
1109
+
1110
+ if save_ti:
1111
+ for tok, tok_id in zip(placeholder_tokens, placeholder_token_ids):
1112
+ learned_embeds = text_encoder.get_input_embeddings().weight[tok_id]
1113
+ print(
1114
+ f"Current Learned Embeddings for {tok}, id {tok_id}: ",
1115
+ learned_embeds[:4],
1116
+ )
1117
+ embeds[tok] = learned_embeds.detach().cpu()
1118
+
1119
+ save_safeloras_with_embeds(loras, embeds, save_path)
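The patching helpers above are easiest to see end to end with a small, hedged usage sketch. The model id matches the default used elsewhere in this repo; the LoRA path, the device string and the `<s1>` token are placeholders for whatever a particular fine-tuning run produced.

import torch
from diffusers import StableDiffusionPipeline

from lora_diffusion.lora import patch_pipe, tune_lora_scale

device = "cuda"  # placeholder; could equally be "hpu" or "cpu" depending on the setup

# Load a base pipeline and patch in a previously saved LoRA + textual-inversion file.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to(device)
patch_pipe(pipe, "./lora.safetensors", patch_unet=True, patch_text=True, patch_ti=True)

# Blend the LoRA contribution in or out before sampling.
tune_lora_scale(pipe.unet, 0.8)
tune_lora_scale(pipe.text_encoder, 0.8)

image = pipe("a photo of <s1> at the beach", num_inference_steps=30).images[0]
image.save("sample.png")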
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/lora_manager.py ADDED
@@ -0,0 +1,144 @@
1
+ from typing import List
2
+ import torch
3
+ from safetensors import safe_open
4
+ from diffusers import StableDiffusionPipeline
5
+ from .lora import (
6
+ monkeypatch_or_replace_safeloras,
7
+ apply_learned_embed_in_clip,
8
+ set_lora_diag,
9
+ parse_safeloras_embeds,
10
+ )
11
+
12
+
13
+ def lora_join(lora_safetenors: list):
14
+ metadatas = [dict(safelora.metadata()) for safelora in lora_safetenors]
15
+ _total_metadata = {}
16
+ total_metadata = {}
17
+ total_tensor = {}
18
+ total_rank = 0
19
+ ranklist = []
20
+ for _metadata in metadatas:
21
+ rankset = []
22
+ for k, v in _metadata.items():
23
+ if k.endswith("rank"):
24
+ rankset.append(int(v))
25
+
26
+ assert len(set(rankset)) <= 1, "Rank should be the same per model"
27
+ if len(rankset) == 0:
28
+ rankset = [0]
29
+
30
+ total_rank += rankset[0]
31
+ _total_metadata.update(_metadata)
32
+ ranklist.append(rankset[0])
33
+
34
+ # remove metadata about tokens
35
+ for k, v in _total_metadata.items():
36
+ if v != "<embed>":
37
+ total_metadata[k] = v
38
+
39
+ tensorkeys = set()
40
+ for safelora in lora_safetenors:
41
+ tensorkeys.update(safelora.keys())
42
+
43
+ for keys in tensorkeys:
44
+ if keys.startswith("text_encoder") or keys.startswith("unet"):
45
+ tensorset = [safelora.get_tensor(keys) for safelora in lora_safetenors]
46
+
47
+ is_down = keys.endswith("down")
48
+
49
+ if is_down:
50
+ _tensor = torch.cat(tensorset, dim=0)
51
+ assert _tensor.shape[0] == total_rank
52
+ else:
53
+ _tensor = torch.cat(tensorset, dim=1)
54
+ assert _tensor.shape[1] == total_rank
55
+
56
+ total_tensor[keys] = _tensor
57
+ keys_rank = ":".join(keys.split(":")[:-1]) + ":rank"
58
+ total_metadata[keys_rank] = str(total_rank)
59
+ token_size_list = []
60
+ for idx, safelora in enumerate(lora_safetenors):
61
+ tokens = [k for k, v in safelora.metadata().items() if v == "<embed>"]
62
+ for jdx, token in enumerate(sorted(tokens)):
63
+
64
+ total_tensor[f"<s{idx}-{jdx}>"] = safelora.get_tensor(token)
65
+ total_metadata[f"<s{idx}-{jdx}>"] = "<embed>"
66
+
67
+ print(f"Embedding {token} replaced to <s{idx}-{jdx}>")
68
+
69
+ token_size_list.append(len(tokens))
70
+
71
+ return total_tensor, total_metadata, ranklist, token_size_list
72
+
73
+
74
+ class DummySafeTensorObject:
75
+ def __init__(self, tensor: dict, metadata):
76
+ self.tensor = tensor
77
+ self._metadata = metadata
78
+
79
+ def keys(self):
80
+ return self.tensor.keys()
81
+
82
+ def metadata(self):
83
+ return self._metadata
84
+
85
+ def get_tensor(self, key):
86
+ return self.tensor[key]
87
+
88
+
89
+ class LoRAManager:
90
+ def __init__(self, lora_paths_list: List[str], pipe: StableDiffusionPipeline):
91
+
92
+ self.lora_paths_list = lora_paths_list
93
+ self.pipe = pipe
94
+ self._setup()
95
+
96
+ def _setup(self):
97
+
98
+ self._lora_safetenors = [
99
+ safe_open(path, framework="pt", device="cpu")
100
+ for path in self.lora_paths_list
101
+ ]
102
+
103
+ (
104
+ total_tensor,
105
+ total_metadata,
106
+ self.ranklist,
107
+ self.token_size_list,
108
+ ) = lora_join(self._lora_safetenors)
109
+
110
+ self.total_safelora = DummySafeTensorObject(total_tensor, total_metadata)
111
+
112
+ monkeypatch_or_replace_safeloras(self.pipe, self.total_safelora)
113
+ tok_dict = parse_safeloras_embeds(self.total_safelora)
114
+
115
+ apply_learned_embed_in_clip(
116
+ tok_dict,
117
+ self.pipe.text_encoder,
118
+ self.pipe.tokenizer,
119
+ token=None,
120
+ idempotent=True,
121
+ )
122
+
123
+ def tune(self, scales):
124
+
125
+ assert len(scales) == len(
126
+ self.ranklist
127
+ ), "Scale list should be the same length as ranklist"
128
+
129
+ diags = []
130
+ for scale, rank in zip(scales, self.ranklist):
131
+ diags = diags + [scale] * rank
132
+
133
+ set_lora_diag(self.pipe.unet, torch.tensor(diags))
134
+
135
+ def prompt(self, prompt):
136
+ if prompt is not None:
137
+ for idx, tok_size in enumerate(self.token_size_list):
138
+ prompt = prompt.replace(
139
+ f"<{idx + 1}>",
140
+ "".join([f"<s{idx}-{jdx}>" for jdx in range(tok_size)]),
141
+ )
142
+ # TODO : Rescale LoRA + Text inputs based on prompt scale params
143
+
144
+ return prompt
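A hedged sketch of driving LoRAManager: the two .safetensors paths and the scale values are placeholders, and `<1>` / `<2>` are the per-file slots that prompt() rewrites into the re-labelled embedding tokens.

from diffusers import StableDiffusionPipeline
from lora_diffusion.lora_manager import LoRAManager

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")

# Join two separately trained LoRAs into one patched pipeline.
manager = LoRAManager(["./style.safetensors", "./subject.safetensors"], pipe)

# One scale per joined LoRA; each scale is expanded over that LoRA's rank
# and applied through the diagonal selector.
manager.tune([0.7, 1.0])

# "<1>" / "<2>" refer to the first and second LoRA's embedding tokens.
prompt = manager.prompt("a portrait of <2> in the style of <1>")
image = pipe(prompt, num_inference_steps=30).images[0]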
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/preprocess_files.py ADDED
@@ -0,0 +1,327 @@
1
+ # Have SwinIR upsample
2
+ # Have BLIP auto caption
3
+ # Have CLIPSeg auto mask concept
4
+
5
+ from typing import List, Literal, Union, Optional, Tuple
6
+ import os
7
+ from PIL import Image, ImageFilter
8
+ import torch
9
+ import numpy as np
10
+ import fire
11
+ from tqdm import tqdm
12
+ import glob
13
+ from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
14
+
15
+
16
+ @torch.no_grad()
17
+ def swin_ir_sr(
18
+ images: List[Image.Image],
19
+ model_id: Literal[
20
+ "caidas/swin2SR-classical-sr-x2-64", "caidas/swin2SR-classical-sr-x4-48"
21
+ ] = "caidas/swin2SR-classical-sr-x2-64",
22
+ target_size: Optional[Tuple[int, int]] = None,
23
+ device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
24
+ **kwargs,
25
+ ) -> List[Image.Image]:
26
+ """
27
+ Upscales images using SwinIR. Returns a list of PIL images.
28
+ """
29
+ # Swin2SR is only available in newer transformers releases, so it is imported lazily here.
30
+ from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor
31
+
32
+ model = Swin2SRForImageSuperResolution.from_pretrained(
33
+ model_id,
34
+ ).to(device)
35
+ processor = Swin2SRImageProcessor()
36
+
37
+ out_images = []
38
+
39
+ for image in tqdm(images):
40
+
41
+ ori_w, ori_h = image.size
42
+ if target_size is not None:
43
+ if ori_w >= target_size[0] and ori_h >= target_size[1]:
44
+ out_images.append(image)
45
+ continue
46
+
47
+ inputs = processor(image, return_tensors="pt").to(device)
48
+ with torch.no_grad():
49
+ outputs = model(**inputs)
50
+
51
+ output = (
52
+ outputs.reconstruction.data.squeeze().float().cpu().clamp_(0, 1).numpy()
53
+ )
54
+ output = np.moveaxis(output, source=0, destination=-1)
55
+ output = (output * 255.0).round().astype(np.uint8)
56
+ output = Image.fromarray(output)
57
+
58
+ out_images.append(output)
59
+
60
+ return out_images
61
+
62
+
63
+ @torch.no_grad()
64
+ def clipseg_mask_generator(
65
+ images: List[Image.Image],
66
+ target_prompts: Union[List[str], str],
67
+ model_id: Literal[
68
+ "CIDAS/clipseg-rd64-refined", "CIDAS/clipseg-rd16"
69
+ ] = "CIDAS/clipseg-rd64-refined",
70
+ device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
71
+ bias: float = 0.01,
72
+ temp: float = 1.0,
73
+ **kwargs,
74
+ ) -> List[Image.Image]:
75
+ """
76
+ Returns a greyscale mask for each image, where the mask is the probability of the target prompt being present in the image
77
+ """
78
+
79
+ if isinstance(target_prompts, str):
80
+ print(
81
+ f'Warning: only one target prompt "{target_prompts}" was given, so it will be used for all images'
82
+ )
83
+
84
+ target_prompts = [target_prompts] * len(images)
85
+
86
+ processor = CLIPSegProcessor.from_pretrained(model_id)
87
+ model = CLIPSegForImageSegmentation.from_pretrained(model_id).to(device)
88
+
89
+ masks = []
90
+
91
+ for image, prompt in tqdm(zip(images, target_prompts)):
92
+
93
+ original_size = image.size
94
+
95
+ inputs = processor(
96
+ text=[prompt, ""],
97
+ images=[image] * 2,
98
+ padding="max_length",
99
+ truncation=True,
100
+ return_tensors="pt",
101
+ ).to(device)
102
+
103
+ outputs = model(**inputs)
104
+
105
+ logits = outputs.logits
106
+ probs = torch.nn.functional.softmax(logits / temp, dim=0)[0]
107
+ probs = (probs + bias).clamp_(0, 1)
108
+ probs = 255 * probs / probs.max()
109
+
110
+ # make mask greyscale
111
+ mask = Image.fromarray(probs.cpu().numpy()).convert("L")
112
+
113
+ # resize mask to original size
114
+ mask = mask.resize(original_size)
115
+
116
+ masks.append(mask)
117
+
118
+ return masks
119
+
120
+
121
+ @torch.no_grad()
122
+ def blip_captioning_dataset(
123
+ images: List[Image.Image],
124
+ text: Optional[str] = None,
125
+ model_id: Literal[
126
+ "Salesforce/blip-image-captioning-large",
127
+ "Salesforce/blip-image-captioning-base",
128
+ ] = "Salesforce/blip-image-captioning-large",
129
+ device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
130
+ **kwargs,
131
+ ) -> List[str]:
132
+ """
133
+ Returns a list of captions for the given images
134
+ """
135
+
136
+ from transformers import BlipProcessor, BlipForConditionalGeneration
137
+
138
+ processor = BlipProcessor.from_pretrained(model_id)
139
+ model = BlipForConditionalGeneration.from_pretrained(model_id).to(device)
140
+ captions = []
141
+
142
+ for image in tqdm(images):
143
+ inputs = processor(image, text=text, return_tensors="pt").to(device)
144
+ out = model.generate(
145
+ **inputs, max_length=150, do_sample=True, top_k=50, temperature=0.7
146
+ )
147
+ caption = processor.decode(out[0], skip_special_tokens=True)
148
+
149
+ captions.append(caption)
150
+
151
+ return captions
152
+
153
+
154
+ def face_mask_google_mediapipe(
155
+ images: List[Image.Image], blur_amount: float = 80.0, bias: float = 0.05
156
+ ) -> List[Image.Image]:
157
+ """
158
+ Returns a list of images with mask on the face parts.
159
+ """
160
+ import mediapipe as mp
161
+
162
+ mp_face_detection = mp.solutions.face_detection
163
+
164
+ face_detection = mp_face_detection.FaceDetection(
165
+ model_selection=1, min_detection_confidence=0.5
166
+ )
167
+
168
+ masks = []
169
+ for image in tqdm(images):
170
+
171
+ image = np.array(image)
172
+
173
+ results = face_detection.process(image)
174
+ black_image = np.ones((image.shape[0], image.shape[1]), dtype=np.uint8)
175
+
176
+ if results.detections:
177
+
178
+ for detection in results.detections:
179
+
180
+ x_min = int(
181
+ detection.location_data.relative_bounding_box.xmin * image.shape[1]
182
+ )
183
+ y_min = int(
184
+ detection.location_data.relative_bounding_box.ymin * image.shape[0]
185
+ )
186
+ width = int(
187
+ detection.location_data.relative_bounding_box.width * image.shape[1]
188
+ )
189
+ height = int(
190
+ detection.location_data.relative_bounding_box.height
191
+ * image.shape[0]
192
+ )
193
+
194
+ # draw the colored rectangle
195
+ black_image[y_min : y_min + height, x_min : x_min + width] = 255
196
+
197
+ black_image = Image.fromarray(black_image)
198
+ masks.append(black_image)
199
+
200
+ return masks
201
+
202
+
203
+ def _crop_to_square(
204
+ image: Image.Image, com: List[Tuple[int, int]], resize_to: Optional[int] = None
205
+ ):
206
+ cx, cy = com
207
+ width, height = image.size
208
+ if width > height:
209
+ left_possible = max(cx - height / 2, 0)
210
+ left = min(left_possible, width - height)
211
+ right = left + height
212
+ top = 0
213
+ bottom = height
214
+ else:
215
+ left = 0
216
+ right = width
217
+ top_possible = max(cy - width / 2, 0)
218
+ top = min(top_possible, height - width)
219
+ bottom = top + width
220
+
221
+ image = image.crop((left, top, right, bottom))
222
+
223
+ if resize_to:
224
+ image = image.resize((resize_to, resize_to), Image.Resampling.LANCZOS)
225
+
226
+ return image
227
+
228
+
229
+ def _center_of_mass(mask: Image.Image):
230
+ """
231
+ Returns the center of mass of the mask
232
+ """
233
+ x, y = np.meshgrid(np.arange(mask.size[0]), np.arange(mask.size[1]))
234
+
235
+ x_ = x * np.array(mask)
236
+ y_ = y * np.array(mask)
237
+
238
+ x = np.sum(x_) / np.sum(mask)
239
+ y = np.sum(y_) / np.sum(mask)
240
+
241
+ return x, y
242
+
243
+
244
+ def load_and_save_masks_and_captions(
245
+ files: Union[str, List[str]],
246
+ output_dir: str,
247
+ caption_text: Optional[str] = None,
248
+ target_prompts: Optional[Union[List[str], str]] = None,
249
+ target_size: int = 512,
250
+ crop_based_on_salience: bool = True,
251
+ use_face_detection_instead: bool = False,
252
+ temp: float = 1.0,
253
+ n_length: int = -1,
254
+ ):
255
+ """
256
+ Loads images from the given files, generates masks for them, and saves the masks and captions and upscale images
257
+ to output dir.
258
+ """
259
+ os.makedirs(output_dir, exist_ok=True)
260
+
261
+ # load images
262
+ if isinstance(files, str):
263
+ # check if it is a directory
264
+ if os.path.isdir(files):
265
+ # get all the .png .jpg in the directory
266
+ files = glob.glob(os.path.join(files, "*.png")) + glob.glob(
267
+ os.path.join(files, "*.jpg")
268
+ )
269
+
270
+ if len(files) == 0:
271
+ raise Exception(
272
+ f"No files found in {files}. Either {files} is not a directory or it does not contain any .png or .jpg files."
273
+ )
274
+ if n_length == -1:
275
+ n_length = len(files)
276
+ files = sorted(files)[:n_length]
277
+
278
+ images = [Image.open(file) for file in files]
279
+
280
+ # captions
281
+ print(f"Generating {len(images)} captions...")
282
+ captions = blip_captioning_dataset(images, text=caption_text)
283
+
284
+ if target_prompts is None:
285
+ target_prompts = captions
286
+
287
+ print(f"Generating {len(images)} masks...")
288
+ if not use_face_detection_instead:
289
+ seg_masks = clipseg_mask_generator(
290
+ images=images, target_prompts=target_prompts, temp=temp
291
+ )
292
+ else:
293
+ seg_masks = face_mask_google_mediapipe(images=images)
294
+
295
+ # find the center of mass of the mask
296
+ if crop_based_on_salience:
297
+ coms = [_center_of_mass(mask) for mask in seg_masks]
298
+ else:
299
+ coms = [(image.size[0] / 2, image.size[1] / 2) for image in images]
300
+ # based on the center of mass, crop the image to a square
301
+ images = [
302
+ _crop_to_square(image, com, resize_to=None) for image, com in zip(images, coms)
303
+ ]
304
+
305
+ print(f"Upscaling {len(images)} images...")
306
+ # upscale images anyways
307
+ images = swin_ir_sr(images, target_size=(target_size, target_size))
308
+ images = [
309
+ image.resize((target_size, target_size), Image.Resampling.LANCZOS)
310
+ for image in images
311
+ ]
312
+
313
+ seg_masks = [
314
+ _crop_to_square(mask, com, resize_to=target_size)
315
+ for mask, com in zip(seg_masks, coms)
316
+ ]
317
+ with open(os.path.join(output_dir, "caption.txt"), "w") as f:
318
+ # save images and masks
319
+ for idx, (image, mask, caption) in enumerate(zip(images, seg_masks, captions)):
320
+ image.save(os.path.join(output_dir, f"{idx}.src.jpg"), quality=99)
321
+ mask.save(os.path.join(output_dir, f"{idx}.mask.png"))
322
+
323
+ f.write(caption + "\n")
324
+
325
+
326
+ def main():
327
+ fire.Fire(load_and_save_masks_and_captions)
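Because the entry point is wrapped with python-fire, the same preprocessing can also be called directly from Python. In the sketch below the input and output directories are placeholders, and the BLIP / CLIPSeg / Swin2SR weights are downloaded on first use.

from lora_diffusion.preprocess_files import load_and_save_masks_and_captions

# Reads every .png/.jpg in the folder, captions it with BLIP, masks the subject
# with CLIPSeg, crops around the mask's center of mass and upscales to 512x512.
load_and_save_masks_and_captions(
    files="./raw_photos",
    output_dir="./preprocessed",
    caption_text=None,                 # let BLIP caption freely
    target_size=512,
    use_face_detection_instead=False,  # set True to mask faces via mediapipe instead
)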
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/safe_open.py ADDED
@@ -0,0 +1,68 @@
1
+ """
2
+ Pure python version of Safetensors safe_open
3
+ From https://gist.github.com/Narsil/3edeec2669a5e94e4707aa0f901d2282
4
+ """
5
+
6
+ import json
7
+ import mmap
8
+ import os
9
+
10
+ import torch
11
+
12
+
13
+ class SafetensorsWrapper:
14
+ def __init__(self, metadata, tensors):
15
+ self._metadata = metadata
16
+ self._tensors = tensors
17
+
18
+ def metadata(self):
19
+ return self._metadata
20
+
21
+ def keys(self):
22
+ return self._tensors.keys()
23
+
24
+ def get_tensor(self, k):
25
+ return self._tensors[k]
26
+
27
+
28
+ DTYPES = {
29
+ "F32": torch.float32,
30
+ "F16": torch.float16,
31
+ "BF16": torch.bfloat16,
32
+ }
33
+
34
+
35
+ def create_tensor(storage, info, offset):
36
+ dtype = DTYPES[info["dtype"]]
37
+ shape = info["shape"]
38
+ start, stop = info["data_offsets"]
39
+ return (
40
+ torch.asarray(storage[start + offset : stop + offset], dtype=torch.uint8)
41
+ .view(dtype=dtype)
42
+ .reshape(shape)
43
+ )
44
+
45
+
46
+ def safe_open(filename, framework="pt", device="cpu"):
47
+ if framework != "pt":
48
+ raise ValueError("`framework` must be 'pt'")
49
+
50
+ with open(filename, mode="r", encoding="utf8") as file_obj:
51
+ with mmap.mmap(file_obj.fileno(), length=0, access=mmap.ACCESS_READ) as m:
52
+ header = m.read(8)
53
+ n = int.from_bytes(header, "little")
54
+ metadata_bytes = m.read(n)
55
+ metadata = json.loads(metadata_bytes)
56
+
57
+ size = os.stat(filename).st_size
58
+ storage = torch.ByteStorage.from_file(filename, shared=False, size=size).untyped()
59
+ offset = n + 8
60
+
61
+ return SafetensorsWrapper(
62
+ metadata=metadata.get("__metadata__", {}),
63
+ tensors={
64
+ name: create_tensor(storage, info, offset).to(device)
65
+ for name, info in metadata.items()
66
+ if name != "__metadata__"
67
+ },
68
+ )
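A small sketch of reading a LoRA file with this pure-python reader; the filename is a placeholder and the printed metadata keys depend on what was saved.

from lora_diffusion.safe_open import safe_open

st = safe_open("./lora.safetensors", framework="pt", device="cpu")
print(st.metadata())  # e.g. {"unet": "[...]", "unet:0:rank": "4", ...}
for key in st.keys():
    tensor = st.get_tensor(key)
    print(key, tuple(tensor.shape), tensor.dtype)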
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/to_ckpt_v2.py ADDED
@@ -0,0 +1,232 @@
1
+ # from https://gist.github.com/jachiam/8a5c0b607e38fcc585168b90c686eb05
2
+ # Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
3
+ # *Only* converts the UNet, VAE, and Text Encoder.
4
+ # Does not convert optimizer state or any other thing.
5
+ # Written by jachiam
6
+ import argparse
7
+ import os.path as osp
8
+
9
+ import torch
10
+
11
+
12
+ # =================#
13
+ # UNet Conversion #
14
+ # =================#
15
+
16
+ unet_conversion_map = [
17
+ # (stable-diffusion, HF Diffusers)
18
+ ("time_embed.0.weight", "time_embedding.linear_1.weight"),
19
+ ("time_embed.0.bias", "time_embedding.linear_1.bias"),
20
+ ("time_embed.2.weight", "time_embedding.linear_2.weight"),
21
+ ("time_embed.2.bias", "time_embedding.linear_2.bias"),
22
+ ("input_blocks.0.0.weight", "conv_in.weight"),
23
+ ("input_blocks.0.0.bias", "conv_in.bias"),
24
+ ("out.0.weight", "conv_norm_out.weight"),
25
+ ("out.0.bias", "conv_norm_out.bias"),
26
+ ("out.2.weight", "conv_out.weight"),
27
+ ("out.2.bias", "conv_out.bias"),
28
+ ]
29
+
30
+ unet_conversion_map_resnet = [
31
+ # (stable-diffusion, HF Diffusers)
32
+ ("in_layers.0", "norm1"),
33
+ ("in_layers.2", "conv1"),
34
+ ("out_layers.0", "norm2"),
35
+ ("out_layers.3", "conv2"),
36
+ ("emb_layers.1", "time_emb_proj"),
37
+ ("skip_connection", "conv_shortcut"),
38
+ ]
39
+
40
+ unet_conversion_map_layer = []
41
+ # hardcoded number of downblocks and resnets/attentions...
42
+ # would need smarter logic for other networks.
43
+ for i in range(4):
44
+ # loop over downblocks/upblocks
45
+
46
+ for j in range(2):
47
+ # loop over resnets/attentions for downblocks
48
+ hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
49
+ sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
50
+ unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
51
+
52
+ if i < 3:
53
+ # no attention layers in down_blocks.3
54
+ hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
55
+ sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
56
+ unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
57
+
58
+ for j in range(3):
59
+ # loop over resnets/attentions for upblocks
60
+ hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
61
+ sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
62
+ unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
63
+
64
+ if i > 0:
65
+ # no attention layers in up_blocks.0
66
+ hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
67
+ sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
68
+ unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
69
+
70
+ if i < 3:
71
+ # no downsample in down_blocks.3
72
+ hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
73
+ sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
74
+ unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
75
+
76
+ # no upsample in up_blocks.3
77
+ hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
78
+ sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
79
+ unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
80
+
81
+ hf_mid_atn_prefix = "mid_block.attentions.0."
82
+ sd_mid_atn_prefix = "middle_block.1."
83
+ unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
84
+
85
+ for j in range(2):
86
+ hf_mid_res_prefix = f"mid_block.resnets.{j}."
87
+ sd_mid_res_prefix = f"middle_block.{2*j}."
88
+ unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
89
+
90
+
91
+ def convert_unet_state_dict(unet_state_dict):
92
+ # buyer beware: this is a *brittle* function,
93
+ # and correct output requires that all of these pieces interact in
94
+ # the exact order in which I have arranged them.
95
+ mapping = {k: k for k in unet_state_dict.keys()}
96
+ for sd_name, hf_name in unet_conversion_map:
97
+ mapping[hf_name] = sd_name
98
+ for k, v in mapping.items():
99
+ if "resnets" in k:
100
+ for sd_part, hf_part in unet_conversion_map_resnet:
101
+ v = v.replace(hf_part, sd_part)
102
+ mapping[k] = v
103
+ for k, v in mapping.items():
104
+ for sd_part, hf_part in unet_conversion_map_layer:
105
+ v = v.replace(hf_part, sd_part)
106
+ mapping[k] = v
107
+ new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
108
+ return new_state_dict
109
+
110
+
111
+ # ================#
112
+ # VAE Conversion #
113
+ # ================#
114
+
115
+ vae_conversion_map = [
116
+ # (stable-diffusion, HF Diffusers)
117
+ ("nin_shortcut", "conv_shortcut"),
118
+ ("norm_out", "conv_norm_out"),
119
+ ("mid.attn_1.", "mid_block.attentions.0."),
120
+ ]
121
+
122
+ for i in range(4):
123
+ # down_blocks have two resnets
124
+ for j in range(2):
125
+ hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
126
+ sd_down_prefix = f"encoder.down.{i}.block.{j}."
127
+ vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
128
+
129
+ if i < 3:
130
+ hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
131
+ sd_downsample_prefix = f"down.{i}.downsample."
132
+ vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
133
+
134
+ hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
135
+ sd_upsample_prefix = f"up.{3-i}.upsample."
136
+ vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
137
+
138
+ # up_blocks have three resnets
139
+ # also, up blocks in hf are numbered in reverse from sd
140
+ for j in range(3):
141
+ hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
142
+ sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
143
+ vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
144
+
145
+ # this part accounts for mid blocks in both the encoder and the decoder
146
+ for i in range(2):
147
+ hf_mid_res_prefix = f"mid_block.resnets.{i}."
148
+ sd_mid_res_prefix = f"mid.block_{i+1}."
149
+ vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
150
+
151
+
152
+ vae_conversion_map_attn = [
153
+ # (stable-diffusion, HF Diffusers)
154
+ ("norm.", "group_norm."),
155
+ ("q.", "query."),
156
+ ("k.", "key."),
157
+ ("v.", "value."),
158
+ ("proj_out.", "proj_attn."),
159
+ ]
160
+
161
+
162
+ def reshape_weight_for_sd(w):
163
+ # convert HF linear weights to SD conv2d weights
164
+ return w.reshape(*w.shape, 1, 1)
165
+
166
+
167
+ def convert_vae_state_dict(vae_state_dict):
168
+ mapping = {k: k for k in vae_state_dict.keys()}
169
+ for k, v in mapping.items():
170
+ for sd_part, hf_part in vae_conversion_map:
171
+ v = v.replace(hf_part, sd_part)
172
+ mapping[k] = v
173
+ for k, v in mapping.items():
174
+ if "attentions" in k:
175
+ for sd_part, hf_part in vae_conversion_map_attn:
176
+ v = v.replace(hf_part, sd_part)
177
+ mapping[k] = v
178
+ new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
179
+ weights_to_convert = ["q", "k", "v", "proj_out"]
180
+ for k, v in new_state_dict.items():
181
+ for weight_name in weights_to_convert:
182
+ if f"mid.attn_1.{weight_name}.weight" in k:
183
+ print(f"Reshaping {k} for SD format")
184
+ new_state_dict[k] = reshape_weight_for_sd(v)
185
+ return new_state_dict
186
+
187
+
188
+ # =========================#
189
+ # Text Encoder Conversion #
190
+ # =========================#
191
+ # pretty much a no-op
192
+
193
+
194
+ def convert_text_enc_state_dict(text_enc_dict):
195
+ return text_enc_dict
196
+
197
+
198
+ def convert_to_ckpt(model_path, checkpoint_path, as_half):
199
+
200
+ assert model_path is not None, "Must provide a model path!"
201
+
202
+ assert checkpoint_path is not None, "Must provide a checkpoint path!"
203
+
204
+ unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin")
205
+ vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin")
206
+ text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin")
207
+
208
+ # Convert the UNet model
209
+ unet_state_dict = torch.load(unet_path, map_location="cpu")
210
+ unet_state_dict = convert_unet_state_dict(unet_state_dict)
211
+ unet_state_dict = {
212
+ "model.diffusion_model." + k: v for k, v in unet_state_dict.items()
213
+ }
214
+
215
+ # Convert the VAE model
216
+ vae_state_dict = torch.load(vae_path, map_location="cpu")
217
+ vae_state_dict = convert_vae_state_dict(vae_state_dict)
218
+ vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
219
+
220
+ # Convert the text encoder model
221
+ text_enc_dict = torch.load(text_enc_path, map_location="cpu")
222
+ text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
223
+ text_enc_dict = {
224
+ "cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()
225
+ }
226
+
227
+ # Put together new checkpoint
228
+ state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
229
+ if as_half:
230
+ state_dict = {k: v.half() for k, v in state_dict.items()}
231
+ state_dict = {"state_dict": state_dict}
232
+ torch.save(state_dict, checkpoint_path)
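convert_to_ckpt expects a pipeline that was saved in the Diffusers folder layout; both paths in this sketch are placeholders.

from lora_diffusion.to_ckpt_v2 import convert_to_ckpt

# model_path must contain unet/, vae/ and text_encoder/ subfolders with
# diffusion_pytorch_model.bin / pytorch_model.bin weights inside.
convert_to_ckpt(
    model_path="./my-finetuned-pipeline",
    checkpoint_path="./my-finetuned-model.ckpt",
    as_half=True,  # store fp16 weights to roughly halve the checkpoint size
)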
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/utils.py ADDED
@@ -0,0 +1,218 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ from typing import List, Union
5
+
6
+ import torch
7
+ from PIL import Image
8
+ from transformers import (
9
+ CLIPProcessor,
10
+ CLIPTextModelWithProjection,
11
+ CLIPTokenizer,
12
+ CLIPVisionModelWithProjection,
13
+ )
14
+
15
+ from diffusers import StableDiffusionPipeline
16
+ from .lora import patch_pipe, tune_lora_scale, _text_lora_path, _ti_lora_path
17
+ import os
18
+ import glob
19
+ import math
20
+
21
+ EXAMPLE_PROMPTS = [
22
+ "<obj> swimming in a pool",
23
+ "<obj> at a beach with a view of seashore",
24
+ "<obj> in times square",
25
+ "<obj> wearing sunglasses",
26
+ "<obj> in a construction outfit",
27
+ "<obj> playing with a ball",
28
+ "<obj> wearing headphones",
29
+ "<obj> oil painting ghibli inspired",
30
+ "<obj> working on the laptop",
31
+ "<obj> with mountains and sunset in background",
32
+ "Painting of <obj> at a beach by artist claude monet",
33
+ "<obj> digital painting 3d render geometric style",
34
+ "A screaming <obj>",
35
+ "A depressed <obj>",
36
+ "A sleeping <obj>",
37
+ "A sad <obj>",
38
+ "A joyous <obj>",
39
+ "A frowning <obj>",
40
+ "A sculpture of <obj>",
41
+ "<obj> near a pool",
42
+ "<obj> at a beach with a view of seashore",
43
+ "<obj> in a garden",
44
+ "<obj> in grand canyon",
45
+ "<obj> floating in ocean",
46
+ "<obj> and an armchair",
47
+ "A maple tree on the side of <obj>",
48
+ "<obj> and an orange sofa",
49
+ "<obj> with chocolate cake on it",
50
+ "<obj> with a vase of rose flowers on it",
51
+ "A digital illustration of <obj>",
52
+ "Georgia O'Keeffe style <obj> painting",
53
+ "A watercolor painting of <obj> on a beach",
54
+ ]
55
+
56
+
57
+ def image_grid(_imgs, rows=None, cols=None):
58
+
59
+ if rows is None and cols is None:
60
+ rows = cols = math.ceil(len(_imgs) ** 0.5)
61
+
62
+ if rows is None:
63
+ rows = math.ceil(len(_imgs) / cols)
64
+ if cols is None:
65
+ cols = math.ceil(len(_imgs) / rows)
66
+
67
+ w, h = _imgs[0].size
68
+ grid = Image.new("RGB", size=(cols * w, rows * h))
69
+ grid_w, grid_h = grid.size
70
+
71
+ for i, img in enumerate(_imgs):
72
+ grid.paste(img, box=(i % cols * w, i // cols * h))
73
+ return grid
74
+
75
+
76
+ def text_img_alignment(img_embeds, text_embeds, target_img_embeds):
77
+ # evaluation inspired from textual inversion paper
78
+ # https://arxiv.org/abs/2208.01618
79
+
80
+ # text alignment
81
+ assert img_embeds.shape[0] == text_embeds.shape[0]
82
+ text_img_sim = (img_embeds * text_embeds).sum(dim=-1) / (
83
+ img_embeds.norm(dim=-1) * text_embeds.norm(dim=-1)
84
+ )
85
+
86
+ # image alignment
87
+ img_embed_normalized = img_embeds / img_embeds.norm(dim=-1, keepdim=True)
88
+
89
+ avg_target_img_embed = (
90
+ (target_img_embeds / target_img_embeds.norm(dim=-1, keepdim=True))
91
+ .mean(dim=0)
92
+ .unsqueeze(0)
93
+ .repeat(img_embeds.shape[0], 1)
94
+ )
95
+
96
+ img_img_sim = (img_embed_normalized * avg_target_img_embed).sum(dim=-1)
97
+
98
+ return {
99
+ "text_alignment_avg": text_img_sim.mean().item(),
100
+ "image_alignment_avg": img_img_sim.mean().item(),
101
+ "text_alignment_all": text_img_sim.tolist(),
102
+ "image_alignment_all": img_img_sim.tolist(),
103
+ }
104
+
105
+
106
+ def prepare_clip_model_sets(eval_clip_id: str = "openai/clip-vit-large-patch14"):
107
+ text_model = CLIPTextModelWithProjection.from_pretrained(eval_clip_id)
108
+ tokenizer = CLIPTokenizer.from_pretrained(eval_clip_id)
109
+ vis_model = CLIPVisionModelWithProjection.from_pretrained(eval_clip_id)
110
+ processor = CLIPProcessor.from_pretrained(eval_clip_id)
111
+
112
+ return text_model, tokenizer, vis_model, processor
113
+
114
+
115
+ def evaluate_pipe(
116
+ pipe,
117
+ target_images: List[Image.Image],
118
+ class_token: str = "",
119
+ learnt_token: str = "",
120
+ guidance_scale: float = 5.0,
121
+ seed=0,
122
+ clip_model_sets=None,
123
+ eval_clip_id: str = "openai/clip-vit-large-patch14",
124
+ n_test: int = 10,
125
+ n_step: int = 50,
126
+ ):
127
+
128
+ if clip_model_sets is not None:
129
+ text_model, tokenizer, vis_model, processor = clip_model_sets
130
+ else:
131
+ text_model, tokenizer, vis_model, processor = prepare_clip_model_sets(
132
+ eval_clip_id
133
+ )
134
+
135
+ images = []
136
+ prompts = []
137
+ img_embeds = []
138
+ text_embeds = []
139
+ for prompt in EXAMPLE_PROMPTS[:n_test]:
140
+ prompt = prompt.replace("<obj>", learnt_token)
141
+ torch.manual_seed(seed)
142
+ with torch.autocast("cuda"):
143
+ img = pipe(
144
+ prompt, num_inference_steps=n_step, guidance_scale=guidance_scale
145
+ ).images[0]
146
+ images.append(img)
147
+ prompts.append(prompt)
148
+ # image
149
+ inputs = processor(images=img, return_tensors="pt")
150
+ img_embed = vis_model(**inputs).image_embeds
151
+ img_embeds.append(img_embed)
152
+
153
+ prompt = prompt.replace(learnt_token, class_token)
154
+ # prompts
155
+ inputs = tokenizer([prompt], padding=True, return_tensors="pt")
156
+ outputs = text_model(**inputs)
157
+ text_embed = outputs.text_embeds
158
+ text_embeds.append(text_embed)
159
+
160
+ # target images
161
+ inputs = processor(images=target_images, return_tensors="pt")
162
+ target_img_embeds = vis_model(**inputs).image_embeds
163
+
164
+ img_embeds = torch.cat(img_embeds, dim=0)
165
+ text_embeds = torch.cat(text_embeds, dim=0)
166
+
167
+ return text_img_alignment(img_embeds, text_embeds, target_img_embeds),list(zip(images,prompts))
168
+
169
+
170
+ def visualize_progress(
171
+ path_alls: Union[str, List[str]],
172
+ prompt: str,
173
+ model_id: str = "runwayml/stable-diffusion-v1-5",
174
+ device="cuda:0",
175
+ patch_unet=True,
176
+ patch_text=True,
177
+ patch_ti=True,
178
+ unet_scale=1.0,
179
+ text_scale=1.0,
180
+ num_inference_steps=50,
181
+ guidance_scale=5.0,
182
+ offset: int = 0,
183
+ limit: int = 10,
184
+ seed: int = 0,
185
+ ):
186
+
187
+ imgs = []
188
+ if isinstance(path_alls, str):
189
+ alls = list(set(glob.glob(path_alls)))
190
+
191
+ alls.sort(key=os.path.getmtime)
192
+ else:
193
+ alls = path_alls
194
+
195
+ pipe = StableDiffusionPipeline.from_pretrained(
196
+ model_id, torch_dtype=torch.float16
197
+ ).to(device)
198
+
199
+ print(f"Found {len(alls)} checkpoints")
200
+ for path in alls[offset:limit]:
201
+ print(path)
202
+
203
+ patch_pipe(
204
+ pipe, path, patch_unet=patch_unet, patch_text=patch_text, patch_ti=patch_ti
205
+ )
206
+
207
+ tune_lora_scale(pipe.unet, unet_scale)
208
+ tune_lora_scale(pipe.text_encoder, text_scale)
209
+
210
+ torch.manual_seed(seed)
211
+ image = pipe(
212
+ prompt,
213
+ num_inference_steps=num_inference_steps,
214
+ guidance_scale=guidance_scale,
215
+ ).images[0]
216
+ imgs.append(image)
217
+
218
+ return imgs
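A hedged sketch of turning a training run's saved checkpoints into a single contact sheet; the glob pattern, prompt and output filename are placeholders.

from lora_diffusion.utils import image_grid, visualize_progress

imgs = visualize_progress(
    path_alls="./output/*.safetensors",  # every saved checkpoint, sorted by mtime
    prompt="a photo of <s1> on a beach",
    model_id="runwayml/stable-diffusion-v1-5",
    device="cuda:0",
    limit=8,
)
image_grid(imgs, rows=2, cols=4).save("progress.jpg")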
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/lora_diffusion/xformers_utils.py ADDED
@@ -0,0 +1,70 @@
1
+ import functools
2
+
3
+ import torch
4
+ from diffusers.models.attention import BasicTransformerBlock
5
+ from diffusers.utils.import_utils import is_xformers_available
6
+
7
+ from .lora import LoraInjectedLinear
8
+
9
+ if is_xformers_available():
10
+ import xformers
11
+ import xformers.ops
12
+ else:
13
+ xformers = None
14
+
15
+
16
+ @functools.cache
17
+ def test_xformers_backwards(size):
18
+ @torch.enable_grad()
19
+ def _grad(size):
20
+ q = torch.randn((1, 4, size), device="cuda")
21
+ k = torch.randn((1, 4, size), device="cuda")
22
+ v = torch.randn((1, 4, size), device="cuda")
23
+
24
+ q = q.detach().requires_grad_()
25
+ k = k.detach().requires_grad_()
26
+ v = v.detach().requires_grad_()
27
+
28
+ out = xformers.ops.memory_efficient_attention(q, k, v)
29
+ loss = out.sum(2).mean(0).sum()
30
+
31
+ return torch.autograd.grad(loss, v)
32
+
33
+ try:
34
+ _grad(size)
35
+ print(size, "pass")
36
+ return True
37
+ except Exception as e:
38
+ print(size, "fail")
39
+ return False
40
+
41
+
42
+ def set_use_memory_efficient_attention_xformers(
43
+ module: torch.nn.Module, valid: bool
44
+ ) -> None:
45
+ def fn_test_dim_head(module: torch.nn.Module):
46
+ if isinstance(module, BasicTransformerBlock):
47
+ # dim_head isn't stored anywhere, so back-calculate
48
+ source = module.attn1.to_v
49
+ if isinstance(source, LoraInjectedLinear):
50
+ source = source.linear
51
+
52
+ dim_head = source.out_features // module.attn1.heads
53
+
54
+ result = test_xformers_backwards(dim_head)
55
+
56
+ # If the backward test fails for this head dim, turn xformers off for this block
57
+ if not result:
58
+ module.set_use_memory_efficient_attention_xformers(False)
59
+
60
+ for child in module.children():
61
+ fn_test_dim_head(child)
62
+
63
+ if not is_xformers_available() and valid:
64
+ print("XFormers is not available. Skipping.")
65
+ return
66
+
67
+ module.set_use_memory_efficient_attention_xformers(valid)
68
+
69
+ if valid:
70
+ fn_test_dim_head(module)
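A hedged sketch of applying `set_use_memory_efficient_attention_xformers` to a diffusers UNet (requires a CUDA device plus the xformers package; the checkpoint id is illustrative):

    from diffusers import UNet2DConditionModel
    from lora_diffusion.xformers_utils import set_use_memory_efficient_attention_xformers

    unet = UNet2DConditionModel.from_pretrained(
        "runwayml/stable-diffusion-v1-5", subfolder="unet"  # illustrative checkpoint
    ).to("cuda")
    # Enables xformers attention, then probes each BasicTransformerBlock's head dim
    # and falls back to standard attention wherever the backward test fails.
    set_use_memory_efficient_attention_xformers(unet, valid=True)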
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/requirements.txt ADDED
@@ -0,0 +1,15 @@
1
+ lightning==2.1.2
2
+ lightning-habana==1.3.0
3
+ diffusers>=0.11.0,<0.21.0
4
+ transformers>=4.25.1
5
+ scipy
6
+ ftfy
7
+ fire
8
+ wandb
9
+ safetensors
10
+ opencv-python
11
+ torchvision
12
+ mediapipe
13
+ triton
14
+ tensorboard
15
+ accelerate
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/setup.py ADDED
@@ -0,0 +1,28 @@
1
+ import os
2
+
3
+ import pkg_resources
4
+ from setuptools import find_packages, setup
5
+
6
+ setup(
7
+ name="lora_diffusion",
8
+ py_modules=["lora_diffusion"],
9
+ version="0.1.7",
10
+ description="Low Rank Adaptation for Diffusion Models. Works with Stable Diffusion out-of-the-box.",
11
+ author="Simo Ryu",
12
+ packages=find_packages(),
13
+ entry_points={
14
+ "console_scripts": [
15
+ "lora_add = lora_diffusion.cli_lora_add:main",
16
+ "lora_pti = lora_diffusion.cli_lora_pti:main",
17
+ "lora_distill = lora_diffusion.cli_svd:main",
18
+ "lora_ppim = lora_diffusion.preprocess_files:main",
19
+ ],
20
+ },
21
+ install_requires=[
22
+ str(r)
23
+ for r in pkg_resources.parse_requirements(
24
+ open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
25
+ )
26
+ ],
27
+ include_package_data=True,
28
+ )
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/stability_ft.sh ADDED
@@ -0,0 +1,49 @@
1
+ #!/bin/bash
2
+ ###############################################################################
3
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
4
+ ###############################################################################
5
+ export PATH=$PATH:~/.local/bin
6
+ #export WANDB_API_KEY=
7
+ export MODEL_NAME="stabilityai/stable-diffusion-2-1-base"
8
+ export INSTANCE_DIR='data/bo'
9
+ export OUTPUT_DIR='results/bo'
10
+ #export MASK_DIR=$SM_CHANNEL_MASK
11
+ export TRAIN_STEPS_TUNING=1000
12
+ export SM_MODEL_DIR=${OUTPUT_DIR}
13
+ mkdir -p $OUTPUT_DIR
14
+
15
+ lora_pti \
16
+ --pretrained_model_name_or_path=$MODEL_NAME \
17
+ --instance_data_dir=$INSTANCE_DIR \
18
+ --output_dir=$OUTPUT_DIR \
19
+ --use_face_segmentation_condition \
20
+ --resolution=512 \
21
+ --train_batch_size=7 \
22
+ --gradient_accumulation_steps=1 \
23
+ --learning_rate_unet=5e-5 \
24
+ --learning_rate_ti=2e-3 \
25
+ --color_jitter \
26
+ --lr_scheduler="linear" --lr_scheduler_lora="linear"\
27
+ --lr_warmup_steps=0 \
28
+ --placeholder_tokens="<s1>|<s2>" \
29
+ --use_template="object"\
30
+ --save_steps=50 \
31
+ --max_train_steps_ti=500 \
32
+ --max_train_steps_tuning=$TRAIN_STEPS_TUNING \
33
+ --perform_inversion=True \
34
+ --clip_ti_decay \
35
+ --weight_decay_ti=0.000 \
36
+ --weight_decay_lora=0.001\
37
+ --continue_inversion \
38
+ --continue_inversion_lr=1e-3 \
39
+ --device="hpu" \
40
+ --lora_rank=16 \
41
+ --use_lazy_mode=True \
42
+ --use_fused_adamw=True \
43
+ --print_freq=50 \
44
+ --use_fused_clip_norm=True \
45
+ 2>&1 |tee log_1x_ft.txt
46
+
47
+
48
+
49
+ cp $OUTPUT_DIR/step_$TRAIN_STEPS_TUNING.safetensors $SM_MODEL_DIR/lora.safetensors
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/stability_ft_cpu.sh ADDED
@@ -0,0 +1,45 @@
1
+ #!/bin/bash
2
+ ###############################################################################
3
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
4
+ ###############################################################################
5
+ export PATH=$PATH:~/.local/bin
6
+ #export WANDB_API_KEY=
7
+ export MODEL_NAME="stabilityai/stable-diffusion-2-1-base"
8
+ export INSTANCE_DIR='data/bo'
9
+ export OUTPUT_DIR='results/bo'
10
+ #export MASK_DIR=$SM_CHANNEL_MASK
11
+ export TRAIN_STEPS_TI=5
12
+ export TRAIN_STEPS_TUNING=10
13
+ export SM_MODEL_DIR=${OUTPUT_DIR}
14
+ mkdir -p $OUTPUT_DIR
15
+
16
+ lora_pti \
17
+ --pretrained_model_name_or_path=$MODEL_NAME \
18
+ --instance_data_dir=$INSTANCE_DIR \
19
+ --output_dir=$OUTPUT_DIR \
20
+ --use_face_segmentation_condition \
21
+ --resolution=512 \
22
+ --train_batch_size=2 \
23
+ --gradient_accumulation_steps=1 \
24
+ --learning_rate_unet=5e-5 \
25
+ --learning_rate_ti=2e-3 \
26
+ --color_jitter \
27
+ --lr_scheduler="linear" --lr_scheduler_lora="linear"\
28
+ --lr_warmup_steps=0 \
29
+ --placeholder_tokens="<s1>|<s2>" \
30
+ --use_template="object"\
31
+ --save_steps=3 \
32
+ --max_train_steps_ti=$TRAIN_STEPS_TI \
33
+ --max_train_steps_tuning=$TRAIN_STEPS_TUNING \
34
+ --perform_inversion=True \
35
+ --clip_ti_decay \
36
+ --weight_decay_ti=0.000 \
37
+ --weight_decay_lora=0.001\
38
+ --continue_inversion \
39
+ --continue_inversion_lr=1e-3 \
40
+ --device="cpu" \
41
+ --lora_rank=16 \
42
+ --print_freq=2 \
43
+ 2>&1 |tee log_1x_ft_cpu.txt
44
+
45
+
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-finetuning/stability_ft_tc.sh ADDED
@@ -0,0 +1,51 @@
1
+ #!/bin/bash
2
+ ###############################################################################
3
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
4
+ ###############################################################################
5
+ export PATH=$PATH:~/.local/bin
6
+ #export WANDB_API_KEY=
7
+ export MODEL_NAME="stabilityai/stable-diffusion-2-1-base"
8
+ export INSTANCE_DIR='data/bo'
9
+ export OUTPUT_DIR='results/bo'
10
+ #export MASK_DIR=$SM_CHANNEL_MASK
11
+ export TRAIN_STEPS_TUNING=1000
12
+ export SM_MODEL_DIR=${OUTPUT_DIR}
13
+ mkdir -p $OUTPUT_DIR
14
+
15
+ PT_HPU_LAZY_MODE=0 \
16
+ lora_pti \
17
+ --pretrained_model_name_or_path=$MODEL_NAME \
18
+ --instance_data_dir=$INSTANCE_DIR \
19
+ --output_dir=$OUTPUT_DIR \
20
+ --use_face_segmentation_condition \
21
+ --resolution=512 \
22
+ --train_batch_size=7 \
23
+ --gradient_accumulation_steps=1 \
24
+ --learning_rate_unet=5e-5 \
25
+ --learning_rate_ti=2e-3 \
26
+ --color_jitter \
27
+ --lr_scheduler="linear" --lr_scheduler_lora="linear"\
28
+ --lr_warmup_steps=0 \
29
+ --placeholder_tokens="<s1>|<s2>" \
30
+ --use_template="object"\
31
+ --save_steps=50 \
32
+ --max_train_steps_ti=500 \
33
+ --max_train_steps_tuning=$TRAIN_STEPS_TUNING \
34
+ --perform_inversion=True \
35
+ --clip_ti_decay \
36
+ --weight_decay_ti=0.000 \
37
+ --weight_decay_lora=0.001\
38
+ --continue_inversion \
39
+ --continue_inversion_lr=1e-3 \
40
+ --device="hpu" \
41
+ --lora_rank=16 \
42
+ --use_fused_adamw=True \
43
+ --use_lazy_mode=False \
44
+ --print_freq=1 \
45
+ --use_fused_clip_norm=True \
46
+ --use_torch_compile=True \
47
+ 2>&1 |tee log_1x_ft_hpu_compile.txt
48
+
49
+
50
+
51
+ cp $OUTPUT_DIR/step_$TRAIN_STEPS_TUNING.safetensors $SM_MODEL_DIR/lora.safetensors
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/configs/stable-diffusion/v2-inference.yaml ADDED
@@ -0,0 +1,68 @@
1
+ model:
2
+ base_learning_rate: 1.0e-4
3
+ target: ldm.models.diffusion.ddpm.LatentDiffusion
4
+ params:
5
+ linear_start: 0.00085
6
+ linear_end: 0.0120
7
+ num_timesteps_cond: 1
8
+ log_every_t: 200
9
+ timesteps: 1000
10
+ first_stage_key: "jpg"
11
+ cond_stage_key: "txt"
12
+ image_size: 64
13
+ channels: 4
14
+ cond_stage_trainable: false
15
+ conditioning_key: crossattn
16
+ monitor: val/loss_simple_ema
17
+ scale_factor: 0.18215
18
+ use_ema: False # we set this to false because this is an inference only config
19
+
20
+ unet_config:
21
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
22
+ params:
23
+ use_checkpoint: True
24
+ use_fp16: False
25
+ image_size: 32 # unused
26
+ in_channels: 4
27
+ out_channels: 4
28
+ model_channels: 320
29
+ attention_resolutions: [ 4, 2, 1 ]
30
+ num_res_blocks: 2
31
+ channel_mult: [ 1, 2, 4, 4 ]
32
+ num_head_channels: 64 # need to fix for flash-attn
33
+ use_spatial_transformer: True
34
+ use_linear_in_transformer: True
35
+ transformer_depth: 1
36
+ context_dim: 1024
37
+ legacy: False
38
+
39
+ first_stage_config:
40
+ target: ldm.models.autoencoder.AutoencoderKL
41
+ params:
42
+ embed_dim: 4
43
+ monitor: val/rec_loss
44
+ ddconfig:
45
+ #attn_type: "vanilla-xformers"
46
+ double_z: true
47
+ z_channels: 4
48
+ resolution: 256
49
+ in_channels: 3
50
+ out_ch: 3
51
+ ch: 128
52
+ ch_mult:
53
+ - 1
54
+ - 2
55
+ - 4
56
+ - 4
57
+ num_res_blocks: 2
58
+ attn_resolutions: []
59
+ dropout: 0.0
60
+ lossconfig:
61
+ target: torch.nn.Identity
62
+
63
+ cond_stage_config:
64
+ target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
65
+ params:
66
+ freeze: True
67
+ layer: "penultimate"
68
+ device: "hpu"
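For context, a config like the one above is usually turned into a model with OmegaConf plus `instantiate_from_config` (a minimal sketch; the checkpoint path is an illustrative assumption and the weights are loaded separately from the instantiated architecture):

    import torch
    from omegaconf import OmegaConf
    from ldm.util import instantiate_from_config

    config = OmegaConf.load("configs/stable-diffusion/v2-inference.yaml")
    model = instantiate_from_config(config.model)
    state = torch.load("v2-1_512-ema-pruned.ckpt", map_location="cpu")["state_dict"]  # illustrative path
    model.load_state_dict(state, strict=False)
    model.eval()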
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/configs/stable-diffusion/v2-inpainting-inference.yaml ADDED
@@ -0,0 +1,158 @@
1
+ model:
2
+ base_learning_rate: 5.0e-05
3
+ target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
4
+ params:
5
+ linear_start: 0.00085
6
+ linear_end: 0.0120
7
+ num_timesteps_cond: 1
8
+ log_every_t: 200
9
+ timesteps: 1000
10
+ first_stage_key: "jpg"
11
+ cond_stage_key: "txt"
12
+ image_size: 64
13
+ channels: 4
14
+ cond_stage_trainable: false
15
+ conditioning_key: hybrid
16
+ scale_factor: 0.18215
17
+ monitor: val/loss_simple_ema
18
+ finetune_keys: null
19
+ use_ema: False
20
+
21
+ unet_config:
22
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
23
+ params:
24
+ use_checkpoint: True
25
+ image_size: 32 # unused
26
+ in_channels: 9
27
+ out_channels: 4
28
+ model_channels: 320
29
+ attention_resolutions: [ 4, 2, 1 ]
30
+ num_res_blocks: 2
31
+ channel_mult: [ 1, 2, 4, 4 ]
32
+ num_head_channels: 64 # need to fix for flash-attn
33
+ use_spatial_transformer: True
34
+ use_linear_in_transformer: True
35
+ transformer_depth: 1
36
+ context_dim: 1024
37
+ legacy: False
38
+
39
+ first_stage_config:
40
+ target: ldm.models.autoencoder.AutoencoderKL
41
+ params:
42
+ embed_dim: 4
43
+ monitor: val/rec_loss
44
+ ddconfig:
45
+ #attn_type: "vanilla-xformers"
46
+ double_z: true
47
+ z_channels: 4
48
+ resolution: 256
49
+ in_channels: 3
50
+ out_ch: 3
51
+ ch: 128
52
+ ch_mult:
53
+ - 1
54
+ - 2
55
+ - 4
56
+ - 4
57
+ num_res_blocks: 2
58
+ attn_resolutions: [ ]
59
+ dropout: 0.0
60
+ lossconfig:
61
+ target: torch.nn.Identity
62
+
63
+ cond_stage_config:
64
+ target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
65
+ params:
66
+ freeze: True
67
+ layer: "penultimate"
68
+
69
+
70
+ data:
71
+ target: ldm.data.laion.WebDataModuleFromConfig
72
+ params:
73
+ tar_base: null # for concat as in LAION-A
74
+ p_unsafe_threshold: 0.1
75
+ filter_word_list: "data/filters.yaml"
76
+ max_pwatermark: 0.45
77
+ batch_size: 8
78
+ num_workers: 6
79
+ multinode: True
80
+ min_size: 512
81
+ train:
82
+ shards:
83
+ - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-0/{00000..18699}.tar -"
84
+ - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-1/{00000..18699}.tar -"
85
+ - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-2/{00000..18699}.tar -"
86
+ - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-3/{00000..18699}.tar -"
87
+ - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-4/{00000..18699}.tar -" #{00000-94333}.tar"
88
+ shuffle: 10000
89
+ image_key: jpg
90
+ image_transforms:
91
+ - target: torchvision.transforms.Resize
92
+ params:
93
+ size: 512
94
+ interpolation: 3
95
+ - target: torchvision.transforms.RandomCrop
96
+ params:
97
+ size: 512
98
+ postprocess:
99
+ target: ldm.data.laion.AddMask
100
+ params:
101
+ mode: "512train-large"
102
+ p_drop: 0.25
103
+ # NOTE use enough shards to avoid empty validation loops in workers
104
+ validation:
105
+ shards:
106
+ - "pipe:aws s3 cp s3://deep-floyd-s3/datasets/laion_cleaned-part5/{93001..94333}.tar - "
107
+ shuffle: 0
108
+ image_key: jpg
109
+ image_transforms:
110
+ - target: torchvision.transforms.Resize
111
+ params:
112
+ size: 512
113
+ interpolation: 3
114
+ - target: torchvision.transforms.CenterCrop
115
+ params:
116
+ size: 512
117
+ postprocess:
118
+ target: ldm.data.laion.AddMask
119
+ params:
120
+ mode: "512train-large"
121
+ p_drop: 0.25
122
+
123
+ lightning:
124
+ find_unused_parameters: True
125
+ modelcheckpoint:
126
+ params:
127
+ every_n_train_steps: 5000
128
+
129
+ callbacks:
130
+ metrics_over_trainsteps_checkpoint:
131
+ params:
132
+ every_n_train_steps: 10000
133
+
134
+ image_logger:
135
+ target: main.ImageLogger
136
+ params:
137
+ enable_autocast: False
138
+ disabled: False
139
+ batch_frequency: 1000
140
+ max_images: 4
141
+ increase_log_steps: False
142
+ log_first_step: False
143
+ log_images_kwargs:
144
+ use_ema_scope: False
145
+ inpaint: False
146
+ plot_progressive_rows: False
147
+ plot_diffusion_rows: False
148
+ N: 4
149
+ unconditional_guidance_scale: 5.0
150
+ unconditional_guidance_label: [""]
151
+ ddim_steps: 50 # todo check these out for depth2img,
152
+ ddim_eta: 0.0 # todo check these out for depth2img,
153
+
154
+ trainer:
155
+ benchmark: True
156
+ val_check_interval: 5000000
157
+ num_sanity_val_steps: 0
158
+ accumulate_grad_batches: 1
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/configs/stable-diffusion/v2-midas-inference.yaml ADDED
@@ -0,0 +1,74 @@
1
+ model:
2
+ base_learning_rate: 5.0e-07
3
+ target: ldm.models.diffusion.ddpm.LatentDepth2ImageDiffusion
4
+ params:
5
+ linear_start: 0.00085
6
+ linear_end: 0.0120
7
+ num_timesteps_cond: 1
8
+ log_every_t: 200
9
+ timesteps: 1000
10
+ first_stage_key: "jpg"
11
+ cond_stage_key: "txt"
12
+ image_size: 64
13
+ channels: 4
14
+ cond_stage_trainable: false
15
+ conditioning_key: hybrid
16
+ scale_factor: 0.18215
17
+ monitor: val/loss_simple_ema
18
+ finetune_keys: null
19
+ use_ema: False
20
+
21
+ depth_stage_config:
22
+ target: ldm.modules.midas.api.MiDaSInference
23
+ params:
24
+ model_type: "dpt_hybrid"
25
+
26
+ unet_config:
27
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
28
+ params:
29
+ use_checkpoint: True
30
+ image_size: 32 # unused
31
+ in_channels: 5
32
+ out_channels: 4
33
+ model_channels: 320
34
+ attention_resolutions: [ 4, 2, 1 ]
35
+ num_res_blocks: 2
36
+ channel_mult: [ 1, 2, 4, 4 ]
37
+ num_head_channels: 64 # need to fix for flash-attn
38
+ use_spatial_transformer: True
39
+ use_linear_in_transformer: True
40
+ transformer_depth: 1
41
+ context_dim: 1024
42
+ legacy: False
43
+
44
+ first_stage_config:
45
+ target: ldm.models.autoencoder.AutoencoderKL
46
+ params:
47
+ embed_dim: 4
48
+ monitor: val/rec_loss
49
+ ddconfig:
50
+ #attn_type: "vanilla-xformers"
51
+ double_z: true
52
+ z_channels: 4
53
+ resolution: 256
54
+ in_channels: 3
55
+ out_ch: 3
56
+ ch: 128
57
+ ch_mult:
58
+ - 1
59
+ - 2
60
+ - 4
61
+ - 4
62
+ num_res_blocks: 2
63
+ attn_resolutions: [ ]
64
+ dropout: 0.0
65
+ lossconfig:
66
+ target: torch.nn.Identity
67
+
68
+ cond_stage_config:
69
+ target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
70
+ params:
71
+ freeze: True
72
+ layer: "penultimate"
73
+
74
+
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/configs/stable-diffusion/x4-upscaling.yaml ADDED
@@ -0,0 +1,76 @@
1
+ model:
2
+ base_learning_rate: 1.0e-04
3
+ target: ldm.models.diffusion.ddpm.LatentUpscaleDiffusion
4
+ params:
5
+ parameterization: "v"
6
+ low_scale_key: "lr"
7
+ linear_start: 0.0001
8
+ linear_end: 0.02
9
+ num_timesteps_cond: 1
10
+ log_every_t: 200
11
+ timesteps: 1000
12
+ first_stage_key: "jpg"
13
+ cond_stage_key: "txt"
14
+ image_size: 128
15
+ channels: 4
16
+ cond_stage_trainable: false
17
+ conditioning_key: "hybrid-adm"
18
+ monitor: val/loss_simple_ema
19
+ scale_factor: 0.08333
20
+ use_ema: False
21
+
22
+ low_scale_config:
23
+ target: ldm.modules.diffusionmodules.upscaling.ImageConcatWithNoiseAugmentation
24
+ params:
25
+ noise_schedule_config: # image space
26
+ linear_start: 0.0001
27
+ linear_end: 0.02
28
+ max_noise_level: 350
29
+
30
+ unet_config:
31
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
32
+ params:
33
+ use_checkpoint: True
34
+ num_classes: 1000 # timesteps for noise conditioning (here constant, just need one)
35
+ image_size: 128
36
+ in_channels: 7
37
+ out_channels: 4
38
+ model_channels: 256
39
+ attention_resolutions: [ 2,4,8]
40
+ num_res_blocks: 2
41
+ channel_mult: [ 1, 2, 2, 4]
42
+ disable_self_attentions: [True, True, True, False]
43
+ disable_middle_self_attn: False
44
+ num_heads: 8
45
+ use_spatial_transformer: True
46
+ transformer_depth: 1
47
+ context_dim: 1024
48
+ legacy: False
49
+ use_linear_in_transformer: True
50
+
51
+ first_stage_config:
52
+ target: ldm.models.autoencoder.AutoencoderKL
53
+ params:
54
+ embed_dim: 4
55
+ ddconfig:
56
+ # attn_type: "vanilla-xformers" this model needs efficient attention to be feasible on HR data, also the decoder seems to break in half precision (UNet is fine though)
57
+ double_z: True
58
+ z_channels: 4
59
+ resolution: 256
60
+ in_channels: 3
61
+ out_ch: 3
62
+ ch: 128
63
+ ch_mult: [ 1,2,4 ] # num_down = len(ch_mult)-1
64
+ num_res_blocks: 2
65
+ attn_resolutions: [ ]
66
+ dropout: 0.0
67
+
68
+ lossconfig:
69
+ target: torch.nn.Identity
70
+
71
+ cond_stage_config:
72
+ target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
73
+ params:
74
+ freeze: True
75
+ layer: "penultimate"
76
+
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/data/__init__.py ADDED
File without changes
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/data/util.py ADDED
@@ -0,0 +1,24 @@
1
+ import torch
2
+
3
+ from ldm.modules.midas.api import load_midas_transform
4
+
5
+
6
+ class AddMiDaS(object):
7
+ def __init__(self, model_type):
8
+ super().__init__()
9
+ self.transform = load_midas_transform(model_type)
10
+
11
+ def pt2np(self, x):
12
+ x = ((x + 1.0) * .5).detach().cpu().numpy()
13
+ return x
14
+
15
+ def np2pt(self, x):
16
+ x = torch.from_numpy(x) * 2 - 1.
17
+ return x
18
+
19
+ def __call__(self, sample):
20
+ # sample['jpg'] is tensor hwc in [-1, 1] at this point
21
+ x = self.pt2np(sample['jpg'])
22
+ x = self.transform({"image": x})["image"]
23
+ sample['midas_in'] = x
24
+ return sample
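A minimal sketch of exercising `AddMiDaS` on its own (assumes the MiDaS dependencies under `ldm.modules.midas` are available; the random tensor merely stands in for a real `jpg` sample, HWC in [-1, 1]):

    import torch

    add_midas = AddMiDaS(model_type="dpt_hybrid")
    sample = {"jpg": torch.rand(384, 384, 3) * 2.0 - 1.0}  # fake HWC image in [-1, 1]
    sample = add_midas(sample)
    print(sample["midas_in"].shape)  # resized/normalized input for the depth model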
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/autoencoder.py ADDED
@@ -0,0 +1,219 @@
1
+ import torch
2
+ import pytorch_lightning as pl
3
+ import torch.nn.functional as F
4
+ from contextlib import contextmanager
5
+
6
+ from ldm.modules.diffusionmodules.model import Encoder, Decoder
7
+ from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
8
+
9
+ from ldm.util import instantiate_from_config
10
+ from ldm.modules.ema import LitEma
11
+
12
+
13
+ class AutoencoderKL(pl.LightningModule):
14
+ def __init__(self,
15
+ ddconfig,
16
+ lossconfig,
17
+ embed_dim,
18
+ ckpt_path=None,
19
+ ignore_keys=[],
20
+ image_key="image",
21
+ colorize_nlabels=None,
22
+ monitor=None,
23
+ ema_decay=None,
24
+ learn_logvar=False
25
+ ):
26
+ super().__init__()
27
+ self.learn_logvar = learn_logvar
28
+ self.image_key = image_key
29
+ self.encoder = Encoder(**ddconfig)
30
+ self.decoder = Decoder(**ddconfig)
31
+ self.loss = instantiate_from_config(lossconfig)
32
+ assert ddconfig["double_z"]
33
+ self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
34
+ self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
35
+ self.embed_dim = embed_dim
36
+ if colorize_nlabels is not None:
37
+ assert type(colorize_nlabels)==int
38
+ self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
39
+ if monitor is not None:
40
+ self.monitor = monitor
41
+
42
+ self.use_ema = ema_decay is not None
43
+ if self.use_ema:
44
+ self.ema_decay = ema_decay
45
+ assert 0. < ema_decay < 1.
46
+ self.model_ema = LitEma(self, decay=ema_decay)
47
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
48
+
49
+ if ckpt_path is not None:
50
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
51
+
52
+ def init_from_ckpt(self, path, ignore_keys=list()):
53
+ sd = torch.load(path, map_location="cpu")["state_dict"]
54
+ keys = list(sd.keys())
55
+ for k in keys:
56
+ for ik in ignore_keys:
57
+ if k.startswith(ik):
58
+ print("Deleting key {} from state_dict.".format(k))
59
+ del sd[k]
60
+ self.load_state_dict(sd, strict=False)
61
+ print(f"Restored from {path}")
62
+
63
+ @contextmanager
64
+ def ema_scope(self, context=None):
65
+ if self.use_ema:
66
+ self.model_ema.store(self.parameters())
67
+ self.model_ema.copy_to(self)
68
+ if context is not None:
69
+ print(f"{context}: Switched to EMA weights")
70
+ try:
71
+ yield None
72
+ finally:
73
+ if self.use_ema:
74
+ self.model_ema.restore(self.parameters())
75
+ if context is not None:
76
+ print(f"{context}: Restored training weights")
77
+
78
+ def on_train_batch_end(self, *args, **kwargs):
79
+ if self.use_ema:
80
+ self.model_ema(self)
81
+
82
+ def encode(self, x):
83
+ h = self.encoder(x)
84
+ moments = self.quant_conv(h)
85
+ posterior = DiagonalGaussianDistribution(moments)
86
+ return posterior
87
+
88
+ def decode(self, z):
89
+ z = self.post_quant_conv(z)
90
+ dec = self.decoder(z)
91
+ return dec
92
+
93
+ def forward(self, input, sample_posterior=True):
94
+ posterior = self.encode(input)
95
+ if sample_posterior:
96
+ z = posterior.sample()
97
+ else:
98
+ z = posterior.mode()
99
+ dec = self.decode(z)
100
+ return dec, posterior
101
+
102
+ def get_input(self, batch, k):
103
+ x = batch[k]
104
+ if len(x.shape) == 3:
105
+ x = x[..., None]
106
+ x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
107
+ return x
108
+
109
+ def training_step(self, batch, batch_idx, optimizer_idx):
110
+ inputs = self.get_input(batch, self.image_key)
111
+ reconstructions, posterior = self(inputs)
112
+
113
+ if optimizer_idx == 0:
114
+ # train encoder+decoder+logvar
115
+ aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
116
+ last_layer=self.get_last_layer(), split="train")
117
+ self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
118
+ self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
119
+ return aeloss
120
+
121
+ if optimizer_idx == 1:
122
+ # train the discriminator
123
+ discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
124
+ last_layer=self.get_last_layer(), split="train")
125
+
126
+ self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
127
+ self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
128
+ return discloss
129
+
130
+ def validation_step(self, batch, batch_idx):
131
+ log_dict = self._validation_step(batch, batch_idx)
132
+ with self.ema_scope():
133
+ log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
134
+ return log_dict
135
+
136
+ def _validation_step(self, batch, batch_idx, postfix=""):
137
+ inputs = self.get_input(batch, self.image_key)
138
+ reconstructions, posterior = self(inputs)
139
+ aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
140
+ last_layer=self.get_last_layer(), split="val"+postfix)
141
+
142
+ discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
143
+ last_layer=self.get_last_layer(), split="val"+postfix)
144
+
145
+ self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
146
+ self.log_dict(log_dict_ae)
147
+ self.log_dict(log_dict_disc)
148
+ return self.log_dict
149
+
150
+ def configure_optimizers(self):
151
+ lr = self.learning_rate
152
+ ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(
153
+ self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())
154
+ if self.learn_logvar:
155
+ print(f"{self.__class__.__name__}: Learning logvar")
156
+ ae_params_list.append(self.loss.logvar)
157
+ opt_ae = torch.optim.Adam(ae_params_list,
158
+ lr=lr, betas=(0.5, 0.9))
159
+ opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
160
+ lr=lr, betas=(0.5, 0.9))
161
+ return [opt_ae, opt_disc], []
162
+
163
+ def get_last_layer(self):
164
+ return self.decoder.conv_out.weight
165
+
166
+ @torch.no_grad()
167
+ def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):
168
+ log = dict()
169
+ x = self.get_input(batch, self.image_key)
170
+ x = x.to(self.device)
171
+ if not only_inputs:
172
+ xrec, posterior = self(x)
173
+ if x.shape[1] > 3:
174
+ # colorize with random projection
175
+ assert xrec.shape[1] > 3
176
+ x = self.to_rgb(x)
177
+ xrec = self.to_rgb(xrec)
178
+ log["samples"] = self.decode(torch.randn_like(posterior.sample()))
179
+ log["reconstructions"] = xrec
180
+ if log_ema or self.use_ema:
181
+ with self.ema_scope():
182
+ xrec_ema, posterior_ema = self(x)
183
+ if x.shape[1] > 3:
184
+ # colorize with random projection
185
+ assert xrec_ema.shape[1] > 3
186
+ xrec_ema = self.to_rgb(xrec_ema)
187
+ log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample()))
188
+ log["reconstructions_ema"] = xrec_ema
189
+ log["inputs"] = x
190
+ return log
191
+
192
+ def to_rgb(self, x):
193
+ assert self.image_key == "segmentation"
194
+ if not hasattr(self, "colorize"):
195
+ self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
196
+ x = F.conv2d(x, weight=self.colorize)
197
+ x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
198
+ return x
199
+
200
+
201
+ class IdentityFirstStage(torch.nn.Module):
202
+ def __init__(self, *args, vq_interface=False, **kwargs):
203
+ self.vq_interface = vq_interface
204
+ super().__init__()
205
+
206
+ def encode(self, x, *args, **kwargs):
207
+ return x
208
+
209
+ def decode(self, x, *args, **kwargs):
210
+ return x
211
+
212
+ def quantize(self, x, *args, **kwargs):
213
+ if self.vq_interface:
214
+ return x, None, [None, None, None]
215
+ return x
216
+
217
+ def forward(self, x, *args, **kwargs):
218
+ return x
219
+
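A hedged round-trip sketch for `AutoencoderKL`, reusing the ddconfig from the v2 inference config above (random input and an identity loss; real use passes `ckpt_path` or loads pretrained weights):

    import torch

    ddconfig = dict(double_z=True, z_channels=4, resolution=256, in_channels=3,
                    out_ch=3, ch=128, ch_mult=[1, 2, 4, 4], num_res_blocks=2,
                    attn_resolutions=[], dropout=0.0)
    ae = AutoencoderKL(ddconfig, lossconfig={"target": "torch.nn.Identity"}, embed_dim=4)

    x = torch.randn(1, 3, 256, 256)
    posterior = ae.encode(x)   # DiagonalGaussianDistribution over the latent space
    z = posterior.sample()     # 1 x 4 x 32 x 32 for this ddconfig
    rec = ae.decode(z)         # back to 1 x 3 x 256 x 256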
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/__init__.py ADDED
File without changes
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddim.py ADDED
@@ -0,0 +1,158 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ import torch
5
+ import numpy as np
6
+
7
+ from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
8
+ from ldm.models.diffusion.sampler import Sampler
9
+
10
+
11
+ class DDIMSampler(Sampler):
12
+ def __init__(self, model, schedule="linear", **kwargs):
13
+ super().__init__()
14
+ self.model = model
15
+ self.model_wrap = model.apply_model
16
+ self.ddpm_num_timesteps = model.num_timesteps
17
+ self.schedule = schedule
18
+ self.rand_scale = 1.0
19
+
20
+ def register_buffer(self, name, attr):
21
+ if self.model.device == "cuda":
22
+ if type(attr) == torch.Tensor:
23
+ if attr.device != torch.device("cuda"):
24
+ attr = attr.to(torch.device("cuda"))
25
+ setattr(self, name, attr)
26
+
27
+ def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
28
+ self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
29
+ num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
30
+ alphas_cumprod = self.model.alphas_cumprod
31
+ assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
32
+
33
+ def to_torch(x): return x.clone().detach().to(
34
+ torch.float32).to(self.model.device)
35
+
36
+ self.register_buffer('betas', to_torch(self.model.betas))
37
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
38
+ self.register_buffer('alphas_cumprod_prev', to_torch(
39
+ self.model.alphas_cumprod_prev))
40
+
41
+ # calculations for diffusion q(x_t | x_{t-1}) and others
42
+ self.register_buffer('sqrt_alphas_cumprod',
43
+ to_torch(np.sqrt(alphas_cumprod.cpu())))
44
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(
45
+ np.sqrt(1. - alphas_cumprod.cpu())))
46
+ self.register_buffer('log_one_minus_alphas_cumprod',
47
+ to_torch(np.log(1. - alphas_cumprod.cpu())))
48
+ self.register_buffer('sqrt_recip_alphas_cumprod',
49
+ to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
50
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(
51
+ np.sqrt(1. / alphas_cumprod.cpu() - 1)))
52
+
53
+ # ddim sampling parameters
54
+ ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
55
+ ddim_timesteps=self.ddim_timesteps,
56
+ eta=ddim_eta, verbose=verbose)
57
+ self.register_buffer('ddim_sigmas', ddim_sigmas)
58
+ self.register_buffer('ddim_alphas', ddim_alphas)
59
+ self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
60
+ self.register_buffer('ddim_sqrt_one_minus_alphas',
61
+ np.sqrt(1. - ddim_alphas))
62
+ sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
63
+ (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
64
+ 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
65
+ self.register_buffer('ddim_sigmas_for_original_num_steps',
66
+ sigmas_for_original_sampling_steps)
67
+
68
+ @torch.no_grad()
69
+ def compile(self,
70
+ S,
71
+ shape,
72
+ batch_size=1,
73
+ eta=0.,
74
+ temperature=1.,
75
+ verbose=False,
76
+ unconditional_guidance_scale=1.,
77
+ use_original_steps=False,
78
+ **kwargs
79
+ ):
80
+
81
+ self.steps = S
82
+ self.batch_size = batch_size
83
+ self.shape = shape
84
+ self.eta = eta
85
+ self.temperature = temperature
86
+ self.cond_scale = unconditional_guidance_scale
87
+ self.x_shape = (self.batch_size,
88
+ self.shape[0], self.shape[1], self.shape[2])
89
+
90
+ self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
91
+ # sampling
92
+ C, H, W = shape
93
+ size = (batch_size, C, H, W)
94
+ print(f'Data shape for DDIM sampling is {size}, eta {eta}')
95
+
96
+ self.ts_list = torch.Tensor(
97
+ np.expand_dims(self.ddim_timesteps, axis=0))
98
+ self.ts_list = self.ts_list.fliplr().to(torch.int32).to(self.model.device)
99
+
100
+ alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
101
+ alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
102
+ sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
103
+ sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
104
+ alphas_prev = torch.Tensor(alphas_prev)
105
+
106
+ self.params_init = [
107
+ ('alpha', alphas),
108
+ ('alpha_prev', alphas_prev),
109
+ ('rsqrt(alpha)', alphas.rsqrt()),
110
+ ('sqrt(alpha_prev)', alphas_prev.sqrt()),
111
+ ('sqrt(1-alpha)', sqrt_one_minus_alphas),
112
+ ('sigma', torch.Tensor(sigmas)),
113
+ ('dir', torch.sqrt(1. - alphas_prev - sigmas**2))
114
+ ]
115
+
116
+ self.params = torch.stack(list(map(lambda x: x[1], self.params_init)))
117
+ self.params = self.params.fliplr().to(
118
+ self.model.betas.dtype).to(self.model.device)
119
+
120
+ def one_step(self, x, c_in, ts_t, param_t):
121
+ ts = ts_t[0].broadcast_to((self.batch_size)).contiguous()
122
+
123
+ param = {}
124
+ for idx, val in enumerate(self.params_init):
125
+ param[val[0]] = param_t[idx].broadcast_to(
126
+ (self.batch_size, 1, 1, 1)).contiguous()
127
+
128
+ model_output = self.run_model(x, c_in, ts)
129
+
130
+ if self.model.parameterization == "v":
131
+ e_t = self.model.predict_eps_from_z_and_v(x, ts, model_output)
132
+ else:
133
+ e_t = model_output
134
+
135
+ # current prediction for x_0
136
+ if self.model.parameterization != "v":
137
+ pred_x0 = (x - param['sqrt(1-alpha)'] *
138
+ e_t) * param['rsqrt(alpha)']
139
+ else:
140
+ pred_x0 = self.model.predict_start_from_z_and_v(
141
+ x, ts, model_output)
142
+
143
+ # direction pointing to x_t
144
+ dir_xt = param['dir'] * e_t
145
+ noise = param['sigma'] * \
146
+ noise_like(x.shape, self.model.device, False) * self.temperature
147
+ x = param['sqrt(alpha_prev)'] * pred_x0 + dir_xt + noise
148
+ return x
149
+
150
+ def sampler_step(self, arg):
151
+ x, c_in, ts, params = arg
152
+ x = self.one_step(x, c_in, ts[:, 0], params[:, 0])
153
+ ts = torch.roll(ts, shifts=-1, dims=1)
154
+ params = torch.roll(params, shifts=-1, dims=1)
155
+ return [x, c_in, ts, params]
156
+
157
+ def init_loop(self, x, c_in):
158
+ return [x, c_in, self.ts_list.clone(), self.params.clone()]
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddpm.py ADDED
@@ -0,0 +1,1795 @@
1
+ """
2
+ wild mixture of
3
+ https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
4
+ https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
5
+ https://github.com/CompVis/taming-transformers
6
+ -- merci
7
+ """
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import numpy as np
12
+ import pytorch_lightning as pl
13
+ from torch.optim.lr_scheduler import LambdaLR
14
+ from einops import rearrange, repeat
15
+ from contextlib import contextmanager, nullcontext
16
+ from functools import partial
17
+ import itertools
18
+ from tqdm import tqdm
19
+ from torchvision.utils import make_grid
20
+ from pytorch_lightning.utilities.rank_zero import rank_zero_only
21
+ from omegaconf import ListConfig
22
+
23
+ from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
24
+ from ldm.modules.ema import LitEma
25
+ from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
26
+ from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
27
+ from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
28
+ from ldm.models.diffusion.ddim import DDIMSampler
29
+
30
+
31
+ __conditioning_keys__ = {'concat': 'c_concat',
32
+ 'crossattn': 'c_crossattn',
33
+ 'adm': 'y'}
34
+
35
+
36
+ def disabled_train(self, mode=True):
37
+ """Overwrite model.train with this function to make sure train/eval mode
38
+ does not change anymore."""
39
+ return self
40
+
41
+
42
+ def uniform_on_device(r1, r2, shape, device):
43
+ return (r1 - r2) * torch.rand(*shape, device=device) + r2
44
+
45
+
46
+ class DDPM(pl.LightningModule):
47
+ # classic DDPM with Gaussian diffusion, in image space
48
+ def __init__(self,
49
+ unet_config,
50
+ timesteps=1000,
51
+ beta_schedule="linear",
52
+ loss_type="l2",
53
+ ckpt_path=None,
54
+ ignore_keys=[],
55
+ load_only_unet=False,
56
+ monitor="val/loss",
57
+ use_ema=True,
58
+ first_stage_key="image",
59
+ image_size=256,
60
+ channels=3,
61
+ log_every_t=100,
62
+ clip_denoised=True,
63
+ linear_start=1e-4,
64
+ linear_end=2e-2,
65
+ cosine_s=8e-3,
66
+ given_betas=None,
67
+ original_elbo_weight=0.,
68
+ v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
69
+ l_simple_weight=1.,
70
+ conditioning_key=None,
71
+ parameterization="eps", # all assuming fixed variance schedules
72
+ scheduler_config=None,
73
+ use_positional_encodings=False,
74
+ learn_logvar=False,
75
+ logvar_init=0.,
76
+ make_it_fit=False,
77
+ ucg_training=None,
78
+ reset_ema=False,
79
+ reset_num_ema_updates=False,
80
+ ):
81
+ super().__init__()
82
+ assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
83
+ self.parameterization = parameterization
84
+ print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
85
+ self.cond_stage_model = None
86
+ self.clip_denoised = clip_denoised
87
+ self.log_every_t = log_every_t
88
+ self.first_stage_key = first_stage_key
89
+ self.image_size = image_size # try conv?
90
+ self.channels = channels
91
+ self.use_positional_encodings = use_positional_encodings
92
+ self.model = DiffusionWrapper(unet_config, conditioning_key)
93
+ count_params(self.model, verbose=True)
94
+ self.use_ema = use_ema
95
+ if self.use_ema:
96
+ self.model_ema = LitEma(self.model)
97
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
98
+
99
+ self.use_scheduler = scheduler_config is not None
100
+ if self.use_scheduler:
101
+ self.scheduler_config = scheduler_config
102
+
103
+ self.v_posterior = v_posterior
104
+ self.original_elbo_weight = original_elbo_weight
105
+ self.l_simple_weight = l_simple_weight
106
+
107
+ if monitor is not None:
108
+ self.monitor = monitor
109
+ self.make_it_fit = make_it_fit
110
+ if reset_ema: assert exists(ckpt_path)
111
+ if ckpt_path is not None:
112
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
113
+ if reset_ema:
114
+ assert self.use_ema
115
+ print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
116
+ self.model_ema = LitEma(self.model)
117
+ if reset_num_ema_updates:
118
+ print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
119
+ assert self.use_ema
120
+ self.model_ema.reset_num_updates()
121
+
122
+ self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
123
+ linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
124
+
125
+ self.loss_type = loss_type
126
+
127
+ self.learn_logvar = learn_logvar
128
+ self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
129
+ if self.learn_logvar:
130
+ self.logvar = nn.Parameter(self.logvar, requires_grad=True)
131
+
132
+ self.ucg_training = ucg_training or dict()
133
+ if self.ucg_training:
134
+ self.ucg_prng = np.random.RandomState()
135
+
136
+ def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
137
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
138
+ if exists(given_betas):
139
+ betas = given_betas
140
+ else:
141
+ betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
142
+ cosine_s=cosine_s)
143
+ alphas = 1. - betas
144
+ alphas_cumprod = np.cumprod(alphas, axis=0)
145
+ alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
146
+
147
+ timesteps, = betas.shape
148
+ self.num_timesteps = int(timesteps)
149
+ self.linear_start = linear_start
150
+ self.linear_end = linear_end
151
+ assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
152
+
153
+ to_torch = partial(torch.tensor, dtype=torch.float32)
154
+
155
+ self.register_buffer('betas', to_torch(betas))
156
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
157
+ self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
158
+
159
+ # calculations for diffusion q(x_t | x_{t-1}) and others
160
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
161
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
162
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
163
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
164
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
165
+
166
+ # calculations for posterior q(x_{t-1} | x_t, x_0)
167
+ posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
168
+ 1. - alphas_cumprod) + self.v_posterior * betas
169
+ # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
170
+ self.register_buffer('posterior_variance', to_torch(posterior_variance))
171
+ # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
172
+ self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
173
+ self.register_buffer('posterior_mean_coef1', to_torch(
174
+ betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
175
+ self.register_buffer('posterior_mean_coef2', to_torch(
176
+ (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
177
+
178
+ if self.parameterization == "eps":
179
+ lvlb_weights = self.betas ** 2 / (
180
+ 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
181
+ elif self.parameterization == "x0":
182
+ lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
183
+ elif self.parameterization == "v":
184
+ lvlb_weights = torch.ones_like(self.betas ** 2 / (
185
+ 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
186
+ else:
187
+ raise NotImplementedError("mu not supported")
188
+ lvlb_weights[0] = lvlb_weights[1]
189
+ self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
190
+ assert not torch.isnan(self.lvlb_weights).any()
191
+
192
+ @contextmanager
193
+ def ema_scope(self, context=None):
194
+ if self.use_ema:
195
+ self.model_ema.store(self.model.parameters())
196
+ self.model_ema.copy_to(self.model)
197
+ if context is not None:
198
+ print(f"{context}: Switched to EMA weights")
199
+ try:
200
+ yield None
201
+ finally:
202
+ if self.use_ema:
203
+ self.model_ema.restore(self.model.parameters())
204
+ if context is not None:
205
+ print(f"{context}: Restored training weights")
206
+
207
+ @torch.no_grad()
208
+ def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
209
+ sd = torch.load(path, map_location="cpu")
210
+ if "state_dict" in list(sd.keys()):
211
+ sd = sd["state_dict"]
212
+ keys = list(sd.keys())
213
+ for k in keys:
214
+ for ik in ignore_keys:
215
+ if k.startswith(ik):
216
+ print("Deleting key {} from state_dict.".format(k))
217
+ del sd[k]
218
+ if self.make_it_fit:
219
+ n_params = len([name for name, _ in
220
+ itertools.chain(self.named_parameters(),
221
+ self.named_buffers())])
222
+ for name, param in tqdm(
223
+ itertools.chain(self.named_parameters(),
224
+ self.named_buffers()),
225
+ desc="Fitting old weights to new weights",
226
+ total=n_params
227
+ ):
228
+ if not name in sd:
229
+ continue
230
+ old_shape = sd[name].shape
231
+ new_shape = param.shape
232
+ assert len(old_shape) == len(new_shape)
233
+ if len(new_shape) > 2:
234
+ # we only modify first two axes
235
+ assert new_shape[2:] == old_shape[2:]
236
+ # assumes first axis corresponds to output dim
237
+ if not new_shape == old_shape:
238
+ new_param = param.clone()
239
+ old_param = sd[name]
240
+ if len(new_shape) == 1:
241
+ for i in range(new_param.shape[0]):
242
+ new_param[i] = old_param[i % old_shape[0]]
243
+ elif len(new_shape) >= 2:
244
+ for i in range(new_param.shape[0]):
245
+ for j in range(new_param.shape[1]):
246
+ new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]]
247
+
248
+ n_used_old = torch.ones(old_shape[1])
249
+ for j in range(new_param.shape[1]):
250
+ n_used_old[j % old_shape[1]] += 1
251
+ n_used_new = torch.zeros(new_shape[1])
252
+ for j in range(new_param.shape[1]):
253
+ n_used_new[j] = n_used_old[j % old_shape[1]]
254
+
255
+ n_used_new = n_used_new[None, :]
256
+ while len(n_used_new.shape) < len(new_shape):
257
+ n_used_new = n_used_new.unsqueeze(-1)
258
+ new_param /= n_used_new
259
+
260
+ sd[name] = new_param
261
+
262
+ missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
263
+ sd, strict=False)
264
+ print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
265
+ if len(missing) > 0:
266
+ print(f"Missing Keys:\n {missing}")
267
+ if len(unexpected) > 0:
268
+ print(f"\nUnexpected Keys:\n {unexpected}")
269
+
270
+ def q_mean_variance(self, x_start, t):
271
+ """
272
+ Get the distribution q(x_t | x_0).
273
+ :param x_start: the [N x C x ...] tensor of noiseless inputs.
274
+ :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
275
+ :return: A tuple (mean, variance, log_variance), all of x_start's shape.
276
+ """
277
+ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
278
+ variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
279
+ log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
280
+ return mean, variance, log_variance
281
+
282
+ def predict_start_from_noise(self, x_t, t, noise):
283
+ return (
284
+ extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
285
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
286
+ )
287
+
288
+ def predict_start_from_z_and_v(self, x_t, t, v):
289
+ # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
290
+ # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
291
+ return (
292
+ extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
293
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
294
+ )
295
+
296
+ def predict_eps_from_z_and_v(self, x_t, t, v):
297
+ return (
298
+ extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
299
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
300
+ )
301
+
302
+ def q_posterior(self, x_start, x_t, t):
303
+ posterior_mean = (
304
+ extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
305
+ extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
306
+ )
307
+ posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
308
+ posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
309
+ return posterior_mean, posterior_variance, posterior_log_variance_clipped
310
+
311
+ def p_mean_variance(self, x, t, clip_denoised: bool):
312
+ model_out = self.model(x, t)
313
+ if self.parameterization == "eps":
314
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
315
+ elif self.parameterization == "x0":
316
+ x_recon = model_out
317
+ if clip_denoised:
318
+ x_recon.clamp_(-1., 1.)
319
+
320
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
321
+ return model_mean, posterior_variance, posterior_log_variance
322
+
323
+ @torch.no_grad()
324
+ def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
325
+ b, *_, device = *x.shape, x.device
326
+ model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
327
+ noise = noise_like(x.shape, device, repeat_noise)
328
+ # no noise when t == 0
329
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
330
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
331
+
332
+ @torch.no_grad()
333
+ def p_sample_loop(self, shape, return_intermediates=False):
334
+ device = self.betas.device
335
+ b = shape[0]
336
+ img = torch.randn(shape, device=device)
337
+ intermediates = [img]
338
+ for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
339
+ img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
340
+ clip_denoised=self.clip_denoised)
341
+ if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
342
+ intermediates.append(img)
343
+ if return_intermediates:
344
+ return img, intermediates
345
+ return img
346
+
347
+ @torch.no_grad()
348
+ def sample(self, batch_size=16, return_intermediates=False):
349
+ image_size = self.image_size
350
+ channels = self.channels
351
+ return self.p_sample_loop((batch_size, channels, image_size, image_size),
352
+ return_intermediates=return_intermediates)
353
+
354
+ def q_sample(self, x_start, t, noise=None):
355
+ noise = default(noise, lambda: torch.randn_like(x_start))
356
+ return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
357
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
358
+
359
+ def get_v(self, x, noise, t):
360
+ return (
361
+ extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
362
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
363
+ )
364
+
365
+ def get_loss(self, pred, target, mean=True):
366
+ if self.loss_type == 'l1':
367
+ loss = (target - pred).abs()
368
+ if mean:
369
+ loss = loss.mean()
370
+ elif self.loss_type == 'l2':
371
+ if mean:
372
+ loss = torch.nn.functional.mse_loss(target, pred)
373
+ else:
374
+ loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
375
+ else:
376
+ raise NotImplementedError("unknown loss type '{loss_type}'")
377
+
378
+ return loss
379
+
380
+ def p_losses(self, x_start, t, noise=None):
381
+ noise = default(noise, lambda: torch.randn_like(x_start))
382
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
383
+ model_out = self.model(x_noisy, t)
384
+
385
+ loss_dict = {}
386
+ if self.parameterization == "eps":
387
+ target = noise
388
+ elif self.parameterization == "x0":
389
+ target = x_start
390
+ elif self.parameterization == "v":
391
+ target = self.get_v(x_start, noise, t)
392
+ else:
393
+ raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
394
+
395
+ loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
396
+
397
+ log_prefix = 'train' if self.training else 'val'
398
+
399
+ loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
400
+ loss_simple = loss.mean() * self.l_simple_weight
401
+
402
+ loss_vlb = (self.lvlb_weights[t] * loss).mean()
403
+ loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
404
+
405
+ loss = loss_simple + self.original_elbo_weight * loss_vlb
406
+
407
+ loss_dict.update({f'{log_prefix}/loss': loss})
408
+
409
+ return loss, loss_dict
410
+
411
+ def forward(self, x, *args, **kwargs):
412
+ # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
413
+ # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
414
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
415
+ return self.p_losses(x, t, *args, **kwargs)
416
+
417
+ def get_input(self, batch, k):
418
+ x = batch[k]
419
+ if len(x.shape) == 3:
420
+ x = x[..., None]
421
+ x = rearrange(x, 'b h w c -> b c h w')
422
+ x = x.to(memory_format=torch.contiguous_format).float()
423
+ return x
424
+
425
+ def shared_step(self, batch):
426
+ x = self.get_input(batch, self.first_stage_key)
427
+ loss, loss_dict = self(x)
428
+ return loss, loss_dict
429
+
430
+ def training_step(self, batch, batch_idx):
431
+ for k in self.ucg_training:
432
+ p = self.ucg_training[k]["p"]
433
+ val = self.ucg_training[k]["val"]
434
+ if val is None:
435
+ val = ""
436
+ for i in range(len(batch[k])):
437
+ if self.ucg_prng.choice(2, p=[1 - p, p]):
438
+ batch[k][i] = val
439
+
440
+ loss, loss_dict = self.shared_step(batch)
441
+
442
+ self.log_dict(loss_dict, prog_bar=True,
443
+ logger=True, on_step=True, on_epoch=True)
444
+
445
+ self.log("global_step", self.global_step,
446
+ prog_bar=True, logger=True, on_step=True, on_epoch=False)
447
+
448
+ if self.use_scheduler:
449
+ lr = self.optimizers().param_groups[0]['lr']
450
+ self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
451
+
452
+ return loss
453
+
454
+ @torch.no_grad()
455
+ def validation_step(self, batch, batch_idx):
456
+ _, loss_dict_no_ema = self.shared_step(batch)
457
+ with self.ema_scope():
458
+ _, loss_dict_ema = self.shared_step(batch)
459
+ loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
460
+ self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
461
+ self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
462
+
463
+ def on_train_batch_end(self, *args, **kwargs):
464
+ if self.use_ema:
465
+ self.model_ema(self.model)
466
+
467
+ def _get_rows_from_list(self, samples):
468
+ n_imgs_per_row = len(samples)
469
+ denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
470
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
471
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
472
+ return denoise_grid
473
+
474
+ @torch.no_grad()
475
+ def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
476
+ log = dict()
477
+ x = self.get_input(batch, self.first_stage_key)
478
+ N = min(x.shape[0], N)
479
+ n_row = min(x.shape[0], n_row)
480
+ x = x.to(self.device)[:N]
481
+ log["inputs"] = x
482
+
483
+ # get diffusion row
484
+ diffusion_row = list()
485
+ x_start = x[:n_row]
486
+
487
+ for t in range(self.num_timesteps):
488
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
489
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
490
+ t = t.to(self.device).long()
491
+ noise = torch.randn_like(x_start)
492
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
493
+ diffusion_row.append(x_noisy)
494
+
495
+ log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
496
+
497
+ if sample:
498
+ # get denoise row
499
+ with self.ema_scope("Plotting"):
500
+ samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
501
+
502
+ log["samples"] = samples
503
+ log["denoise_row"] = self._get_rows_from_list(denoise_row)
504
+
505
+ if return_keys:
506
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
507
+ return log
508
+ else:
509
+ return {key: log[key] for key in return_keys}
510
+ return log
511
+
512
+ def configure_optimizers(self):
513
+ lr = self.learning_rate
514
+ params = list(self.model.parameters())
515
+ if self.learn_logvar:
516
+ params = params + [self.logvar]
517
+ opt = torch.optim.AdamW(params, lr=lr)
518
+ return opt
519
+
520
+
521
+ class LatentDiffusion(DDPM):
522
+ """main class"""
523
+
524
+ def __init__(self,
525
+ first_stage_config,
526
+ cond_stage_config,
527
+ num_timesteps_cond=None,
528
+ cond_stage_key="image",
529
+ cond_stage_trainable=False,
530
+ concat_mode=True,
531
+ cond_stage_forward=None,
532
+ conditioning_key=None,
533
+ scale_factor=1.0,
534
+ scale_by_std=False,
535
+ force_null_conditioning=False,
536
+ *args, **kwargs):
537
+ self.force_null_conditioning = force_null_conditioning
538
+ self.num_timesteps_cond = default(num_timesteps_cond, 1)
539
+ self.scale_by_std = scale_by_std
540
+ assert self.num_timesteps_cond <= kwargs['timesteps']
541
+ # for backwards compatibility after implementation of DiffusionWrapper
542
+ if conditioning_key is None:
543
+ conditioning_key = 'concat' if concat_mode else 'crossattn'
544
+ if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:
545
+ conditioning_key = None
546
+ ckpt_path = kwargs.pop("ckpt_path", None)
547
+ reset_ema = kwargs.pop("reset_ema", False)
548
+ reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False)
549
+ ignore_keys = kwargs.pop("ignore_keys", [])
550
+ super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
551
+ self.concat_mode = concat_mode
552
+ self.cond_stage_trainable = cond_stage_trainable
553
+ self.cond_stage_key = cond_stage_key
554
+ try:
555
+ self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
556
+ except Exception:
557
+ self.num_downs = 0
558
+ if not scale_by_std:
559
+ self.scale_factor = scale_factor
560
+ else:
561
+ self.register_buffer('scale_factor', torch.tensor(scale_factor))
562
+ self.instantiate_first_stage(first_stage_config)
563
+ self.instantiate_cond_stage(cond_stage_config)
564
+ self.cond_stage_forward = cond_stage_forward
565
+ self.clip_denoised = False
566
+ self.bbox_tokenizer = None
567
+
568
+ self.restarted_from_ckpt = False
569
+ if ckpt_path is not None:
570
+ self.init_from_ckpt(ckpt_path, ignore_keys)
571
+ self.restarted_from_ckpt = True
572
+ if reset_ema:
573
+ assert self.use_ema
574
+ print(
575
+ f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
576
+ self.model_ema = LitEma(self.model)
577
+ if reset_num_ema_updates:
578
+ print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
579
+ assert self.use_ema
580
+ self.model_ema.reset_num_updates()
581
+
582
+ def make_cond_schedule(self, ):
583
+ self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
584
+ ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
585
+ self.cond_ids[:self.num_timesteps_cond] = ids
586
+
587
+ @rank_zero_only
588
+ @torch.no_grad()
589
+ def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
590
+ # only for very first batch
591
+ if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
592
+ assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
593
+ # set rescale weight to 1./std of encodings
594
+ print("### USING STD-RESCALING ###")
595
+ x = super().get_input(batch, self.first_stage_key)
596
+ x = x.to(self.device)
597
+ encoder_posterior = self.encode_first_stage(x)
598
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
599
+ del self.scale_factor
600
+ self.register_buffer('scale_factor', 1. / z.flatten().std())
601
+ print(f"setting self.scale_factor to {self.scale_factor}")
602
+ print("### USING STD-RESCALING ###")
603
+
604
+ def register_schedule(self,
605
+ given_betas=None, beta_schedule="linear", timesteps=1000,
606
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
607
+ super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
608
+
609
+ self.shorten_cond_schedule = self.num_timesteps_cond > 1
610
+ if self.shorten_cond_schedule:
611
+ self.make_cond_schedule()
612
+
613
+ def instantiate_first_stage(self, config):
614
+ model = instantiate_from_config(config)
615
+ self.first_stage_model = model.eval()
616
+ self.first_stage_model.train = disabled_train
617
+ for param in self.first_stage_model.parameters():
618
+ param.requires_grad = False
619
+
620
+ def instantiate_cond_stage(self, config):
621
+ if not self.cond_stage_trainable:
622
+ if config == "__is_first_stage__":
623
+ print("Using first stage also as cond stage.")
624
+ self.cond_stage_model = self.first_stage_model
625
+ elif config == "__is_unconditional__":
626
+ print(f"Training {self.__class__.__name__} as an unconditional model.")
627
+ self.cond_stage_model = None
628
+ # self.be_unconditional = True
629
+ else:
630
+ model = instantiate_from_config(config)
631
+ self.cond_stage_model = model.eval()
632
+ self.cond_stage_model.train = disabled_train
633
+ for param in self.cond_stage_model.parameters():
634
+ param.requires_grad = False
635
+ else:
636
+ assert config != '__is_first_stage__'
637
+ assert config != '__is_unconditional__'
638
+ model = instantiate_from_config(config)
639
+ self.cond_stage_model = model
640
+
641
+ def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
642
+ denoise_row = []
643
+ for zd in tqdm(samples, desc=desc):
644
+ denoise_row.append(self.decode_first_stage(zd.to(self.device),
645
+ force_not_quantize=force_no_decoder_quantization))
646
+ n_imgs_per_row = len(denoise_row)
647
+ denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
648
+ denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
649
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
650
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
651
+ return denoise_grid
652
+
653
+ def get_first_stage_encoding(self, encoder_posterior):
654
+ if isinstance(encoder_posterior, DiagonalGaussianDistribution):
655
+ z = encoder_posterior.sample()
656
+ elif isinstance(encoder_posterior, torch.Tensor):
657
+ z = encoder_posterior
658
+ else:
659
+ raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
660
+ return self.scale_factor * z
661
+
662
+ def get_learned_conditioning(self, c):
663
+ if self.cond_stage_forward is None:
664
+ if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
665
+ c = self.cond_stage_model.encode(c)
666
+ if isinstance(c, DiagonalGaussianDistribution):
667
+ c = c.mode()
668
+ else:
669
+ c = self.cond_stage_model(c)
670
+ else:
671
+ assert hasattr(self.cond_stage_model, self.cond_stage_forward)
672
+ c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
673
+ return c
674
+
675
+ def meshgrid(self, h, w):
676
+ y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
677
+ x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
678
+
679
+ arr = torch.cat([y, x], dim=-1)
680
+ return arr
681
+
682
+ def delta_border(self, h, w):
683
+ """
684
+ :param h: height
685
+ :param w: width
686
+ :return: normalized distance to image border,
687
+ with min distance = 0 at border and max dist = 0.5 at image center
688
+ """
689
+ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
690
+ arr = self.meshgrid(h, w) / lower_right_corner
691
+ dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
692
+ dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
693
+ edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
694
+ return edge_dist
695
+
696
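# --- Illustrative sketch (not part of this file): what delta_border produces ---
# On a small grid the normalized border distance is 0 on the outermost pixels and
# approaches 0.5 towards the centre; get_weighting then clips and tiles this map to
# blend overlapping crops. Re-implemented standalone here for inspection only.
import torch

def delta_border(h, w):
    y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
    x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
    arr = torch.cat([y, x], dim=-1).float() / torch.tensor([h - 1, w - 1]).view(1, 1, 2)
    dist_left_up = torch.min(arr, dim=-1, keepdim=True)[0]
    dist_right_down = torch.min(1 - arr, dim=-1, keepdim=True)[0]
    return torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]

print(delta_border(5, 5))   # zeros on the border, 0.5 at the central pixel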
+ def get_weighting(self, h, w, Ly, Lx, device):
697
+ weighting = self.delta_border(h, w)
698
+ weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
699
+ self.split_input_params["clip_max_weight"], )
700
+ weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
701
+
702
+ if self.split_input_params["tie_braker"]:
703
+ L_weighting = self.delta_border(Ly, Lx)
704
+ L_weighting = torch.clip(L_weighting,
705
+ self.split_input_params["clip_min_tie_weight"],
706
+ self.split_input_params["clip_max_tie_weight"])
707
+
708
+ L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
709
+ weighting = weighting * L_weighting
710
+ return weighting
711
+
712
+ def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
713
+ """
714
+ :param x: img of size (bs, c, h, w)
715
+ :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
716
+ """
717
+ bs, nc, h, w = x.shape
718
+
719
+ # number of crops in image
720
+ Ly = (h - kernel_size[0]) // stride[0] + 1
721
+ Lx = (w - kernel_size[1]) // stride[1] + 1
722
+
723
+ if uf == 1 and df == 1:
724
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
725
+ unfold = torch.nn.Unfold(**fold_params)
726
+
727
+ fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
728
+
729
+ weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
730
+ normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
731
+ weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
732
+
733
+ elif uf > 1 and df == 1:
734
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
735
+ unfold = torch.nn.Unfold(**fold_params)
736
+
737
+ fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
738
+ dilation=1, padding=0,
739
+ stride=(stride[0] * uf, stride[1] * uf))
740
+ fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
741
+
742
+ weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
743
+ normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
744
+ weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
745
+
746
+ elif df > 1 and uf == 1:
747
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
748
+ unfold = torch.nn.Unfold(**fold_params)
749
+
750
+ fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
751
+ dilation=1, padding=0,
752
+ stride=(stride[0] // df, stride[1] // df))
753
+ fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
754
+
755
+ weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
756
+ normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
757
+ weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
758
+
759
+ else:
760
+ raise NotImplementedError
761
+
762
+ return fold, unfold, normalization, weighting
763
+
764
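# --- Illustrative sketch (not part of this file): blending overlapping crops ---
# get_fold_unfold above builds an Unfold/Fold pair plus a per-pixel weighting whose
# folded sum ("normalization") undoes the overlap. A minimal uf=df=1 round trip,
# using uniform weights instead of the border-based weighting, looks like this:
import torch

x = torch.randn(1, 3, 16, 16)
ks, stride = (8, 8), (4, 4)

unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=x.shape[2:], kernel_size=ks, stride=stride)

patches = unfold(x)                               # (1, 3*8*8, L) overlapping crops
weighting = torch.ones(1, ks[0] * ks[1], patches.shape[-1])
normalization = fold(weighting)                   # counts how often each pixel is covered

x_rec = fold(patches) / normalization             # overlap-aware reconstruction
assert torch.allclose(x_rec, x, atol=1e-5)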
+ @torch.no_grad()
765
+ def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
766
+ cond_key=None, return_original_cond=False, bs=None, return_x=False):
767
+ x = super().get_input(batch, k)
768
+ if bs is not None:
769
+ x = x[:bs]
770
+ x = x.to(self.device)
771
+ encoder_posterior = self.encode_first_stage(x)
772
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
773
+
774
+ if self.model.conditioning_key is not None and not self.force_null_conditioning:
775
+ if cond_key is None:
776
+ cond_key = self.cond_stage_key
777
+ if cond_key != self.first_stage_key:
778
+ if cond_key in ['caption', 'coordinates_bbox', "txt"]:
779
+ xc = batch[cond_key]
780
+ elif cond_key in ['class_label', 'cls']:
781
+ xc = batch
782
+ else:
783
+ xc = super().get_input(batch, cond_key).to(self.device)
784
+ else:
785
+ xc = x
786
+ if not self.cond_stage_trainable or force_c_encode:
787
+ if isinstance(xc, dict) or isinstance(xc, list):
788
+ c = self.get_learned_conditioning(xc)
789
+ else:
790
+ c = self.get_learned_conditioning(xc.to(self.device))
791
+ else:
792
+ c = xc
793
+ if bs is not None:
794
+ c = c[:bs]
795
+
796
+ if self.use_positional_encodings:
797
+ pos_x, pos_y = self.compute_latent_shifts(batch)
798
+ ckey = __conditioning_keys__[self.model.conditioning_key]
799
+ c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
800
+
801
+ else:
802
+ c = None
803
+ xc = None
804
+ if self.use_positional_encodings:
805
+ pos_x, pos_y = self.compute_latent_shifts(batch)
806
+ c = {'pos_x': pos_x, 'pos_y': pos_y}
807
+ out = [z, c]
808
+ if return_first_stage_outputs:
809
+ xrec = self.decode_first_stage(z)
810
+ out.extend([x, xrec])
811
+ if return_x:
812
+ out.extend([x])
813
+ if return_original_cond:
814
+ out.append(xc)
815
+ return out
816
+
817
+ @torch.no_grad()
818
+ def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
819
+ if predict_cids:
820
+ if z.dim() == 4:
821
+ z = torch.argmax(z.exp(), dim=1).long()
822
+ z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
823
+ z = rearrange(z, 'b h w c -> b c h w').contiguous()
824
+
825
+ z = 1. / self.scale_factor * z
826
+ return self.first_stage_model.decode(z)
827
+
828
+ @torch.no_grad()
829
+ def encode_first_stage(self, x):
830
+ return self.first_stage_model.encode(x)
831
+
832
+ def shared_step(self, batch, **kwargs):
833
+ x, c = self.get_input(batch, self.first_stage_key)
834
+ loss = self(x, c)
835
+ return loss
836
+
837
+ def forward(self, x, c, *args, **kwargs):
838
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
839
+ if self.model.conditioning_key is not None:
840
+ assert c is not None
841
+ if self.cond_stage_trainable:
842
+ c = self.get_learned_conditioning(c)
843
+ if self.shorten_cond_schedule: # TODO: drop this option
844
+ tc = self.cond_ids[t].to(self.device)
845
+ c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
846
+ return self.p_losses(x, c, t, *args, **kwargs)
847
+
848
+ def apply_model(self, x_noisy, t, cond, return_ids=False):
849
+ if isinstance(cond, dict):
850
+ # hybrid case, cond is expected to be a dict
851
+ pass
852
+ else:
853
+ if not isinstance(cond, list):
854
+ cond = [cond]
855
+ key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
856
+ cond = {key: cond}
857
+
858
+ x_recon = self.model(x_noisy, t, **cond)
859
+
860
+ if isinstance(x_recon, tuple) and not return_ids:
861
+ return x_recon[0]
862
+ else:
863
+ return x_recon
864
+
865
+ def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
866
+ return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
867
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
868
+
869
+ def _prior_bpd(self, x_start):
870
+ """
871
+ Get the prior KL term for the variational lower-bound, measured in
872
+ bits-per-dim.
873
+ This term can't be optimized, as it only depends on the encoder.
874
+ :param x_start: the [N x C x ...] tensor of inputs.
875
+ :return: a batch of [N] KL values (in bits), one per batch element.
876
+ """
877
+ batch_size = x_start.shape[0]
878
+ t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
879
+ qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
880
+ kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
881
+ return mean_flat(kl_prior) / np.log(2.0)
882
+
883
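# --- Illustrative sketch (not part of this file): the prior KL term in bits/dim ---
# _prior_bpd measures how far q(x_T | x_0) is from the standard-normal prior. The
# closed-form KL between diagonal Gaussians, averaged over dimensions and divided
# by log(2), gives bits per dimension; the toy statistics below are placeholders.
import numpy as np
import torch

def normal_kl(mean1, logvar1, mean2, logvar2):
    return 0.5 * (-1.0 + logvar2 - logvar1 + torch.exp(logvar1 - logvar2)
                  + ((mean1 - mean2) ** 2) * torch.exp(-logvar2))

qt_mean = 0.01 * torch.randn(2, 3, 8, 8)        # ~sqrt(abar_T) * x_0, small at t = T-1
qt_logvar = torch.full_like(qt_mean, -0.01)     # log(1 - abar_T), close to 0
kl = normal_kl(qt_mean, qt_logvar, torch.zeros_like(qt_mean), torch.zeros_like(qt_mean))
prior_bpd = kl.flatten(1).mean(dim=1) / np.log(2.0)   # one value per batch element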
+ def p_losses(self, x_start, cond, t, noise=None):
884
+ noise = default(noise, lambda: torch.randn_like(x_start))
885
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
886
+ model_output = self.apply_model(x_noisy, t, cond)
887
+
888
+ loss_dict = {}
889
+ prefix = 'train' if self.training else 'val'
890
+
891
+ if self.parameterization == "x0":
892
+ target = x_start
893
+ elif self.parameterization == "eps":
894
+ target = noise
895
+ elif self.parameterization == "v":
896
+ target = self.get_v(x_start, noise, t)
897
+ else:
898
+ raise NotImplementedError()
899
+
900
+ loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
901
+ loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
902
+
903
+ logvar_t = self.logvar[t].to(self.device)
904
+ loss = loss_simple / torch.exp(logvar_t) + logvar_t
905
+ # loss = loss_simple / torch.exp(self.logvar) + self.logvar
906
+ if self.learn_logvar:
907
+ loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
908
+ loss_dict.update({'logvar': self.logvar.data.mean()})
909
+
910
+ loss = self.l_simple_weight * loss.mean()
911
+
912
+ loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
913
+ loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
914
+ loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
915
+ loss += (self.original_elbo_weight * loss_vlb)
916
+ loss_dict.update({f'{prefix}/loss': loss})
917
+
918
+ return loss, loss_dict
919
+
920
+ def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
921
+ return_x0=False, score_corrector=None, corrector_kwargs=None):
922
+ t_in = t
923
+ model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
924
+
925
+ if score_corrector is not None:
926
+ assert self.parameterization == "eps"
927
+ model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
928
+
929
+ if return_codebook_ids:
930
+ model_out, logits = model_out
931
+
932
+ if self.parameterization == "eps":
933
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
934
+ elif self.parameterization == "x0":
935
+ x_recon = model_out
936
+ else:
937
+ raise NotImplementedError()
938
+
939
+ if clip_denoised:
940
+ x_recon.clamp_(-1., 1.)
941
+ if quantize_denoised:
942
+ x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
943
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
944
+ if return_codebook_ids:
945
+ return model_mean, posterior_variance, posterior_log_variance, logits
946
+ elif return_x0:
947
+ return model_mean, posterior_variance, posterior_log_variance, x_recon
948
+ else:
949
+ return model_mean, posterior_variance, posterior_log_variance
950
+
951
+ @torch.no_grad()
952
+ def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
953
+ return_codebook_ids=False, quantize_denoised=False, return_x0=False,
954
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
955
+ b, *_, device = *x.shape, x.device
956
+ outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
957
+ return_codebook_ids=return_codebook_ids,
958
+ quantize_denoised=quantize_denoised,
959
+ return_x0=return_x0,
960
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
961
+ if return_codebook_ids:
962
+ raise DeprecationWarning("Support dropped.")
963
+ model_mean, _, model_log_variance, logits = outputs
964
+ elif return_x0:
965
+ model_mean, _, model_log_variance, x0 = outputs
966
+ else:
967
+ model_mean, _, model_log_variance = outputs
968
+
969
+ noise = noise_like(x.shape, device, repeat_noise) * temperature
970
+ if noise_dropout > 0.:
971
+ noise = torch.nn.functional.dropout(noise, p=noise_dropout)
972
+ # no noise when t == 0
973
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
974
+
975
+ if return_codebook_ids:
976
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
977
+ if return_x0:
978
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
979
+ else:
980
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
981
+
982
+ @torch.no_grad()
983
+ def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
984
+ img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
985
+ score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
986
+ log_every_t=None):
987
+ if not log_every_t:
988
+ log_every_t = self.log_every_t
989
+ timesteps = self.num_timesteps
990
+ if batch_size is not None:
991
+ b = batch_size if batch_size is not None else shape[0]
992
+ shape = [batch_size] + list(shape)
993
+ else:
994
+ b = batch_size = shape[0]
995
+ if x_T is None:
996
+ img = torch.randn(shape, device=self.device)
997
+ else:
998
+ img = x_T
999
+ intermediates = []
1000
+ if cond is not None:
1001
+ if isinstance(cond, dict):
1002
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1003
+ list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
1004
+ else:
1005
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1006
+
1007
+ if start_T is not None:
1008
+ timesteps = min(timesteps, start_T)
1009
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
1010
+ total=timesteps) if verbose else reversed(
1011
+ range(0, timesteps))
1012
+ if type(temperature) == float:
1013
+ temperature = [temperature] * timesteps
1014
+
1015
+ for i in iterator:
1016
+ ts = torch.full((b,), i, device=self.device, dtype=torch.long)
1017
+ if self.shorten_cond_schedule:
1018
+ assert self.model.conditioning_key != 'hybrid'
1019
+ tc = self.cond_ids[ts].to(cond.device)
1020
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1021
+
1022
+ img, x0_partial = self.p_sample(img, cond, ts,
1023
+ clip_denoised=self.clip_denoised,
1024
+ quantize_denoised=quantize_denoised, return_x0=True,
1025
+ temperature=temperature[i], noise_dropout=noise_dropout,
1026
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1027
+ if mask is not None:
1028
+ assert x0 is not None
1029
+ img_orig = self.q_sample(x0, ts)
1030
+ img = img_orig * mask + (1. - mask) * img
1031
+
1032
+ if i % log_every_t == 0 or i == timesteps - 1:
1033
+ intermediates.append(x0_partial)
1034
+ if callback: callback(i)
1035
+ if img_callback: img_callback(img, i)
1036
+ return img, intermediates
1037
+
1038
+ @torch.no_grad()
1039
+ def p_sample_loop(self, cond, shape, return_intermediates=False,
1040
+ x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
1041
+ mask=None, x0=None, img_callback=None, start_T=None,
1042
+ log_every_t=None):
1043
+
1044
+ if not log_every_t:
1045
+ log_every_t = self.log_every_t
1046
+ device = self.betas.device
1047
+ b = shape[0]
1048
+ if x_T is None:
1049
+ img = torch.randn(shape, device=device)
1050
+ else:
1051
+ img = x_T
1052
+
1053
+ intermediates = [img]
1054
+ if timesteps is None:
1055
+ timesteps = self.num_timesteps
1056
+
1057
+ if start_T is not None:
1058
+ timesteps = min(timesteps, start_T)
1059
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
1060
+ range(0, timesteps))
1061
+
1062
+ if mask is not None:
1063
+ assert x0 is not None
1064
+ assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
1065
+
1066
+ for i in iterator:
1067
+ ts = torch.full((b,), i, device=device, dtype=torch.long)
1068
+ if self.shorten_cond_schedule:
1069
+ assert self.model.conditioning_key != 'hybrid'
1070
+ tc = self.cond_ids[ts].to(cond.device)
1071
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1072
+
1073
+ img = self.p_sample(img, cond, ts,
1074
+ clip_denoised=self.clip_denoised,
1075
+ quantize_denoised=quantize_denoised)
1076
+ if mask is not None:
1077
+ img_orig = self.q_sample(x0, ts)
1078
+ img = img_orig * mask + (1. - mask) * img
1079
+
1080
+ if i % log_every_t == 0 or i == timesteps - 1:
1081
+ intermediates.append(img)
1082
+ if callback: callback(i)
1083
+ if img_callback: img_callback(img, i)
1084
+
1085
+ if return_intermediates:
1086
+ return img, intermediates
1087
+ return img
1088
+
1089
+ @torch.no_grad()
1090
+ def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
1091
+ verbose=True, timesteps=None, quantize_denoised=False,
1092
+ mask=None, x0=None, shape=None, **kwargs):
1093
+ if shape is None:
1094
+ shape = (batch_size, self.channels, self.image_size, self.image_size)
1095
+ if cond is not None:
1096
+ if isinstance(cond, dict):
1097
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1098
+ list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
1099
+ else:
1100
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1101
+ return self.p_sample_loop(cond,
1102
+ shape,
1103
+ return_intermediates=return_intermediates, x_T=x_T,
1104
+ verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
1105
+ mask=mask, x0=x0)
1106
+
1107
+ @torch.no_grad()
1108
+ def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
1109
+ if ddim:
1110
+ ddim_sampler = DDIMSampler(self)
1111
+ shape = (self.channels, self.image_size, self.image_size)
1112
+ samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
1113
+ shape, cond, verbose=False, **kwargs)
1114
+
1115
+ else:
1116
+ samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
1117
+ return_intermediates=True, **kwargs)
1118
+
1119
+ return samples, intermediates
1120
+
1121
+ @torch.no_grad()
1122
+ def get_unconditional_conditioning(self, batch_size, null_label=None):
1123
+ if null_label is not None:
1124
+ xc = null_label
1125
+ if isinstance(xc, ListConfig):
1126
+ xc = list(xc)
1127
+ if isinstance(xc, dict) or isinstance(xc, list):
1128
+ c = self.get_learned_conditioning(xc)
1129
+ else:
1130
+ if hasattr(xc, "to"):
1131
+ xc = xc.to(self.device)
1132
+ c = self.get_learned_conditioning(xc)
1133
+ else:
1134
+ if self.cond_stage_key in ["class_label", "cls"]:
1135
+ xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
1136
+ return self.get_learned_conditioning(xc)
1137
+ else:
1138
+ raise NotImplementedError("todo")
1139
+ if isinstance(c, list): # in case the encoder gives us a list
1140
+ for i in range(len(c)):
1141
+ c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
1142
+ else:
1143
+ c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
1144
+ return c
1145
+
1146
+ @torch.no_grad()
1147
+ def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,
1148
+ quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1149
+ plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
1150
+ use_ema_scope=True,
1151
+ **kwargs):
1152
+ ema_scope = self.ema_scope if use_ema_scope else nullcontext
1153
+ use_ddim = ddim_steps is not None
1154
+
1155
+ log = dict()
1156
+ z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
1157
+ return_first_stage_outputs=True,
1158
+ force_c_encode=True,
1159
+ return_original_cond=True,
1160
+ bs=N)
1161
+ N = min(x.shape[0], N)
1162
+ n_row = min(x.shape[0], n_row)
1163
+ log["inputs"] = x
1164
+ log["reconstruction"] = xrec
1165
+ if self.model.conditioning_key is not None:
1166
+ if hasattr(self.cond_stage_model, "decode"):
1167
+ xc = self.cond_stage_model.decode(c)
1168
+ log["conditioning"] = xc
1169
+ elif self.cond_stage_key in ["caption", "txt"]:
1170
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
1171
+ log["conditioning"] = xc
1172
+ elif self.cond_stage_key in ['class_label', "cls"]:
1173
+ try:
1174
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
1175
+ log['conditioning'] = xc
1176
+ except KeyError:
1177
+ # probably no "human_label" in batch
1178
+ pass
1179
+ elif isimage(xc):
1180
+ log["conditioning"] = xc
1181
+ if ismap(xc):
1182
+ log["original_conditioning"] = self.to_rgb(xc)
1183
+
1184
+ if plot_diffusion_rows:
1185
+ # get diffusion row
1186
+ diffusion_row = list()
1187
+ z_start = z[:n_row]
1188
+ for t in range(self.num_timesteps):
1189
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1190
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1191
+ t = t.to(self.device).long()
1192
+ noise = torch.randn_like(z_start)
1193
+ z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1194
+ diffusion_row.append(self.decode_first_stage(z_noisy))
1195
+
1196
+ diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1197
+ diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1198
+ diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1199
+ diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1200
+ log["diffusion_row"] = diffusion_grid
1201
+
1202
+ if sample:
1203
+ # get denoise row
1204
+ with ema_scope("Sampling"):
1205
+ samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1206
+ ddim_steps=ddim_steps, eta=ddim_eta)
1207
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1208
+ x_samples = self.decode_first_stage(samples)
1209
+ log["samples"] = x_samples
1210
+ if plot_denoise_rows:
1211
+ denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1212
+ log["denoise_row"] = denoise_grid
1213
+
1214
+ if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
1215
+ self.first_stage_model, IdentityFirstStage):
1216
+ # also display when quantizing x0 while sampling
1217
+ with ema_scope("Plotting Quantized Denoised"):
1218
+ samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1219
+ ddim_steps=ddim_steps, eta=ddim_eta,
1220
+ quantize_denoised=True)
1221
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
1222
+ # quantize_denoised=True)
1223
+ x_samples = self.decode_first_stage(samples.to(self.device))
1224
+ log["samples_x0_quantized"] = x_samples
1225
+
1226
+ if unconditional_guidance_scale > 1.0:
1227
+ uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1228
+ if self.model.conditioning_key == "crossattn-adm":
1229
+ uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]}
1230
+ with ema_scope("Sampling with classifier-free guidance"):
1231
+ samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1232
+ ddim_steps=ddim_steps, eta=ddim_eta,
1233
+ unconditional_guidance_scale=unconditional_guidance_scale,
1234
+ unconditional_conditioning=uc,
1235
+ )
1236
+ x_samples_cfg = self.decode_first_stage(samples_cfg)
1237
+ log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1238
+
1239
+ if inpaint:
1240
+ # make a simple center square
1241
+ b, h, w = z.shape[0], z.shape[2], z.shape[3]
1242
+ mask = torch.ones(N, h, w).to(self.device)
1243
+ # zeros will be filled in
1244
+ mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
1245
+ mask = mask[:, None, ...]
1246
+ with ema_scope("Plotting Inpaint"):
1247
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1248
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1249
+ x_samples = self.decode_first_stage(samples.to(self.device))
1250
+ log["samples_inpainting"] = x_samples
1251
+ log["mask"] = mask
1252
+
1253
+ # outpaint
1254
+ mask = 1. - mask
1255
+ with ema_scope("Plotting Outpaint"):
1256
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1257
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1258
+ x_samples = self.decode_first_stage(samples.to(self.device))
1259
+ log["samples_outpainting"] = x_samples
1260
+
1261
+ if plot_progressive_rows:
1262
+ with ema_scope("Plotting Progressives"):
1263
+ img, progressives = self.progressive_denoising(c,
1264
+ shape=(self.channels, self.image_size, self.image_size),
1265
+ batch_size=N)
1266
+ prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1267
+ log["progressive_row"] = prog_row
1268
+
1269
+ if return_keys:
1270
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
1271
+ return log
1272
+ else:
1273
+ return {key: log[key] for key in return_keys}
1274
+ return log
1275
+
1276
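# --- Illustrative sketch (not part of this file): classifier-free guidance ---
# The sampler behind sample_log combines a conditional and an unconditional model
# output as e = e_uncond + scale * (e_cond - e_uncond); this standalone helper shows
# only that combination, not the DDIM sampler's actual implementation.
import torch

def cfg_combine(eps_cond, eps_uncond, scale):
    return eps_uncond + scale * (eps_cond - eps_uncond)

eps_cond = torch.randn(2, 4, 64, 64)
eps_uncond = torch.randn_like(eps_cond)
eps = cfg_combine(eps_cond, eps_uncond, scale=7.5)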
+ def configure_optimizers(self):
1277
+ lr = self.learning_rate
1278
+ params = list(self.model.parameters())
1279
+ if self.cond_stage_trainable:
1280
+ print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1281
+ params = params + list(self.cond_stage_model.parameters())
1282
+ if self.learn_logvar:
1283
+ print('Diffusion model optimizing logvar')
1284
+ params.append(self.logvar)
1285
+ opt = torch.optim.AdamW(params, lr=lr)
1286
+ if self.use_scheduler:
1287
+ assert 'target' in self.scheduler_config
1288
+ scheduler = instantiate_from_config(self.scheduler_config)
1289
+
1290
+ print("Setting up LambdaLR scheduler...")
1291
+ scheduler = [
1292
+ {
1293
+ 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1294
+ 'interval': 'step',
1295
+ 'frequency': 1
1296
+ }]
1297
+ return [opt], scheduler
1298
+ return opt
1299
+
1300
+ @torch.no_grad()
1301
+ def to_rgb(self, x):
1302
+ x = x.float()
1303
+ if not hasattr(self, "colorize"):
1304
+ self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1305
+ x = nn.functional.conv2d(x, weight=self.colorize)
1306
+ x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1307
+ return x
1308
+
1309
+
1310
+ class DiffusionWrapper(pl.LightningModule):
1311
+ def __init__(self, diff_model_config, conditioning_key):
1312
+ super().__init__()
1313
+ self.sequential_cross_attn = diff_model_config.pop("sequential_crossattn", False)
1314
+ self.diffusion_model = instantiate_from_config(diff_model_config)
1315
+ self.conditioning_key = conditioning_key
1316
+ assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm']
1317
+
1318
+ def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None):
1319
+ if self.conditioning_key is None:
1320
+ out = self.diffusion_model(x, t)
1321
+ elif self.conditioning_key == 'concat':
1322
+ xc = torch.cat([x] + c_concat, dim=1)
1323
+ out = self.diffusion_model(xc, t)
1324
+ elif self.conditioning_key == 'crossattn':
1325
+ if not self.sequential_cross_attn:
1326
+ cc = torch.cat(c_crossattn, 1)
1327
+ else:
1328
+ cc = c_crossattn
1329
+ out = self.diffusion_model(x, t, context=cc)
1330
+ elif self.conditioning_key == 'hybrid':
1331
+ xc = torch.cat([x] + c_concat, dim=1)
1332
+ cc = torch.cat(c_crossattn, 1)
1333
+ out = self.diffusion_model(xc, t, context=cc)
1334
+ elif self.conditioning_key == 'hybrid-adm':
1335
+ assert c_adm is not None
1336
+ xc = torch.cat([x] + c_concat, dim=1)
1337
+ cc = torch.cat(c_crossattn, 1)
1338
+ out = self.diffusion_model(xc, t, context=cc, y=c_adm)
1339
+ elif self.conditioning_key == 'crossattn-adm':
1340
+ assert c_adm is not None
1341
+ cc = torch.cat(c_crossattn, 1)
1342
+ out = self.diffusion_model(x, t, context=cc, y=c_adm)
1343
+ elif self.conditioning_key == 'adm':
1344
+ cc = c_crossattn[0]
1345
+ out = self.diffusion_model(x, t, y=cc)
1346
+ else:
1347
+ raise NotImplementedError()
1348
+
1349
+ return out
1350
+
1351
+
1352
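# --- Illustrative sketch (not part of this file): the conditioning-key dispatch ---
# apply_model wraps bare conditioning into {"c_concat": [...]} or {"c_crossattn": [...]};
# DiffusionWrapper then either channel-concatenates with x ('concat') or passes the
# concatenated context to the UNet's cross-attention ('crossattn'). The tensors below
# are placeholders with typical shapes, used only to show the plumbing.
import torch

x = torch.randn(2, 4, 64, 64)          # latent
txt = torch.randn(2, 77, 1024)         # stand-in for a text-encoder output
mask = torch.randn(2, 5, 64, 64)       # e.g. inpainting channels

cond_crossattn = {"c_crossattn": [txt]}
cond_concat = {"c_concat": [mask]}

cc = torch.cat(cond_crossattn["c_crossattn"], dim=1)   # 'crossattn' branch: UNet context
xc = torch.cat([x] + cond_concat["c_concat"], dim=1)   # 'concat' branch: 9-channel UNet input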
+ class LatentUpscaleDiffusion(LatentDiffusion):
1353
+ def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs):
1354
+ super().__init__(*args, **kwargs)
1355
+ # assumes that neither the cond_stage nor the low_scale_model contain trainable params
1356
+ assert not self.cond_stage_trainable
1357
+ self.instantiate_low_stage(low_scale_config)
1358
+ self.low_scale_key = low_scale_key
1359
+ self.noise_level_key = noise_level_key
1360
+
1361
+ def instantiate_low_stage(self, config):
1362
+ model = instantiate_from_config(config)
1363
+ self.low_scale_model = model.eval()
1364
+ self.low_scale_model.train = disabled_train
1365
+ for param in self.low_scale_model.parameters():
1366
+ param.requires_grad = False
1367
+
1368
+ @torch.no_grad()
1369
+ def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False):
1370
+ if not log_mode:
1371
+ z, c = super().get_input(batch, k, force_c_encode=True, bs=bs)
1372
+ else:
1373
+ z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1374
+ force_c_encode=True, return_original_cond=True, bs=bs)
1375
+ x_low = batch[self.low_scale_key][:bs]
1376
+ x_low = rearrange(x_low, 'b h w c -> b c h w')
1377
+ x_low = x_low.to(memory_format=torch.contiguous_format).float()
1378
+ zx, noise_level = self.low_scale_model(x_low)
1379
+ if self.noise_level_key is not None:
1380
+ # get noise level from batch instead, e.g. when extracting a custom noise level for bsr
1381
+ raise NotImplementedError('TODO')
1382
+
1383
+ all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level}
1384
+ if log_mode:
1385
+ # TODO: maybe disable if too expensive
1386
+ x_low_rec = self.low_scale_model.decode(zx)
1387
+ return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level
1388
+ return z, all_conds
1389
+
1390
+ @torch.no_grad()
1391
+ def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1392
+ plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True,
1393
+ unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True,
1394
+ **kwargs):
1395
+ ema_scope = self.ema_scope if use_ema_scope else nullcontext
1396
+ use_ddim = ddim_steps is not None
1397
+
1398
+ log = dict()
1399
+ z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N,
1400
+ log_mode=True)
1401
+ N = min(x.shape[0], N)
1402
+ n_row = min(x.shape[0], n_row)
1403
+ log["inputs"] = x
1404
+ log["reconstruction"] = xrec
1405
+ log["x_lr"] = x_low
1406
+ log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec
1407
+ if self.model.conditioning_key is not None:
1408
+ if hasattr(self.cond_stage_model, "decode"):
1409
+ xc = self.cond_stage_model.decode(c)
1410
+ log["conditioning"] = xc
1411
+ elif self.cond_stage_key in ["caption", "txt"]:
1412
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
1413
+ log["conditioning"] = xc
1414
+ elif self.cond_stage_key in ['class_label', 'cls']:
1415
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
1416
+ log['conditioning'] = xc
1417
+ elif isimage(xc):
1418
+ log["conditioning"] = xc
1419
+ if ismap(xc):
1420
+ log["original_conditioning"] = self.to_rgb(xc)
1421
+
1422
+ if plot_diffusion_rows:
1423
+ # get diffusion row
1424
+ diffusion_row = list()
1425
+ z_start = z[:n_row]
1426
+ for t in range(self.num_timesteps):
1427
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1428
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1429
+ t = t.to(self.device).long()
1430
+ noise = torch.randn_like(z_start)
1431
+ z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1432
+ diffusion_row.append(self.decode_first_stage(z_noisy))
1433
+
1434
+ diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1435
+ diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1436
+ diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1437
+ diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1438
+ log["diffusion_row"] = diffusion_grid
1439
+
1440
+ if sample:
1441
+ # get denoise row
1442
+ with ema_scope("Sampling"):
1443
+ samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1444
+ ddim_steps=ddim_steps, eta=ddim_eta)
1445
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1446
+ x_samples = self.decode_first_stage(samples)
1447
+ log["samples"] = x_samples
1448
+ if plot_denoise_rows:
1449
+ denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1450
+ log["denoise_row"] = denoise_grid
1451
+
1452
+ if unconditional_guidance_scale > 1.0:
1453
+ uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1454
+ # TODO explore better "unconditional" choices for the other keys
1455
+ # maybe guide away from empty text label and highest noise level and maximally degraded zx?
1456
+ uc = dict()
1457
+ for k in c:
1458
+ if k == "c_crossattn":
1459
+ assert isinstance(c[k], list) and len(c[k]) == 1
1460
+ uc[k] = [uc_tmp]
1461
+ elif k == "c_adm": # todo: only run with text-based guidance?
1462
+ assert isinstance(c[k], torch.Tensor)
1463
+ #uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level
1464
+ uc[k] = c[k]
1465
+ elif isinstance(c[k], list):
1466
+ uc[k] = [c[k][i] for i in range(len(c[k]))]
1467
+ else:
1468
+ uc[k] = c[k]
1469
+
1470
+ with ema_scope("Sampling with classifier-free guidance"):
1471
+ samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1472
+ ddim_steps=ddim_steps, eta=ddim_eta,
1473
+ unconditional_guidance_scale=unconditional_guidance_scale,
1474
+ unconditional_conditioning=uc,
1475
+ )
1476
+ x_samples_cfg = self.decode_first_stage(samples_cfg)
1477
+ log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1478
+
1479
+ if plot_progressive_rows:
1480
+ with ema_scope("Plotting Progressives"):
1481
+ img, progressives = self.progressive_denoising(c,
1482
+ shape=(self.channels, self.image_size, self.image_size),
1483
+ batch_size=N)
1484
+ prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1485
+ log["progressive_row"] = prog_row
1486
+
1487
+ return log
1488
+
1489
+
1490
+ class LatentFinetuneDiffusion(LatentDiffusion):
1491
+ """
1492
+ Basis for different fine-tuned variants, such as inpainting or depth2image
1493
+ To disable finetuning mode, set finetune_keys to None
1494
+ """
1495
+
1496
+ def __init__(self,
1497
+ concat_keys: tuple,
1498
+ finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
1499
+ "model_ema.diffusion_modelinput_blocks00weight"
1500
+ ),
1501
+ keep_finetune_dims=4,
1502
+ # if model was trained without concat mode before and we would like to keep these channels
1503
+ c_concat_log_start=None, # to log reconstruction of c_concat codes
1504
+ c_concat_log_end=None,
1505
+ *args, **kwargs
1506
+ ):
1507
+ ckpt_path = kwargs.pop("ckpt_path", None)
1508
+ ignore_keys = kwargs.pop("ignore_keys", list())
1509
+ super().__init__(*args, **kwargs)
1510
+ self.finetune_keys = finetune_keys
1511
+ self.concat_keys = concat_keys
1512
+ self.keep_dims = keep_finetune_dims
1513
+ self.c_concat_log_start = c_concat_log_start
1514
+ self.c_concat_log_end = c_concat_log_end
1515
+ if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint'
1516
+ if exists(ckpt_path):
1517
+ self.init_from_ckpt(ckpt_path, ignore_keys)
1518
+
1519
+ def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
1520
+ sd = torch.load(path, map_location="cpu")
1521
+ if "state_dict" in list(sd.keys()):
1522
+ sd = sd["state_dict"]
1523
+ keys = list(sd.keys())
1524
+ for k in keys:
1525
+ for ik in ignore_keys:
1526
+ if k.startswith(ik):
1527
+ print("Deleting key {} from state_dict.".format(k))
1528
+ del sd[k]
1529
+
1530
+ # make it explicit, finetune by including extra input channels
1531
+ if exists(self.finetune_keys) and k in self.finetune_keys:
1532
+ new_entry = None
1533
+ for name, param in self.named_parameters():
1534
+ if name in self.finetune_keys:
1535
+ print(
1536
+ f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only")
1537
+ new_entry = torch.zeros_like(param) # zero init
1538
+ assert exists(new_entry), 'did not find matching parameter to modify'
1539
+ new_entry[:, :self.keep_dims, ...] = sd[k]
1540
+ sd[k] = new_entry
1541
+
1542
+ missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
1543
+ sd, strict=False)
1544
+ print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
1545
+ if len(missing) > 0:
1546
+ print(f"Missing Keys: {missing}")
1547
+ if len(unexpected) > 0:
1548
+ print(f"Unexpected Keys: {unexpected}")
1549
+
1550
+ @torch.no_grad()
1551
+ def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1552
+ quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1553
+ plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
1554
+ use_ema_scope=True,
1555
+ **kwargs):
1556
+ ema_scope = self.ema_scope if use_ema_scope else nullcontext
1557
+ use_ddim = ddim_steps is not None
1558
+
1559
+ log = dict()
1560
+ z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True)
1561
+ c_cat, c = c["c_concat"][0], c["c_crossattn"][0]
1562
+ N = min(x.shape[0], N)
1563
+ n_row = min(x.shape[0], n_row)
1564
+ log["inputs"] = x
1565
+ log["reconstruction"] = xrec
1566
+ if self.model.conditioning_key is not None:
1567
+ if hasattr(self.cond_stage_model, "decode"):
1568
+ xc = self.cond_stage_model.decode(c)
1569
+ log["conditioning"] = xc
1570
+ elif self.cond_stage_key in ["caption", "txt"]:
1571
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
1572
+ log["conditioning"] = xc
1573
+ elif self.cond_stage_key in ['class_label', 'cls']:
1574
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
1575
+ log['conditioning'] = xc
1576
+ elif isimage(xc):
1577
+ log["conditioning"] = xc
1578
+ if ismap(xc):
1579
+ log["original_conditioning"] = self.to_rgb(xc)
1580
+
1581
+ if not (self.c_concat_log_start is None and self.c_concat_log_end is None):
1582
+ log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end])
1583
+
1584
+ if plot_diffusion_rows:
1585
+ # get diffusion row
1586
+ diffusion_row = list()
1587
+ z_start = z[:n_row]
1588
+ for t in range(self.num_timesteps):
1589
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1590
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1591
+ t = t.to(self.device).long()
1592
+ noise = torch.randn_like(z_start)
1593
+ z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1594
+ diffusion_row.append(self.decode_first_stage(z_noisy))
1595
+
1596
+ diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1597
+ diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1598
+ diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1599
+ diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1600
+ log["diffusion_row"] = diffusion_grid
1601
+
1602
+ if sample:
1603
+ # get denoise row
1604
+ with ema_scope("Sampling"):
1605
+ samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
1606
+ batch_size=N, ddim=use_ddim,
1607
+ ddim_steps=ddim_steps, eta=ddim_eta)
1608
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1609
+ x_samples = self.decode_first_stage(samples)
1610
+ log["samples"] = x_samples
1611
+ if plot_denoise_rows:
1612
+ denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1613
+ log["denoise_row"] = denoise_grid
1614
+
1615
+ if unconditional_guidance_scale > 1.0:
1616
+ uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1617
+ uc_cat = c_cat
1618
+ uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
1619
+ with ema_scope("Sampling with classifier-free guidance"):
1620
+ samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
1621
+ batch_size=N, ddim=use_ddim,
1622
+ ddim_steps=ddim_steps, eta=ddim_eta,
1623
+ unconditional_guidance_scale=unconditional_guidance_scale,
1624
+ unconditional_conditioning=uc_full,
1625
+ )
1626
+ x_samples_cfg = self.decode_first_stage(samples_cfg)
1627
+ log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1628
+
1629
+ return log
1630
+
1631
+
1632
+ class LatentInpaintDiffusion(LatentFinetuneDiffusion):
1633
+ """
1634
+ can either run as pure inpainting model (only concat mode) or with mixed conditionings,
1635
+ e.g. mask as concat and text via cross-attn.
1636
+ To disable finetuning mode, set finetune_keys to None
1637
+ """
1638
+
1639
+ def __init__(self,
1640
+ concat_keys=("mask", "masked_image"),
1641
+ masked_image_key="masked_image",
1642
+ *args, **kwargs
1643
+ ):
1644
+ super().__init__(concat_keys, *args, **kwargs)
1645
+ self.masked_image_key = masked_image_key
1646
+ assert self.masked_image_key in concat_keys
1647
+
1648
+ @torch.no_grad()
1649
+ def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
1650
+ # note: restricted to non-trainable encoders currently
1651
+ assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting'
1652
+ z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1653
+ force_c_encode=True, return_original_cond=True, bs=bs)
1654
+
1655
+ assert exists(self.concat_keys)
1656
+ c_cat = list()
1657
+ for ck in self.concat_keys:
1658
+ cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
1659
+ if bs is not None:
1660
+ cc = cc[:bs]
1661
+ cc = cc.to(self.device)
1662
+ bchw = z.shape
1663
+ if ck != self.masked_image_key:
1664
+ cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
1665
+ else:
1666
+ cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
1667
+ c_cat.append(cc)
1668
+ c_cat = torch.cat(c_cat, dim=1)
1669
+ all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
1670
+ if return_first_stage_outputs:
1671
+ return z, all_conds, x, xrec, xc
1672
+ return z, all_conds
1673
+
1674
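# --- Illustrative sketch (not part of this file): building the inpainting c_concat ---
# get_input above resizes the mask to the latent resolution and encodes the masked
# image with the first stage, then concatenates both along channels. `encode` below is
# a placeholder for the frozen autoencoder, and the mask convention (1 marks the hole)
# is an assumption; only the tensor plumbing is shown.
import torch

def encode(img):   # stand-in for first-stage encode + scaling
    return torch.nn.functional.avg_pool2d(img.mean(dim=1, keepdim=True).repeat(1, 4, 1, 1), 8)

image = torch.randn(1, 3, 512, 512)
mask = (torch.rand(1, 1, 512, 512) > 0.5).float()
masked_image = image * (mask < 0.5)

z_shape = (1, 4, 64, 64)
mask_lat = torch.nn.functional.interpolate(mask, size=z_shape[-2:])
z_masked = encode(masked_image)
c_concat = torch.cat([mask_lat, z_masked], dim=1)    # 5 channels fed as c_concat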
+ @torch.no_grad()
1675
+ def log_images(self, *args, **kwargs):
1676
+ log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs)
1677
+ log["masked_image"] = rearrange(args[0]["masked_image"],
1678
+ 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
1679
+ return log
1680
+
1681
+
1682
+ class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion):
1683
+ """
1684
+ condition on monocular depth estimation
1685
+ """
1686
+
1687
+ def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs):
1688
+ super().__init__(concat_keys=concat_keys, *args, **kwargs)
1689
+ self.depth_model = instantiate_from_config(depth_stage_config)
1690
+ self.depth_stage_key = concat_keys[0]
1691
+
1692
+ @torch.no_grad()
1693
+ def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
1694
+ # note: restricted to non-trainable encoders currently
1695
+ assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for depth2img'
1696
+ z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1697
+ force_c_encode=True, return_original_cond=True, bs=bs)
1698
+
1699
+ assert exists(self.concat_keys)
1700
+ assert len(self.concat_keys) == 1
1701
+ c_cat = list()
1702
+ for ck in self.concat_keys:
1703
+ cc = batch[ck]
1704
+ if bs is not None:
1705
+ cc = cc[:bs]
1706
+ cc = cc.to(self.device)
1707
+ cc = self.depth_model(cc)
1708
+ cc = torch.nn.functional.interpolate(
1709
+ cc,
1710
+ size=z.shape[2:],
1711
+ mode="bicubic",
1712
+ align_corners=False,
1713
+ )
1714
+
1715
+ depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3],
1716
+ keepdim=True)
1717
+ cc = 2. * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1.
1718
+ c_cat.append(cc)
1719
+ c_cat = torch.cat(c_cat, dim=1)
1720
+ all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
1721
+ if return_first_stage_outputs:
1722
+ return z, all_conds, x, xrec, xc
1723
+ return z, all_conds
1724
+
1725
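# --- Illustrative sketch (not part of this file): the depth-map normalization ---
# The depth conditioning is rescaled per-sample to [-1, 1] before being concatenated;
# the small epsilon guards against a constant depth map. `depth` is a placeholder.
import torch

depth = torch.rand(2, 1, 64, 64) * 10.0
dmin = torch.amin(depth, dim=[1, 2, 3], keepdim=True)
dmax = torch.amax(depth, dim=[1, 2, 3], keepdim=True)
depth_norm = 2.0 * (depth - dmin) / (dmax - dmin + 0.001) - 1.0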
+ @torch.no_grad()
1726
+ def log_images(self, *args, **kwargs):
1727
+ log = super().log_images(*args, **kwargs)
1728
+ depth = self.depth_model(args[0][self.depth_stage_key])
1729
+ depth_min, depth_max = torch.amin(depth, dim=[1, 2, 3], keepdim=True), \
1730
+ torch.amax(depth, dim=[1, 2, 3], keepdim=True)
1731
+ log["depth"] = 2. * (depth - depth_min) / (depth_max - depth_min) - 1.
1732
+ return log
1733
+
1734
+
1735
+ class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion):
1736
+ """
1737
+ condition on low-res image (and optionally on some spatial noise augmentation)
1738
+ """
1739
+ def __init__(self, concat_keys=("lr",), reshuffle_patch_size=None,
1740
+ low_scale_config=None, low_scale_key=None, *args, **kwargs):
1741
+ super().__init__(concat_keys=concat_keys, *args, **kwargs)
1742
+ self.reshuffle_patch_size = reshuffle_patch_size
1743
+ self.low_scale_model = None
1744
+ if low_scale_config is not None:
1745
+ print("Initializing a low-scale model")
1746
+ assert exists(low_scale_key)
1747
+ self.instantiate_low_stage(low_scale_config)
1748
+ self.low_scale_key = low_scale_key
1749
+
1750
+ def instantiate_low_stage(self, config):
1751
+ model = instantiate_from_config(config)
1752
+ self.low_scale_model = model.eval()
1753
+ self.low_scale_model.train = disabled_train
1754
+ for param in self.low_scale_model.parameters():
1755
+ param.requires_grad = False
1756
+
1757
+ @torch.no_grad()
1758
+ def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
1759
+ # note: restricted to non-trainable encoders currently
1760
+ assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for upscaling-ft'
1761
+ z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1762
+ force_c_encode=True, return_original_cond=True, bs=bs)
1763
+
1764
+ assert exists(self.concat_keys)
1765
+ assert len(self.concat_keys) == 1
1766
+ # optionally make spatial noise_level here
1767
+ c_cat = list()
1768
+ noise_level = None
1769
+ for ck in self.concat_keys:
1770
+ cc = batch[ck]
1771
+ cc = rearrange(cc, 'b h w c -> b c h w')
1772
+ if exists(self.reshuffle_patch_size):
1773
+ assert isinstance(self.reshuffle_patch_size, int)
1774
+ cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w',
1775
+ p1=self.reshuffle_patch_size, p2=self.reshuffle_patch_size)
1776
+ if bs is not None:
1777
+ cc = cc[:bs]
1778
+ cc = cc.to(self.device)
1779
+ if exists(self.low_scale_model) and ck == self.low_scale_key:
1780
+ cc, noise_level = self.low_scale_model(cc)
1781
+ c_cat.append(cc)
1782
+ c_cat = torch.cat(c_cat, dim=1)
1783
+ if exists(noise_level):
1784
+ all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level}
1785
+ else:
1786
+ all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
1787
+ if return_first_stage_outputs:
1788
+ return z, all_conds, x, xrec, xc
1789
+ return z, all_conds
1790
+
1791
+ @torch.no_grad()
1792
+ def log_images(self, *args, **kwargs):
1793
+ log = super().log_images(*args, **kwargs)
1794
+ log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w')
1795
+ return log
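A minimal standalone sketch (illustrative, not part of the diff above) of the per-sample depth normalization that LatentDepth2ImageDiffusion applies to the MiDaS output before concatenation; `normalize_depth` is a hypothetical helper name, not a function in this repository:

    import torch

    def normalize_depth(depth: torch.Tensor, eps: float = 0.001) -> torch.Tensor:
        # depth: (B, C, H, W); reduce over all non-batch dims, keep dims for broadcasting
        d_min = torch.amin(depth, dim=[1, 2, 3], keepdim=True)
        d_max = torch.amax(depth, dim=[1, 2, 3], keepdim=True)
        # map each sample to [-1, 1]; eps guards against a constant depth map
        return 2. * (depth - d_min) / (d_max - d_min + eps) - 1.

    # e.g. normalize_depth(torch.rand(2, 1, 64, 64)) has per-sample min ~ -1 and max ~ 1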
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .sampler import DPMSolverSampler
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/dpm_solver.py ADDED
@@ -0,0 +1,1154 @@
 
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+ import math
4
+ from tqdm import tqdm
5
+
6
+
7
+ class NoiseScheduleVP:
8
+ def __init__(
9
+ self,
10
+ schedule='discrete',
11
+ betas=None,
12
+ alphas_cumprod=None,
13
+ continuous_beta_0=0.1,
14
+ continuous_beta_1=20.,
15
+ ):
16
+ """Create a wrapper class for the forward SDE (VP type).
17
+ ***
18
+ Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
19
+ We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
20
+ ***
21
+ The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
22
+ We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
23
+ Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
24
+ log_alpha_t = self.marginal_log_mean_coeff(t)
25
+ sigma_t = self.marginal_std(t)
26
+ lambda_t = self.marginal_lambda(t)
27
+ Moreover, as lambda(t) is an invertible function, we also support its inverse function:
28
+ t = self.inverse_lambda(lambda_t)
29
+ ===============================================================
30
+ We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
31
+ 1. For discrete-time DPMs:
32
+ For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
33
+ t_i = (i + 1) / N
34
+ e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
35
+ We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
36
+ Args:
37
+ betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
38
+ alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
39
+ Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
40
+ **Important**: Please pay special attention to the argument `alphas_cumprod`:
41
+ The `alphas_cumprod` is the \hat{alpha_n} array in the notation of DDPM. Specifically, DDPMs assume that
42
+ q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
43
+ Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
44
+ alpha_{t_n} = \sqrt{\hat{alpha_n}},
45
+ and
46
+ log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
47
+ 2. For continuous-time DPMs:
48
+ We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
49
+ schedule are the default settings in DDPM and improved-DDPM:
50
+ Args:
51
+ beta_min: A `float` number. The smallest beta for the linear schedule.
52
+ beta_max: A `float` number. The largest beta for the linear schedule.
53
+ cosine_s: A `float` number. The hyperparameter in the cosine schedule.
54
+ cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
55
+ T: A `float` number. The ending time of the forward process.
56
+ ===============================================================
57
+ Args:
58
+ schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
59
+ 'linear' or 'cosine' for continuous-time DPMs.
60
+ Returns:
61
+ A wrapper object of the forward SDE (VP type).
62
+
63
+ ===============================================================
64
+ Example:
65
+ # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
66
+ >>> ns = NoiseScheduleVP('discrete', betas=betas)
67
+ # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
68
+ >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
69
+ # For continuous-time DPMs (VPSDE), linear schedule:
70
+ >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
71
+ """
72
+
73
+ if schedule not in ['discrete', 'linear', 'cosine']:
74
+ raise ValueError(
75
+ "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(
76
+ schedule))
77
+
78
+ self.schedule = schedule
79
+ if schedule == 'discrete':
80
+ if betas is not None:
81
+ log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
82
+ else:
83
+ assert alphas_cumprod is not None
84
+ log_alphas = 0.5 * torch.log(alphas_cumprod)
85
+ self.total_N = len(log_alphas)
86
+ self.T = 1.
87
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
88
+ self.log_alpha_array = log_alphas.reshape((1, -1,))
89
+ else:
90
+ self.total_N = 1000
91
+ self.beta_0 = continuous_beta_0
92
+ self.beta_1 = continuous_beta_1
93
+ self.cosine_s = 0.008
94
+ self.cosine_beta_max = 999.
95
+ self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (
96
+ 1. + self.cosine_s) / math.pi - self.cosine_s
97
+ self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
98
+ self.schedule = schedule
99
+ if schedule == 'cosine':
100
+ # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
101
+ # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
102
+ self.T = 0.9946
103
+ else:
104
+ self.T = 1.
105
+
106
+ def marginal_log_mean_coeff(self, t):
107
+ """
108
+ Compute log(alpha_t) of a given continuous-time label t in [0, T].
109
+ """
110
+ if self.schedule == 'discrete':
111
+ return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),
112
+ self.log_alpha_array.to(t.device)).reshape((-1))
113
+ elif self.schedule == 'linear':
114
+ return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
115
+ elif self.schedule == 'cosine':
116
+ log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
117
+ log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
118
+ return log_alpha_t
119
+
120
+ def marginal_alpha(self, t):
121
+ """
122
+ Compute alpha_t of a given continuous-time label t in [0, T].
123
+ """
124
+ return torch.exp(self.marginal_log_mean_coeff(t))
125
+
126
+ def marginal_std(self, t):
127
+ """
128
+ Compute sigma_t of a given continuous-time label t in [0, T].
129
+ """
130
+ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
131
+
132
+ def marginal_lambda(self, t):
133
+ """
134
+ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
135
+ """
136
+ log_mean_coeff = self.marginal_log_mean_coeff(t)
137
+ log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
138
+ return log_mean_coeff - log_std
139
+
140
+ def inverse_lambda(self, lamb):
141
+ """
142
+ Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
143
+ """
144
+ if self.schedule == 'linear':
145
+ tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
146
+ Delta = self.beta_0 ** 2 + tmp
147
+ return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
148
+ elif self.schedule == 'discrete':
149
+ log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
150
+ t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),
151
+ torch.flip(self.t_array.to(lamb.device), [1]))
152
+ return t.reshape((-1,))
153
+ else:
154
+ log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
155
+ t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (
156
+ 1. + self.cosine_s) / math.pi - self.cosine_s
157
+ t = t_fn(log_alpha)
158
+ return t
159
+
160
+
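A brief usage sketch for the schedule wrapper above (illustrative, not part of the committed file); the linear beta schedule is an arbitrary example:

    import torch
    # from ldm.models.diffusion.dpm_solver.dpm_solver import NoiseScheduleVP

    betas = torch.linspace(1e-4, 0.02, 1000)        # example DDPM-style beta schedule
    ns = NoiseScheduleVP('discrete', betas=betas)
    t = torch.tensor([0.5])
    alpha_t, sigma_t = ns.marginal_alpha(t), ns.marginal_std(t)
    # By construction alpha_t**2 + sigma_t**2 == 1, and
    # ns.inverse_lambda(ns.marginal_lambda(t)) recovers t up to interpolation error.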
161
+ def model_wrapper(
162
+ model,
163
+ noise_schedule,
164
+ model_type="noise",
165
+ model_kwargs={},
166
+ guidance_type="uncond",
167
+ condition=None,
168
+ unconditional_condition=None,
169
+ guidance_scale=1.,
170
+ classifier_fn=None,
171
+ classifier_kwargs={},
172
+ ):
173
+ """Create a wrapper function for the noise prediction model.
174
+ DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
175
+ first wrap the model function into a noise prediction model that accepts the continuous time as the input.
176
+ We support four types of the diffusion model by setting `model_type`:
177
+ 1. "noise": noise prediction model. (Trained by predicting noise).
178
+ 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
179
+ 3. "v": velocity prediction model. (Trained by predicting the velocity).
180
+ The "v" prediction is derived in detail in Appendix D of [1], and is used in Imagen Video [2].
181
+ [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
182
+ arXiv preprint arXiv:2202.00512 (2022).
183
+ [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
184
+ arXiv preprint arXiv:2210.02303 (2022).
185
+
186
+ 4. "score": marginal score function. (Trained by denoising score matching).
187
+ Note that the score function and the noise prediction model follow a simple relationship:
188
+ ```
189
+ noise(x_t, t) = -sigma_t * score(x_t, t)
190
+ ```
191
+ We support three types of guided sampling by DPMs by setting `guidance_type`:
192
+ 1. "uncond": unconditional sampling by DPMs.
193
+ The input `model` has the following format:
194
+ ``
195
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
196
+ ``
197
+ 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
198
+ The input `model` has the following format:
199
+ ``
200
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
201
+ ``
202
+ The input `classifier_fn` has the following format:
203
+ ``
204
+ classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
205
+ ``
206
+ [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
207
+ in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
208
+ 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
209
+ The input `model` has the following format:
210
+ ``
211
+ model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
212
+ ``
213
+ And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
214
+ [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
215
+ arXiv preprint arXiv:2207.12598 (2022).
216
+
217
+ The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
218
+ or continuous-time labels (i.e. epsilon to T).
219
+ We wrap the model function to accept only `x` and `t_continuous` as inputs and to output the predicted noise:
220
+ ``
221
+ def model_fn(x, t_continuous) -> noise:
222
+ t_input = get_model_input_time(t_continuous)
223
+ return noise_pred(model, x, t_input, **model_kwargs)
224
+ ``
225
+ where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
226
+ ===============================================================
227
+ Args:
228
+ model: A diffusion model with the corresponding format described above.
229
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
230
+ model_type: A `str`. The parameterization type of the diffusion model.
231
+ "noise" or "x_start" or "v" or "score".
232
+ model_kwargs: A `dict`. A dict for the other inputs of the model function.
233
+ guidance_type: A `str`. The type of the guidance for sampling.
234
+ "uncond" or "classifier" or "classifier-free".
235
+ condition: A pytorch tensor. The condition for the guided sampling.
236
+ Only used for "classifier" or "classifier-free" guidance type.
237
+ unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
238
+ Only used for "classifier-free" guidance type.
239
+ guidance_scale: A `float`. The scale for the guided sampling.
240
+ classifier_fn: A classifier function. Only used for the classifier guidance.
241
+ classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
242
+ Returns:
243
+ A noise prediction model that accepts the noised data and the continuous time as the inputs.
244
+ """
245
+
246
+ def get_model_input_time(t_continuous):
247
+ """
248
+ Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
249
+ For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
250
+ For continuous-time DPMs, we just use `t_continuous`.
251
+ """
252
+ if noise_schedule.schedule == 'discrete':
253
+ return (t_continuous - 1. / noise_schedule.total_N) * 1000.
254
+ else:
255
+ return t_continuous
256
+
257
+ def noise_pred_fn(x, t_continuous, cond=None):
258
+ if t_continuous.reshape((-1,)).shape[0] == 1:
259
+ t_continuous = t_continuous.expand((x.shape[0]))
260
+ t_input = get_model_input_time(t_continuous)
261
+ if cond is None:
262
+ output = model(x, t_input, **model_kwargs)
263
+ else:
264
+ output = model(x, t_input, cond, **model_kwargs)
265
+ if model_type == "noise":
266
+ return output
267
+ elif model_type == "x_start":
268
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
269
+ dims = x.dim()
270
+ return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
271
+ elif model_type == "v":
272
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
273
+ dims = x.dim()
274
+ return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
275
+ elif model_type == "score":
276
+ sigma_t = noise_schedule.marginal_std(t_continuous)
277
+ dims = x.dim()
278
+ return -expand_dims(sigma_t, dims) * output
279
+
280
+ def cond_grad_fn(x, t_input):
281
+ """
282
+ Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
283
+ """
284
+ with torch.enable_grad():
285
+ x_in = x.detach().requires_grad_(True)
286
+ log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
287
+ return torch.autograd.grad(log_prob.sum(), x_in)[0]
288
+
289
+ def model_fn(x, t_continuous):
290
+ """
291
+ The noise prediction model function that is used for DPM-Solver.
292
+ """
293
+ if t_continuous.reshape((-1,)).shape[0] == 1:
294
+ t_continuous = t_continuous.expand((x.shape[0]))
295
+ if guidance_type == "uncond":
296
+ return noise_pred_fn(x, t_continuous)
297
+ elif guidance_type == "classifier":
298
+ assert classifier_fn is not None
299
+ t_input = get_model_input_time(t_continuous)
300
+ cond_grad = cond_grad_fn(x, t_input)
301
+ sigma_t = noise_schedule.marginal_std(t_continuous)
302
+ noise = noise_pred_fn(x, t_continuous)
303
+ return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
304
+ elif guidance_type == "classifier-free":
305
+ if guidance_scale == 1. or unconditional_condition is None:
306
+ return noise_pred_fn(x, t_continuous, cond=condition)
307
+ else:
308
+ x_in = torch.cat([x] * 2)
309
+ t_in = torch.cat([t_continuous] * 2)
310
+ c_in = torch.cat([unconditional_condition, condition])
311
+ noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
312
+ return noise_uncond + guidance_scale * (noise - noise_uncond)
313
+
314
+ assert model_type in ["noise", "x_start", "v"]
315
+ assert guidance_type in ["uncond", "classifier", "classifier-free"]
316
+ return model_fn
317
+
318
+
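A hedged sketch of the wrapper in use for unconditional sampling, reusing `ns` and the torch import from the NoiseScheduleVP sketch above; `toy_eps_model` is a stand-in, not a network from this repository:

    def toy_eps_model(x, t_input):
        return torch.zeros_like(x)                  # placeholder noise prediction

    model_fn = model_wrapper(toy_eps_model, ns, model_type="noise", guidance_type="uncond")
    eps = model_fn(torch.randn(4, 3, 32, 32), torch.full((4,), 0.5))
    # eps has the same shape as x; for guided sampling, pass guidance_type="classifier-free"
    # together with `condition`, `unconditional_condition`, and `guidance_scale`.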
319
+ class DPM_Solver:
320
+ def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
321
+ """Construct a DPM-Solver.
322
+ We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
323
+ If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
324
+ If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
325
+ In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
326
+ The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
327
+ Args:
328
+ model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
329
+ ``
330
+ def model_fn(x, t_continuous):
331
+ return noise
332
+ ``
333
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
334
+ predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
335
+ thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
336
+ max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.
337
+
338
+ [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
339
+ """
340
+ self.model = model_fn
341
+ self.noise_schedule = noise_schedule
342
+ self.predict_x0 = predict_x0
343
+ self.thresholding = thresholding
344
+ self.max_val = max_val
345
+
346
+ def noise_prediction_fn(self, x, t):
347
+ """
348
+ Return the output of the noise prediction model.
349
+ """
350
+ return self.model(x, t)
351
+
352
+ def data_prediction_fn(self, x, t):
353
+ """
354
+ Return the output of the data prediction model (with optional dynamic thresholding).
355
+ """
356
+ noise = self.noise_prediction_fn(x, t)
357
+ dims = x.dim()
358
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
359
+ x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
360
+ if self.thresholding:
361
+ p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
362
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
363
+ s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
364
+ x0 = torch.clamp(x0, -s, s) / s
365
+ return x0
366
+
367
+ def model_fn(self, x, t):
368
+ """
369
+ Convert the model to the noise prediction model or the data prediction model.
370
+ """
371
+ if self.predict_x0:
372
+ return self.data_prediction_fn(x, t)
373
+ else:
374
+ return self.noise_prediction_fn(x, t)
375
+
376
+ def get_time_steps(self, skip_type, t_T, t_0, N, device):
377
+ """Compute the intermediate time steps for sampling.
378
+ Args:
379
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
380
+ - 'logSNR': uniform logSNR for the time steps.
381
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
382
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
383
+ t_T: A `float`. The starting time of the sampling (default is T).
384
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
385
+ N: A `int`. The total number of the spacing of the time steps.
386
+ device: A torch device.
387
+ Returns:
388
+ A pytorch tensor of the time steps, with the shape (N + 1,).
389
+ """
390
+ if skip_type == 'logSNR':
391
+ lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
392
+ lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
393
+ logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
394
+ return self.noise_schedule.inverse_lambda(logSNR_steps)
395
+ elif skip_type == 'time_uniform':
396
+ return torch.linspace(t_T, t_0, N + 1).to(device)
397
+ elif skip_type == 'time_quadratic':
398
+ t_order = 2
399
+ t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
400
+ return t
401
+ else:
402
+ raise ValueError(
403
+ "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
404
+
405
+ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
406
+ """
407
+ Get the order of each step for sampling by the singlestep DPM-Solver.
408
+ We combine DPM-Solver-1, 2, and 3 to use all the function evaluations; the combination is named "DPM-Solver-fast".
409
+ Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
410
+ - If order == 1:
411
+ We take `steps` of DPM-Solver-1 (i.e. DDIM).
412
+ - If order == 2:
413
+ - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
414
+ - If steps % 2 == 0, we use K steps of DPM-Solver-2.
415
+ - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
416
+ - If order == 3:
417
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
418
+ - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
419
+ - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
420
+ - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
421
+ ============================================
422
+ Args:
423
+ order: A `int`. The max order for the solver (2 or 3).
424
+ steps: A `int`. The total number of function evaluations (NFE).
425
+ skip_type: A `str`. The type for the spacing of the time steps. We support three types:
426
+ - 'logSNR': uniform logSNR for the time steps.
427
+ - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
428
+ - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
429
+ t_T: A `float`. The starting time of the sampling (default is T).
430
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
431
+ device: A torch device.
432
+ Returns:
433
+ orders: A list of the solver order of each step.
434
+ """
435
+ if order == 3:
436
+ K = steps // 3 + 1
437
+ if steps % 3 == 0:
438
+ orders = [3, ] * (K - 2) + [2, 1]
439
+ elif steps % 3 == 1:
440
+ orders = [3, ] * (K - 1) + [1]
441
+ else:
442
+ orders = [3, ] * (K - 1) + [2]
443
+ elif order == 2:
444
+ if steps % 2 == 0:
445
+ K = steps // 2
446
+ orders = [2, ] * K
447
+ else:
448
+ K = steps // 2 + 1
449
+ orders = [2, ] * (K - 1) + [1]
450
+ elif order == 1:
451
+ K = 1
452
+ orders = [1, ] * steps
453
+ else:
454
+ raise ValueError("'order' must be '1' or '2' or '3'.")
455
+ if skip_type == 'logSNR':
456
+ # To reproduce the results in DPM-Solver paper
457
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
458
+ else:
459
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
460
+ torch.cumsum(torch.tensor([0, ] + orders), dim=0).to(device)]
461
+ return timesteps_outer, orders
462
+
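A worked check of the splitting rule described in the docstring above (comments only):

    # steps=20, order=3: K = 20 // 3 + 1 = 7 and steps % 3 == 2,
    #   so orders = [3, 3, 3, 3, 3, 3, 2] -> 6 steps of DPM-Solver-3 + 1 of DPM-Solver-2 (NFE = 20).
    # steps=10, order=2: steps % 2 == 0, K = 5,
    #   so orders = [2, 2, 2, 2, 2]       -> 5 steps of DPM-Solver-2 (NFE = 10).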
463
+ def denoise_to_zero_fn(self, x, s):
464
+ """
465
+ Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
466
+ """
467
+ return self.data_prediction_fn(x, s)
468
+
469
+ def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
470
+ """
471
+ DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
472
+ Args:
473
+ x: A pytorch tensor. The initial value at time `s`.
474
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
475
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
476
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
477
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
478
+ return_intermediate: A `bool`. If true, also return the model value at time `s`.
479
+ Returns:
480
+ x_t: A pytorch tensor. The approximated solution at time `t`.
481
+ """
482
+ ns = self.noise_schedule
483
+ dims = x.dim()
484
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
485
+ h = lambda_t - lambda_s
486
+ log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
487
+ sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
488
+ alpha_t = torch.exp(log_alpha_t)
489
+
490
+ if self.predict_x0:
491
+ phi_1 = torch.expm1(-h)
492
+ if model_s is None:
493
+ model_s = self.model_fn(x, s)
494
+ x_t = (
495
+ expand_dims(sigma_t / sigma_s, dims) * x
496
+ - expand_dims(alpha_t * phi_1, dims) * model_s
497
+ )
498
+ if return_intermediate:
499
+ return x_t, {'model_s': model_s}
500
+ else:
501
+ return x_t
502
+ else:
503
+ phi_1 = torch.expm1(h)
504
+ if model_s is None:
505
+ model_s = self.model_fn(x, s)
506
+ x_t = (
507
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
508
+ - expand_dims(sigma_t * phi_1, dims) * model_s
509
+ )
510
+ if return_intermediate:
511
+ return x_t, {'model_s': model_s}
512
+ else:
513
+ return x_t
514
+
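Restating the first-order update above as formulas (comments only; here h = lambda_t - lambda_s with lambda = log(alpha) - log(sigma)):

    # noise-prediction form (predict_x0=False), equivalent to DDIM:
    #   x_t = (alpha_t / alpha_s) * x - sigma_t * (exp(h) - 1) * eps_theta(x, s)
    # data-prediction form (predict_x0=True):
    #   x_t = (sigma_t / sigma_s) * x - alpha_t * (exp(-h) - 1) * x0_theta(x, s)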
515
+ def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
516
+ solver_type='dpm_solver'):
517
+ """
518
+ Singlestep solver DPM-Solver-2 from time `s` to time `t`.
519
+ Args:
520
+ x: A pytorch tensor. The initial value at time `s`.
521
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
522
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
523
+ r1: A `float`. The hyperparameter of the second-order solver.
524
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
525
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
526
+ return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
527
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
528
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
529
+ Returns:
530
+ x_t: A pytorch tensor. The approximated solution at time `t`.
531
+ """
532
+ if solver_type not in ['dpm_solver', 'taylor']:
533
+ raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
534
+ if r1 is None:
535
+ r1 = 0.5
536
+ ns = self.noise_schedule
537
+ dims = x.dim()
538
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
539
+ h = lambda_t - lambda_s
540
+ lambda_s1 = lambda_s + r1 * h
541
+ s1 = ns.inverse_lambda(lambda_s1)
542
+ log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
543
+ s1), ns.marginal_log_mean_coeff(t)
544
+ sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
545
+ alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)
546
+
547
+ if self.predict_x0:
548
+ phi_11 = torch.expm1(-r1 * h)
549
+ phi_1 = torch.expm1(-h)
550
+
551
+ if model_s is None:
552
+ model_s = self.model_fn(x, s)
553
+ x_s1 = (
554
+ expand_dims(sigma_s1 / sigma_s, dims) * x
555
+ - expand_dims(alpha_s1 * phi_11, dims) * model_s
556
+ )
557
+ model_s1 = self.model_fn(x_s1, s1)
558
+ if solver_type == 'dpm_solver':
559
+ x_t = (
560
+ expand_dims(sigma_t / sigma_s, dims) * x
561
+ - expand_dims(alpha_t * phi_1, dims) * model_s
562
+ - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
563
+ )
564
+ elif solver_type == 'taylor':
565
+ x_t = (
566
+ expand_dims(sigma_t / sigma_s, dims) * x
567
+ - expand_dims(alpha_t * phi_1, dims) * model_s
568
+ + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
569
+ model_s1 - model_s)
570
+ )
571
+ else:
572
+ phi_11 = torch.expm1(r1 * h)
573
+ phi_1 = torch.expm1(h)
574
+
575
+ if model_s is None:
576
+ model_s = self.model_fn(x, s)
577
+ x_s1 = (
578
+ expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
579
+ - expand_dims(sigma_s1 * phi_11, dims) * model_s
580
+ )
581
+ model_s1 = self.model_fn(x_s1, s1)
582
+ if solver_type == 'dpm_solver':
583
+ x_t = (
584
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
585
+ - expand_dims(sigma_t * phi_1, dims) * model_s
586
+ - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
587
+ )
588
+ elif solver_type == 'taylor':
589
+ x_t = (
590
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
591
+ - expand_dims(sigma_t * phi_1, dims) * model_s
592
+ - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
593
+ )
594
+ if return_intermediate:
595
+ return x_t, {'model_s': model_s, 'model_s1': model_s1}
596
+ else:
597
+ return x_t
598
+
599
+ def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
600
+ return_intermediate=False, solver_type='dpm_solver'):
601
+ """
602
+ Singlestep solver DPM-Solver-3 from time `s` to time `t`.
603
+ Args:
604
+ x: A pytorch tensor. The initial value at time `s`.
605
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
606
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
607
+ r1: A `float`. The hyperparameter of the third-order solver.
608
+ r2: A `float`. The hyperparameter of the third-order solver.
609
+ model_s: A pytorch tensor. The model function evaluated at time `s`.
610
+ If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
611
+ model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
612
+ If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
613
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
614
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
615
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
616
+ Returns:
617
+ x_t: A pytorch tensor. The approximated solution at time `t`.
618
+ """
619
+ if solver_type not in ['dpm_solver', 'taylor']:
620
+ raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
621
+ if r1 is None:
622
+ r1 = 1. / 3.
623
+ if r2 is None:
624
+ r2 = 2. / 3.
625
+ ns = self.noise_schedule
626
+ dims = x.dim()
627
+ lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
628
+ h = lambda_t - lambda_s
629
+ lambda_s1 = lambda_s + r1 * h
630
+ lambda_s2 = lambda_s + r2 * h
631
+ s1 = ns.inverse_lambda(lambda_s1)
632
+ s2 = ns.inverse_lambda(lambda_s2)
633
+ log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
634
+ s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
635
+ sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
636
+ s2), ns.marginal_std(t)
637
+ alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)
638
+
639
+ if self.predict_x0:
640
+ phi_11 = torch.expm1(-r1 * h)
641
+ phi_12 = torch.expm1(-r2 * h)
642
+ phi_1 = torch.expm1(-h)
643
+ phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
644
+ phi_2 = phi_1 / h + 1.
645
+ phi_3 = phi_2 / h - 0.5
646
+
647
+ if model_s is None:
648
+ model_s = self.model_fn(x, s)
649
+ if model_s1 is None:
650
+ x_s1 = (
651
+ expand_dims(sigma_s1 / sigma_s, dims) * x
652
+ - expand_dims(alpha_s1 * phi_11, dims) * model_s
653
+ )
654
+ model_s1 = self.model_fn(x_s1, s1)
655
+ x_s2 = (
656
+ expand_dims(sigma_s2 / sigma_s, dims) * x
657
+ - expand_dims(alpha_s2 * phi_12, dims) * model_s
658
+ + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
659
+ )
660
+ model_s2 = self.model_fn(x_s2, s2)
661
+ if solver_type == 'dpm_solver':
662
+ x_t = (
663
+ expand_dims(sigma_t / sigma_s, dims) * x
664
+ - expand_dims(alpha_t * phi_1, dims) * model_s
665
+ + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
666
+ )
667
+ elif solver_type == 'taylor':
668
+ D1_0 = (1. / r1) * (model_s1 - model_s)
669
+ D1_1 = (1. / r2) * (model_s2 - model_s)
670
+ D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
671
+ D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
672
+ x_t = (
673
+ expand_dims(sigma_t / sigma_s, dims) * x
674
+ - expand_dims(alpha_t * phi_1, dims) * model_s
675
+ + expand_dims(alpha_t * phi_2, dims) * D1
676
+ - expand_dims(alpha_t * phi_3, dims) * D2
677
+ )
678
+ else:
679
+ phi_11 = torch.expm1(r1 * h)
680
+ phi_12 = torch.expm1(r2 * h)
681
+ phi_1 = torch.expm1(h)
682
+ phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
683
+ phi_2 = phi_1 / h - 1.
684
+ phi_3 = phi_2 / h - 0.5
685
+
686
+ if model_s is None:
687
+ model_s = self.model_fn(x, s)
688
+ if model_s1 is None:
689
+ x_s1 = (
690
+ expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
691
+ - expand_dims(sigma_s1 * phi_11, dims) * model_s
692
+ )
693
+ model_s1 = self.model_fn(x_s1, s1)
694
+ x_s2 = (
695
+ expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
696
+ - expand_dims(sigma_s2 * phi_12, dims) * model_s
697
+ - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
698
+ )
699
+ model_s2 = self.model_fn(x_s2, s2)
700
+ if solver_type == 'dpm_solver':
701
+ x_t = (
702
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
703
+ - expand_dims(sigma_t * phi_1, dims) * model_s
704
+ - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
705
+ )
706
+ elif solver_type == 'taylor':
707
+ D1_0 = (1. / r1) * (model_s1 - model_s)
708
+ D1_1 = (1. / r2) * (model_s2 - model_s)
709
+ D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
710
+ D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
711
+ x_t = (
712
+ expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
713
+ - expand_dims(sigma_t * phi_1, dims) * model_s
714
+ - expand_dims(sigma_t * phi_2, dims) * D1
715
+ - expand_dims(sigma_t * phi_3, dims) * D2
716
+ )
717
+
718
+ if return_intermediate:
719
+ return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
720
+ else:
721
+ return x_t
722
+
723
+ def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
724
+ """
725
+ Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
726
+ Args:
727
+ x: A pytorch tensor. The initial value at time `s`.
728
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
729
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
730
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
731
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
732
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
733
+ Returns:
734
+ x_t: A pytorch tensor. The approximated solution at time `t`.
735
+ """
736
+ if solver_type not in ['dpm_solver', 'taylor']:
737
+ raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
738
+ ns = self.noise_schedule
739
+ dims = x.dim()
740
+ model_prev_1, model_prev_0 = model_prev_list
741
+ t_prev_1, t_prev_0 = t_prev_list
742
+ lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
743
+ t_prev_0), ns.marginal_lambda(t)
744
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
745
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
746
+ alpha_t = torch.exp(log_alpha_t)
747
+
748
+ h_0 = lambda_prev_0 - lambda_prev_1
749
+ h = lambda_t - lambda_prev_0
750
+ r0 = h_0 / h
751
+ D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
752
+ if self.predict_x0:
753
+ if solver_type == 'dpm_solver':
754
+ x_t = (
755
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
756
+ - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
757
+ - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
758
+ )
759
+ elif solver_type == 'taylor':
760
+ x_t = (
761
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
762
+ - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
763
+ + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
764
+ )
765
+ else:
766
+ if solver_type == 'dpm_solver':
767
+ x_t = (
768
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
769
+ - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
770
+ - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
771
+ )
772
+ elif solver_type == 'taylor':
773
+ x_t = (
774
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
775
+ - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
776
+ - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
777
+ )
778
+ return x_t
779
+
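A short interpretation of the multistep second-order update above (comments only):

    # With r0 = h_0 / h, the difference term
    #     D1_0 = (model_prev_0 - model_prev_1) / r0 = h * (model_prev_0 - model_prev_1) / h_0
    # approximates h * d(model)/d(lambda) at lambda_prev_0, i.e. the first-order correction
    # that the second-order update adds on top of the plain DPM-Solver-1 step.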
780
+ def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
781
+ """
782
+ Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
783
+ Args:
784
+ x: A pytorch tensor. The initial value at time `s`.
785
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
786
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
787
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
788
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
789
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
790
+ Returns:
791
+ x_t: A pytorch tensor. The approximated solution at time `t`.
792
+ """
793
+ ns = self.noise_schedule
794
+ dims = x.dim()
795
+ model_prev_2, model_prev_1, model_prev_0 = model_prev_list
796
+ t_prev_2, t_prev_1, t_prev_0 = t_prev_list
797
+ lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
798
+ t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
799
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
800
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
801
+ alpha_t = torch.exp(log_alpha_t)
802
+
803
+ h_1 = lambda_prev_1 - lambda_prev_2
804
+ h_0 = lambda_prev_0 - lambda_prev_1
805
+ h = lambda_t - lambda_prev_0
806
+ r0, r1 = h_0 / h, h_1 / h
807
+ D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
808
+ D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
809
+ D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
810
+ D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
811
+ if self.predict_x0:
812
+ x_t = (
813
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
814
+ - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
815
+ + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
816
+ - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
817
+ )
818
+ else:
819
+ x_t = (
820
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
821
+ - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
822
+ - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
823
+ - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
824
+ )
825
+ return x_t
826
+
827
+ def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
828
+ r2=None):
829
+ """
830
+ Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
831
+ Args:
832
+ x: A pytorch tensor. The initial value at time `s`.
833
+ s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
834
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
835
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
836
+ return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
837
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
838
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
839
+ r1: A `float`. The hyperparameter of the second-order or third-order solver.
840
+ r2: A `float`. The hyperparameter of the third-order solver.
841
+ Returns:
842
+ x_t: A pytorch tensor. The approximated solution at time `t`.
843
+ """
844
+ if order == 1:
845
+ return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
846
+ elif order == 2:
847
+ return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
848
+ solver_type=solver_type, r1=r1)
849
+ elif order == 3:
850
+ return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
851
+ solver_type=solver_type, r1=r1, r2=r2)
852
+ else:
853
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
854
+
855
+ def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
856
+ """
857
+ Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
858
+ Args:
859
+ x: A pytorch tensor. The initial value at time `s`.
860
+ model_prev_list: A list of pytorch tensor. The previous computed model values.
861
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
862
+ t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
863
+ order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
864
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
865
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
866
+ Returns:
867
+ x_t: A pytorch tensor. The approximated solution at time `t`.
868
+ """
869
+ if order == 1:
870
+ return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
871
+ elif order == 2:
872
+ return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
873
+ elif order == 3:
874
+ return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
875
+ else:
876
+ raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))
877
+
878
+ def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
879
+ solver_type='dpm_solver'):
880
+ """
881
+ The adaptive step size solver based on singlestep DPM-Solver.
882
+ Args:
883
+ x: A pytorch tensor. The initial value at time `t_T`.
884
+ order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.
885
+ t_T: A `float`. The starting time of the sampling (default is T).
886
+ t_0: A `float`. The ending time of the sampling (default is epsilon).
887
+ h_init: A `float`. The initial step size (for logSNR).
888
+ atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, following [1].
889
+ rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
890
+ theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, following [1].
891
+ t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
892
+ current time and `t_0` is less than `t_err`. The default setting is 1e-5.
893
+ solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
894
+ The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
895
+ Returns:
896
+ x_0: A pytorch tensor. The approximated solution at time `t_0`.
897
+ [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
898
+ """
899
+ ns = self.noise_schedule
900
+ s = t_T * torch.ones((x.shape[0],)).to(x)
901
+ lambda_s = ns.marginal_lambda(s)
902
+ lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
903
+ h = h_init * torch.ones_like(s).to(x)
904
+ x_prev = x
905
+ nfe = 0
906
+ if order == 2:
907
+ r1 = 0.5
908
+ lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
909
+ higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
910
+ solver_type=solver_type,
911
+ **kwargs)
912
+ elif order == 3:
913
+ r1, r2 = 1. / 3., 2. / 3.
914
+ lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
915
+ return_intermediate=True,
916
+ solver_type=solver_type)
917
+ higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
918
+ solver_type=solver_type,
919
+ **kwargs)
920
+ else:
921
+ raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
922
+ while torch.abs((s - t_0)).mean() > t_err:
923
+ t = ns.inverse_lambda(lambda_s + h)
924
+ x_lower, lower_noise_kwargs = lower_update(x, s, t)
925
+ x_higher = higher_update(x, s, t, **lower_noise_kwargs)
926
+ delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
927
+ norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
928
+ E = norm_fn((x_higher - x_lower) / delta).max()
929
+ if torch.all(E <= 1.):
930
+ x = x_higher
931
+ s = t
932
+ x_prev = x_lower
933
+ lambda_s = ns.marginal_lambda(s)
934
+ h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
935
+ nfe += order
936
+ print('adaptive solver nfe', nfe)
937
+ return x
938
+
939
+ def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
940
+ method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
941
+ atol=0.0078, rtol=0.05,
942
+ ):
943
+ """
944
+ Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
945
+ =====================================================
946
+ We support the following algorithms for both noise prediction model and data prediction model:
947
+ - 'singlestep':
948
+ Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
949
+ We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
950
+ The total number of function evaluations (NFE) == `steps`.
951
+ Given a fixed NFE == `steps`, the sampling procedure is:
952
+ - If `order` == 1:
953
+ - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
954
+ - If `order` == 2:
955
+ - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
956
+ - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
957
+ - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
958
+ - If `order` == 3:
959
+ - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
960
+ - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
961
+ - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
962
+ - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
963
+ - 'multistep':
964
+ Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
965
+ We initialize the first `order` values by lower order multistep solvers.
966
+ Given a fixed NFE == `steps`, the sampling procedure is:
967
+ Denote K = steps.
968
+ - If `order` == 1:
969
+ - We use K steps of DPM-Solver-1 (i.e. DDIM).
970
+ - If `order` == 2:
971
+ - We first use 1 step of DPM-Solver-1, then (K - 1) steps of multistep DPM-Solver-2.
972
+ - If `order` == 3:
973
+ - We first use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) steps of multistep DPM-Solver-3.
974
+ - 'singlestep_fixed':
975
+ Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
976
+ We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
977
+ - 'adaptive':
978
+ Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
979
+ We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
980
+ You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
981
+ (NFE) and the sample quality.
982
+ - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
983
+ - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
984
+ =====================================================
985
+ Some advice for choosing the algorithm:
986
+ - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
987
+ Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
988
+ e.g.
989
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
990
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
991
+ skip_type='time_uniform', method='singlestep')
992
+ - For **guided sampling with large guidance scale** by DPMs:
993
+ Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
994
+ e.g.
995
+ >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
996
+ >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
997
+ skip_type='time_uniform', method='multistep')
998
+ We support three types of `skip_type`:
999
+ - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**
1000
+ - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
1001
+ - 'time_quadratic': quadratic time for the time steps.
1002
+ =====================================================
1003
+ Args:
1004
+ x: A pytorch tensor. The initial value at time `t_start`
1005
+ e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
1006
+ steps: A `int`. The total number of function evaluations (NFE).
1007
+ t_start: A `float`. The starting time of the sampling.
1008
+ If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
1009
+ t_end: A `float`. The ending time of the sampling.
1010
+ If `t_end` is None, we use 1. / self.noise_schedule.total_N.
1011
+ e.g. if total_N == 1000, we have `t_end` == 1e-3.
1012
+ For discrete-time DPMs:
1013
+ - We recommend `t_end` == 1. / self.noise_schedule.total_N.
1014
+ For continuous-time DPMs:
1015
+ - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
1016
+ order: A `int`. The order of DPM-Solver.
1017
+ skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
1018
+ method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
1019
+ denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
1020
+ Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
1021
+ This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and
1022
+ score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID
1023
+ for diffusion models sampling by diffusion SDEs for low-resolutional images
1024
+ (such as CIFAR-10). However, we observed that this trick does not matter for
1025
+ high-resolution images. As it needs an additional NFE, we do not recommend
1026
+ it for high-resolution images.
1027
+ lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
1028
+ Only valid for `method=multistep` and `steps < 15`. We empirically find that
1029
+ this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
1030
+ (especially for steps <= 10). So we recommend setting it to `True`.
1031
+ solver_type: A `str`. The Taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
1032
+ atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1033
+ rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
1034
+ Returns:
1035
+ x_end: A pytorch tensor. The approximated solution at time `t_end`.
1036
+ """
1037
+ t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
1038
+ t_T = self.noise_schedule.T if t_start is None else t_start
1039
+ device = x.device
1040
+ if method == 'adaptive':
1041
+ with torch.no_grad():
1042
+ x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
1043
+ solver_type=solver_type)
1044
+ elif method == 'multistep':
1045
+ assert steps >= order
1046
+ timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
1047
+ assert timesteps.shape[0] - 1 == steps
1048
+ with torch.no_grad():
1049
+ vec_t = timesteps[0].expand((x.shape[0]))
1050
+ model_prev_list = [self.model_fn(x, vec_t)]
1051
+ t_prev_list = [vec_t]
1052
+ # Init the first `order` values by lower order multistep DPM-Solver.
1053
+ for init_order in tqdm(range(1, order), desc="DPM init order"):
1054
+ vec_t = timesteps[init_order].expand(x.shape[0])
1055
+ x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
1056
+ solver_type=solver_type)
1057
+ model_prev_list.append(self.model_fn(x, vec_t))
1058
+ t_prev_list.append(vec_t)
1059
+ # Compute the remaining values by `order`-th order multistep DPM-Solver.
1060
+ for step in tqdm(range(order, steps + 1), desc="DPM multistep"):
1061
+ vec_t = timesteps[step].expand(x.shape[0])
1062
+ if lower_order_final and steps < 15:
1063
+ step_order = min(order, steps + 1 - step)
1064
+ else:
1065
+ step_order = order
1066
+ x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order,
1067
+ solver_type=solver_type)
1068
+ for i in range(order - 1):
1069
+ t_prev_list[i] = t_prev_list[i + 1]
1070
+ model_prev_list[i] = model_prev_list[i + 1]
1071
+ t_prev_list[-1] = vec_t
1072
+ # We do not need to evaluate the final model value.
1073
+ if step < steps:
1074
+ model_prev_list[-1] = self.model_fn(x, vec_t)
1075
+ elif method in ['singlestep', 'singlestep_fixed']:
1076
+ if method == 'singlestep':
1077
+ timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order,
1078
+ skip_type=skip_type,
1079
+ t_T=t_T, t_0=t_0,
1080
+ device=device)
1081
+ elif method == 'singlestep_fixed':
1082
+ K = steps // order
1083
+ orders = [order, ] * K
1084
+ timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
1085
+ for i, order in enumerate(orders):
1086
+ t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
1087
+ timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(),
1088
+ N=order, device=device)
1089
+ lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
1090
+ vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
1091
+ h = lambda_inner[-1] - lambda_inner[0]
1092
+ r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
1093
+ r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
1094
+ x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
1095
+ if denoise_to_zero:
1096
+ x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
1097
+ return x
1098
+
1099
+
1100
+ #############################################################
1101
+ # other utility functions
1102
+ #############################################################
1103
+
1104
+ def interpolate_fn(x, xp, yp):
1105
+ """
1106
+ A piecewise linear function y = f(x), using xp and yp as keypoints.
1107
+ We implement f(x) in a differentiable way (i.e. applicable for autograd).
1108
+ The function f(x) is well-defined for the whole x-axis. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
1109
+ Args:
1110
+ x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
1111
+ xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
1112
+ yp: PyTorch tensor with shape [C, K].
1113
+ Returns:
1114
+ The function values f(x), with shape [N, C].
1115
+ """
1116
+ N, K = x.shape[0], xp.shape[1]
1117
+ all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
1118
+ sorted_all_x, x_indices = torch.sort(all_x, dim=2)
1119
+ x_idx = torch.argmin(x_indices, dim=2)
1120
+ cand_start_idx = x_idx - 1
1121
+ start_idx = torch.where(
1122
+ torch.eq(x_idx, 0),
1123
+ torch.tensor(1, device=x.device),
1124
+ torch.where(
1125
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1126
+ ),
1127
+ )
1128
+ end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
1129
+ start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
1130
+ end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
1131
+ start_idx2 = torch.where(
1132
+ torch.eq(x_idx, 0),
1133
+ torch.tensor(0, device=x.device),
1134
+ torch.where(
1135
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
1136
+ ),
1137
+ )
1138
+ y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
1139
+ start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
1140
+ end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
1141
+ cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
1142
+ return cand
1143
+
1144
+
1145
+ def expand_dims(v, dims):
1146
+ """
1147
+ Expand the tensor `v` to the dim `dims`.
1148
+ Args:
1149
+ `v`: a PyTorch tensor with shape [N].
1150
+ `dims`: an `int`.
1151
+ Returns:
1152
+ a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
1153
+ """
1154
+ return v[(...,) + (None,) * (dims - 1)]
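
The two helpers above are easiest to see with concrete shapes. Below is a minimal, self-contained sketch (assuming the repository's `ldm` package is importable; the keypoints, queries and values are made up for illustration) that interpolates y = 2x from five keypoints and broadcasts a per-sample vector:

import torch
from ldm.models.diffusion.dpm_solver.dpm_solver import interpolate_fn, expand_dims

# Keypoints of y = 2x on [0, 1] for a single channel: xp, yp have shape [C, K] with C = 1, K = 5.
xp = torch.linspace(0., 1., 5).unsqueeze(0)
yp = 2. * xp
# Query points have shape [N, C]; the last one lies outside the keypoint range.
x = torch.tensor([[0.25], [0.60], [1.50]])
y = interpolate_fn(x, xp, yp)
print(y)  # approximately [[0.5], [1.2], [3.0]]; the out-of-range query is linearly extrapolated

# expand_dims reshapes a [N] vector to [N, 1, 1, 1] so it broadcasts against image-shaped tensors.
v = torch.tensor([1., 2.])
print(expand_dims(v, 4).shape)  # torch.Size([2, 1, 1, 1])
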
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/sampler.py ADDED
@@ -0,0 +1,87 @@
1
+ """SAMPLING ONLY."""
2
+ import torch
3
+
4
+ from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
5
+
6
+
7
+ MODEL_TYPES = {
8
+ "eps": "noise",
9
+ "v": "v"
10
+ }
11
+
12
+
13
+ class DPMSolverSampler(object):
14
+ def __init__(self, model, **kwargs):
15
+ super().__init__()
16
+ self.model = model
17
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
18
+ self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))
19
+
20
+ def register_buffer(self, name, attr):
21
+ if type(attr) == torch.Tensor:
22
+ if attr.device != torch.device("cuda"):
23
+ attr = attr.to(torch.device("cuda"))
24
+ setattr(self, name, attr)
25
+
26
+ @torch.no_grad()
27
+ def sample(self,
28
+ S,
29
+ batch_size,
30
+ shape,
31
+ conditioning=None,
32
+ callback=None,
33
+ normals_sequence=None,
34
+ img_callback=None,
35
+ quantize_x0=False,
36
+ eta=0.,
37
+ mask=None,
38
+ x0=None,
39
+ temperature=1.,
40
+ noise_dropout=0.,
41
+ score_corrector=None,
42
+ corrector_kwargs=None,
43
+ verbose=True,
44
+ x_T=None,
45
+ log_every_t=100,
46
+ unconditional_guidance_scale=1.,
47
+ unconditional_conditioning=None,
48
+ # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
49
+ **kwargs
50
+ ):
51
+ if conditioning is not None:
52
+ if isinstance(conditioning, dict):
53
+ cbs = conditioning[list(conditioning.keys())[0]].shape[0]
54
+ if cbs != batch_size:
55
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
56
+ else:
57
+ if conditioning.shape[0] != batch_size:
58
+ print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
59
+
60
+ # sampling
61
+ C, H, W = shape
62
+ size = (batch_size, C, H, W)
63
+
64
+ print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')
65
+
66
+ device = self.model.betas.device
67
+ if x_T is None:
68
+ img = torch.randn(size, device=device)
69
+ else:
70
+ img = x_T
71
+
72
+ ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
73
+
74
+ model_fn = model_wrapper(
75
+ lambda x, t, c: self.model.apply_model(x, t, c),
76
+ ns,
77
+ model_type=MODEL_TYPES[self.model.parameterization],
78
+ guidance_type="classifier-free",
79
+ condition=conditioning,
80
+ unconditional_condition=unconditional_conditioning,
81
+ guidance_scale=unconditional_guidance_scale,
82
+ )
83
+
84
+ dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
85
+ x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True)
86
+
87
+ return x.to(device), None
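
DPMSolverSampler above is a thin adapter around the dpm_solver module, and the same pipeline can be driven directly. The sketch below mirrors its sample() body, with a toy noise predictor standing in for a real LatentDiffusion model; the beta schedule, latent shape and conditioning tensors are placeholders, not values used by this repository:

import torch
from ldm.models.diffusion.dpm_solver.dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver

# Placeholder discrete schedule (linear betas) and a toy eps-prediction network.
betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1. - betas, dim=0)

def toy_apply_model(x, t, c=None):
    # A real model would predict the noise from (x, t, conditioning); zeros keep the sketch runnable.
    return torch.zeros_like(x)

ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
model_fn = model_wrapper(
    toy_apply_model, ns,
    model_type="noise",                     # the "eps" parameterization
    guidance_type="classifier-free",
    condition=torch.zeros(1, 77, 1024),     # placeholder text conditioning
    unconditional_condition=torch.zeros(1, 77, 1024),
    guidance_scale=7.5,
)
dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
x_T = torch.randn(1, 4, 64, 64)
x_0 = dpm_solver.sample(x_T, steps=20, skip_type="time_uniform",
                        method="multistep", order=2, lower_order_final=True)
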
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpmpp_2m.py ADDED
@@ -0,0 +1,78 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ import torch
5
+ import k_diffusion as K
6
+ import numpy as np
7
+
8
+ from ldm.models.diffusion.sampler import Sampler
9
+
10
+
11
+ class DPMPP2M_Sampler(Sampler):
12
+ def __init__(self, model, v_mode, **kwargs):
13
+ super().__init__()
14
+ self.model = model
15
+ if v_mode:
16
+ self.model_wrap = K.external.CompVisVDenoiser(model)
17
+ else:
18
+ self.model_wrap = K.external.CompVisDenoiser(model)
19
+
20
+ def generate_params(self, sigmas):
21
+ """DPM-Solver++(2M)."""
22
+ # Based on https://github.com/crowsonkb/k-diffusion/blob/v0.0.14/k_diffusion/sampling.py#L585
23
+ device = sigmas.device
24
+ sigmas = sigmas.cpu()
25
+ def sigma_fn(t): return t.neg().exp()
26
+ def t_fn(sigma): return sigma.log().neg()
27
+ params = []
28
+ for i in range(len(sigmas) - 1):
29
+ sigma = sigmas[i]
30
+ t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
31
+ h = t_next - t
32
+ a = sigma_fn(t_next) / sigma_fn(t)
33
+ if i == 0 or sigmas[i + 1] == 0:
34
+ b = 1.0
35
+ c = 0.0
36
+ else:
37
+ h_last = t - t_fn(sigmas[i - 1])
38
+ r = h_last / h
39
+ b = 1 + 1 / (2 * r)
40
+ c = 1 / (2 * r)
41
+ b *= - (-h).expm1()
42
+ c *= (-h).expm1()
43
+ p = np.array([a.numpy(), b.numpy(), c.numpy(), sigma.numpy()])
44
+ params.append(p)
45
+ params = torch.Tensor(np.stack(params, axis=0)
46
+ ).transpose(0, 1).to(device)
47
+ return params
48
+
49
+ @torch.no_grad()
50
+ def compile(self,
51
+ S,
52
+ shape,
53
+ unconditional_guidance_scale=1.,
54
+ batch_size=1,
55
+ **kwargs
56
+ ):
57
+ self.sigmas = self.model_wrap.get_sigmas(S)
58
+ self.params = self.generate_params(self.sigmas)
59
+ self.cond_scale = unconditional_guidance_scale
60
+ self.old_denoised_zeros = self.sigmas.new_zeros([batch_size] + shape)
61
+ self.rand_scale = self.params[3, 0].to(torch.float32).cpu()
62
+ self.batch_size = batch_size
63
+
64
+ def one_step(self, x, c_in, old_denoised, param_t):
65
+ a, b, c, sigma = param_t.chunk(4)
66
+ sigma = sigma.broadcast_to((self.batch_size)).contiguous()
67
+ denoised = self.run_model(x, c_in, sigma)
68
+ x = a * x + b * denoised + c * old_denoised
69
+ return x, denoised
70
+
71
+ def sampler_step(self, arg):
72
+ x, c_in, params, old_denoised = arg
73
+ x, denoised = self.one_step(x, c_in, old_denoised, params[:, 0])
74
+ params = torch.roll(params, shifts=-1, dims=1)
75
+ return [x, c_in, params, denoised]
76
+
77
+ def init_loop(self, x, c_in):
78
+ return [x, c_in, self.params.clone(), self.old_denoised_zeros.clone()]
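
The update applied by one_step, x <- a*x + b*denoised + c*old_denoised, uses coefficients that always sum to one, since a = exp(-h) and b + c = 1 - exp(-h). The following standalone sketch re-derives the per-step coefficients of generate_params and checks that property; the sigma values are arbitrary placeholders:

import torch

def dpmpp_2m_coeffs(sigma_prev, sigma, sigma_next):
    # Mirrors generate_params above: t(s) = -log(s), h = t_next - t, r = h_last / h.
    t_fn = lambda s: s.log().neg()
    h = t_fn(sigma_next) - t_fn(sigma)
    a = sigma_next / sigma
    if sigma_prev is None or sigma_next == 0:
        b = -torch.expm1(-h)          # first step, and final step where sigma_next == 0
        c = torch.zeros_like(a)
    else:
        r = (t_fn(sigma) - t_fn(sigma_prev)) / h
        b = -(1 + 1 / (2 * r)) * torch.expm1(-h)
        c = (1 / (2 * r)) * torch.expm1(-h)
    return a, b, c

sigmas = torch.tensor([14.6, 7.0, 3.1, 1.2, 0.0])   # placeholder noise levels
for i in range(len(sigmas) - 1):
    prev = None if i == 0 else sigmas[i - 1]
    a, b, c = dpmpp_2m_coeffs(prev, sigmas[i], sigmas[i + 1])
    # x <- a*x + b*denoised + c*old_denoised is an affine combination: the coefficients sum to 1.
    assert torch.isclose(a + b + c, torch.tensor(1.0))
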
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/plms.py ADDED
@@ -0,0 +1,244 @@
1
+ """SAMPLING ONLY."""
2
+
3
+ import torch
4
+ import numpy as np
5
+ from tqdm import tqdm
6
+ from functools import partial
7
+
8
+ from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
9
+ from ldm.models.diffusion.sampling_util import norm_thresholding
10
+
11
+
12
+ class PLMSSampler(object):
13
+ def __init__(self, model, schedule="linear", **kwargs):
14
+ super().__init__()
15
+ self.model = model
16
+ self.ddpm_num_timesteps = model.num_timesteps
17
+ self.schedule = schedule
18
+
19
+ def register_buffer(self, name, attr):
20
+ if type(attr) == torch.Tensor:
21
+ if attr.device != torch.device("cuda"):
22
+ attr = attr.to(torch.device("cuda"))
23
+ setattr(self, name, attr)
24
+
25
+ def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
26
+ if ddim_eta != 0:
27
+ raise ValueError('ddim_eta must be 0 for PLMS')
28
+ self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
29
+ num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
30
+ alphas_cumprod = self.model.alphas_cumprod
31
+ assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
32
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
33
+
34
+ self.register_buffer('betas', to_torch(self.model.betas))
35
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
36
+ self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
37
+
38
+ # calculations for diffusion q(x_t | x_{t-1}) and others
39
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
40
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
41
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
42
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
43
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
44
+
45
+ # ddim sampling parameters
46
+ ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
47
+ ddim_timesteps=self.ddim_timesteps,
48
+ eta=ddim_eta,verbose=verbose)
49
+ self.register_buffer('ddim_sigmas', ddim_sigmas)
50
+ self.register_buffer('ddim_alphas', ddim_alphas)
51
+ self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
52
+ self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
53
+ sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
54
+ (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
55
+ 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
56
+ self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
57
+
58
+ @torch.no_grad()
59
+ def sample(self,
60
+ S,
61
+ batch_size,
62
+ shape,
63
+ conditioning=None,
64
+ callback=None,
65
+ normals_sequence=None,
66
+ img_callback=None,
67
+ quantize_x0=False,
68
+ eta=0.,
69
+ mask=None,
70
+ x0=None,
71
+ temperature=1.,
72
+ noise_dropout=0.,
73
+ score_corrector=None,
74
+ corrector_kwargs=None,
75
+ verbose=True,
76
+ x_T=None,
77
+ log_every_t=100,
78
+ unconditional_guidance_scale=1.,
79
+ unconditional_conditioning=None,
80
+ # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
81
+ dynamic_threshold=None,
82
+ **kwargs
83
+ ):
84
+ if conditioning is not None:
85
+ if isinstance(conditioning, dict):
86
+ cbs = conditioning[list(conditioning.keys())[0]].shape[0]
87
+ if cbs != batch_size:
88
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
89
+ else:
90
+ if conditioning.shape[0] != batch_size:
91
+ print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
92
+
93
+ self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
94
+ # sampling
95
+ C, H, W = shape
96
+ size = (batch_size, C, H, W)
97
+ print(f'Data shape for PLMS sampling is {size}')
98
+
99
+ samples, intermediates = self.plms_sampling(conditioning, size,
100
+ callback=callback,
101
+ img_callback=img_callback,
102
+ quantize_denoised=quantize_x0,
103
+ mask=mask, x0=x0,
104
+ ddim_use_original_steps=False,
105
+ noise_dropout=noise_dropout,
106
+ temperature=temperature,
107
+ score_corrector=score_corrector,
108
+ corrector_kwargs=corrector_kwargs,
109
+ x_T=x_T,
110
+ log_every_t=log_every_t,
111
+ unconditional_guidance_scale=unconditional_guidance_scale,
112
+ unconditional_conditioning=unconditional_conditioning,
113
+ dynamic_threshold=dynamic_threshold,
114
+ )
115
+ return samples, intermediates
116
+
117
+ @torch.no_grad()
118
+ def plms_sampling(self, cond, shape,
119
+ x_T=None, ddim_use_original_steps=False,
120
+ callback=None, timesteps=None, quantize_denoised=False,
121
+ mask=None, x0=None, img_callback=None, log_every_t=100,
122
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
123
+ unconditional_guidance_scale=1., unconditional_conditioning=None,
124
+ dynamic_threshold=None):
125
+ device = self.model.betas.device
126
+ b = shape[0]
127
+ if x_T is None:
128
+ img = torch.randn(shape, device=device)
129
+ else:
130
+ img = x_T
131
+
132
+ if timesteps is None:
133
+ timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
134
+ elif timesteps is not None and not ddim_use_original_steps:
135
+ subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
136
+ timesteps = self.ddim_timesteps[:subset_end]
137
+
138
+ intermediates = {'x_inter': [img], 'pred_x0': [img]}
139
+ time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
140
+ total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
141
+ print(f"Running PLMS Sampling with {total_steps} timesteps")
142
+
143
+ iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
144
+ old_eps = []
145
+
146
+ for i, step in enumerate(iterator):
147
+ index = total_steps - i - 1
148
+ ts = torch.full((b,), step, device=device, dtype=torch.long)
149
+ ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
150
+
151
+ if mask is not None:
152
+ assert x0 is not None
153
+ img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
154
+ img = img_orig * mask + (1. - mask) * img
155
+
156
+ outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
157
+ quantize_denoised=quantize_denoised, temperature=temperature,
158
+ noise_dropout=noise_dropout, score_corrector=score_corrector,
159
+ corrector_kwargs=corrector_kwargs,
160
+ unconditional_guidance_scale=unconditional_guidance_scale,
161
+ unconditional_conditioning=unconditional_conditioning,
162
+ old_eps=old_eps, t_next=ts_next,
163
+ dynamic_threshold=dynamic_threshold)
164
+ img, pred_x0, e_t = outs
165
+ old_eps.append(e_t)
166
+ if len(old_eps) >= 4:
167
+ old_eps.pop(0)
168
+ if callback: callback(i)
169
+ if img_callback: img_callback(pred_x0, i)
170
+
171
+ if index % log_every_t == 0 or index == total_steps - 1:
172
+ intermediates['x_inter'].append(img)
173
+ intermediates['pred_x0'].append(pred_x0)
174
+
175
+ return img, intermediates
176
+
177
+ @torch.no_grad()
178
+ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
179
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
180
+ unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None,
181
+ dynamic_threshold=None):
182
+ b, *_, device = *x.shape, x.device
183
+
184
+ def get_model_output(x, t):
185
+ if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
186
+ e_t = self.model.apply_model(x, t, c)
187
+ else:
188
+ x_in = torch.cat([x] * 2)
189
+ t_in = torch.cat([t] * 2)
190
+ c_in = torch.cat([unconditional_conditioning, c])
191
+ e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
192
+ e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
193
+
194
+ if score_corrector is not None:
195
+ assert self.model.parameterization == "eps"
196
+ e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
197
+
198
+ return e_t
199
+
200
+ alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
201
+ alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
202
+ sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
203
+ sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
204
+
205
+ def get_x_prev_and_pred_x0(e_t, index):
206
+ # select parameters corresponding to the currently considered timestep
207
+ a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
208
+ a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
209
+ sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
210
+ sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
211
+
212
+ # current prediction for x_0
213
+ pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
214
+ if quantize_denoised:
215
+ pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
216
+ if dynamic_threshold is not None:
217
+ pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
218
+ # direction pointing to x_t
219
+ dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
220
+ noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
221
+ if noise_dropout > 0.:
222
+ noise = torch.nn.functional.dropout(noise, p=noise_dropout)
223
+ x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
224
+ return x_prev, pred_x0
225
+
226
+ e_t = get_model_output(x, t)
227
+ if len(old_eps) == 0:
228
+ # Pseudo Improved Euler (2nd order)
229
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
230
+ e_t_next = get_model_output(x_prev, t_next)
231
+ e_t_prime = (e_t + e_t_next) / 2
232
+ elif len(old_eps) == 1:
233
+ # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
234
+ e_t_prime = (3 * e_t - old_eps[-1]) / 2
235
+ elif len(old_eps) == 2:
236
+ # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
237
+ e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
238
+ elif len(old_eps) >= 3:
239
+ # 4nd order Pseudo Linear Multistep (Adams-Bashforth)
240
+ e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
241
+
242
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
243
+
244
+ return x_prev, pred_x0, e_t
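
The multistep branch of p_sample_plms combines the current eps prediction with up to three previous ones using fixed Adams-Bashforth coefficients. A small standalone helper that isolates the combination (tensor shapes below are placeholders) and checks that each formula's coefficients sum to one:

import torch

def plms_eps_prime(e_t, old_eps):
    # Combine current and previous eps predictions as p_sample_plms does for steps after the first.
    if len(old_eps) == 0:
        raise ValueError("the first PLMS step uses an extra model call (pseudo improved Euler) instead")
    if len(old_eps) == 1:
        return (3 * e_t - old_eps[-1]) / 2
    if len(old_eps) == 2:
        return (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
    return (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24

# Sanity check: with a constant eps history every formula reproduces that constant,
# because each set of Adams-Bashforth coefficients sums to 1.
e = torch.full((1, 4, 8, 8), 0.5)
for k in (1, 2, 3):
    assert torch.allclose(plms_eps_prime(e, [e] * k), e)
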
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/sampler.py ADDED
@@ -0,0 +1,19 @@
1
+ ###############################################################################
2
+ # Copyright (C) 2023 Habana Labs, Ltd. an Intel Company
3
+ ###############################################################################
4
+ import torch
5
+
6
+
7
+ class Sampler(object):
8
+ def __init__(self, **kwargs):
9
+ super().__init__()
10
+
11
+ @torch.no_grad()
12
+ def compile(self, S, shape, **kwargs):
13
+ pass
14
+
15
+ def run_model(self, x, c_in, sigma):
16
+ x_in = torch.cat([x] * 2)
17
+ sigma_in = torch.cat([sigma] * 2)
18
+ uncond, cond = self.model_wrap(x_in, sigma_in, cond=c_in).chunk(2)
19
+ return uncond + (cond - uncond) * self.cond_scale
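
run_model implements classifier-free guidance by batching the unconditional and conditional branches into a single forward pass and blending them with cond_scale. A minimal sketch of the same trick with a stand-in denoiser; the wrapper, shapes and guidance scale below are placeholders:

import torch

def guided_denoise(model_wrap, x, sigma, c_in, cond_scale):
    # Same batching trick as Sampler.run_model: one forward pass evaluates both branches.
    x_in = torch.cat([x] * 2)
    sigma_in = torch.cat([sigma] * 2)
    uncond, cond = model_wrap(x_in, sigma_in, cond=c_in).chunk(2)
    return uncond + (cond - uncond) * cond_scale

# Toy wrapper: ignores the conditioning and just scales the latent.
toy_wrap = lambda x, sigma, cond=None: 0.9 * x
x = torch.randn(2, 4, 8, 8)
sigma = torch.full((2,), 1.0)
out = guided_denoise(toy_wrap, x, sigma, c_in=None, cond_scale=7.5)
# With identical cond/uncond outputs the guidance term cancels, so out == 0.9 * x.
assert torch.allclose(out, 0.9 * x)
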
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/sampling_util.py ADDED
@@ -0,0 +1,22 @@
1
+ import torch
2
+ import numpy as np
3
+
4
+
5
+ def append_dims(x, target_dims):
6
+ """Appends dimensions to the end of a tensor until it has target_dims dimensions.
7
+ From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py"""
8
+ dims_to_append = target_dims - x.ndim
9
+ if dims_to_append < 0:
10
+ raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
11
+ return x[(...,) + (None,) * dims_to_append]
12
+
13
+
14
+ def norm_thresholding(x0, value):
15
+ s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)
16
+ return x0 * (value / s)
17
+
18
+
19
+ def spatial_norm_thresholding(x0, value):
20
+ # b c h w
21
+ s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)
22
+ return x0 * (value / s)
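
norm_thresholding caps the per-sample RMS norm of a predicted x0 at `value` while leaving samples already below the threshold untouched. A quick check, assuming the repository's `ldm` package is importable:

import torch
from ldm.models.diffusion.sampling_util import norm_thresholding

x0 = torch.randn(2, 4, 8, 8) * 5.0          # per-sample RMS is roughly 5
clipped = norm_thresholding(x0, value=1.0)
rms = clipped.pow(2).flatten(1).mean(1).sqrt()
print(rms)  # approximately tensor([1., 1.]): samples above the threshold are rescaled onto it
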
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/attention.py ADDED
@@ -0,0 +1,352 @@
1
+ from inspect import isfunction
2
+ import math
3
+ import os
4
+ import torch
5
+ import torch.nn.functional as F
6
+ from torch import nn, einsum
7
+ from einops import rearrange, repeat
8
+ from typing import Optional, Any
9
+
10
+ from ldm.modules.diffusionmodules.util import checkpoint
11
+
12
+
13
+ try:
14
+ import xformers
15
+ import xformers.ops
16
+ XFORMERS_IS_AVAILBLE = True
17
+ except:
18
+ XFORMERS_IS_AVAILBLE = False
19
+
20
+ # CrossAttn precision handling
21
+ import os
22
+ _ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32")
23
+
24
+ def exists(val):
25
+ return val is not None
26
+
27
+
28
+ def uniq(arr):
29
+ return{el: True for el in arr}.keys()
30
+
31
+
32
+ def default(val, d):
33
+ if exists(val):
34
+ return val
35
+ return d() if isfunction(d) else d
36
+
37
+
38
+ def max_neg_value(t):
39
+ return -torch.finfo(t.dtype).max
40
+
41
+
42
+ def init_(tensor):
43
+ dim = tensor.shape[-1]
44
+ std = 1 / math.sqrt(dim)
45
+ tensor.uniform_(-std, std)
46
+ return tensor
47
+
48
+
49
+ # feedforward
50
+ class GEGLU(nn.Module):
51
+ def __init__(self, dim_in, dim_out):
52
+ super().__init__()
53
+ self.proj = nn.Linear(dim_in, dim_out * 2)
54
+
55
+ def forward(self, x):
56
+ x, gate = self.proj(x).chunk(2, dim=-1)
57
+ return x * F.gelu(gate)
58
+
59
+
60
+ class FeedForward(nn.Module):
61
+ def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
62
+ super().__init__()
63
+ inner_dim = int(dim * mult)
64
+ dim_out = default(dim_out, dim)
65
+ project_in = nn.Sequential(
66
+ nn.Linear(dim, inner_dim),
67
+ nn.GELU()
68
+ ) if not glu else GEGLU(dim, inner_dim)
69
+
70
+ self.net = nn.Sequential(
71
+ project_in,
72
+ nn.Dropout(dropout),
73
+ nn.Linear(inner_dim, dim_out)
74
+ )
75
+
76
+ def forward(self, x):
77
+ return self.net(x)
78
+
79
+
80
+ def zero_module(module):
81
+ """
82
+ Zero out the parameters of a module and return it.
83
+ """
84
+ for p in module.parameters():
85
+ p.detach().zero_()
86
+ return module
87
+
88
+
89
+ def Normalize(in_channels):
90
+ return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
91
+
92
+
93
+ class SpatialSelfAttention(nn.Module):
94
+ def __init__(self, in_channels):
95
+ super().__init__()
96
+ self.in_channels = in_channels
97
+
98
+ self.norm = Normalize(in_channels)
99
+ self.q = torch.nn.Conv2d(in_channels,
100
+ in_channels,
101
+ kernel_size=1,
102
+ stride=1,
103
+ padding=0)
104
+ self.k = torch.nn.Conv2d(in_channels,
105
+ in_channels,
106
+ kernel_size=1,
107
+ stride=1,
108
+ padding=0)
109
+ self.v = torch.nn.Conv2d(in_channels,
110
+ in_channels,
111
+ kernel_size=1,
112
+ stride=1,
113
+ padding=0)
114
+ self.proj_out = torch.nn.Conv2d(in_channels,
115
+ in_channels,
116
+ kernel_size=1,
117
+ stride=1,
118
+ padding=0)
119
+
120
+ def forward(self, x):
121
+ h_ = x
122
+ h_ = self.norm(h_)
123
+ q = self.q(h_)
124
+ k = self.k(h_)
125
+ v = self.v(h_)
126
+
127
+ # compute attention
128
+ b,c,h,w = q.shape
129
+ q = rearrange(q, 'b c h w -> b (h w) c')
130
+ k = rearrange(k, 'b c h w -> b c (h w)')
131
+ w_ = torch.einsum('bij,bjk->bik', q, k)
132
+
133
+ w_ = w_ * (int(c)**(-0.5))
134
+ w_ = torch.nn.functional.softmax(w_, dim=2)
135
+
136
+ # attend to values
137
+ v = rearrange(v, 'b c h w -> b c (h w)')
138
+ w_ = rearrange(w_, 'b i j -> b j i')
139
+ h_ = torch.einsum('bij,bjk->bik', v, w_)
140
+ h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
141
+ h_ = self.proj_out(h_)
142
+
143
+ return x+h_
144
+
145
+
146
+ class CrossAttention(nn.Module):
147
+ def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
148
+ super().__init__()
149
+ inner_dim = dim_head * heads
150
+ context_dim = default(context_dim, query_dim)
151
+
152
+ self.scale = nn.Parameter(torch.Tensor([dim_head ** -0.5]))
153
+ self.heads = heads
154
+
155
+ self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
156
+ self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
157
+ self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
158
+
159
+ self.to_out = nn.Sequential(
160
+ nn.Linear(inner_dim, query_dim),
161
+ nn.Dropout(dropout)
162
+ )
163
+
164
+ def forward(self, x, context=None, mask=None):
165
+ h = self.heads
166
+
167
+ q = self.to_q(x)
168
+ context = default(context, x)
169
+ k = self.to_k(context)
170
+ v = self.to_v(context)
171
+
172
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
173
+
174
+ k *= self.scale
175
+
176
+ # force cast to fp32 to avoid overflowing
177
+ if _ATTN_PRECISION =="fp32":
178
+ with torch.autocast(enabled=False, device_type = 'cuda'):
179
+ q, k = q.float(), k.float()
180
+ sim = einsum('b i d, b j d -> b i j', q, k)
181
+ else:
182
+ sim = einsum('b i d, b j d -> b i j', q, k)
183
+
184
+ del q, k
185
+
186
+ if exists(mask):
187
+ mask = rearrange(mask, 'b ... -> b (...)')
188
+ max_neg_value = -torch.finfo(sim.dtype).max
189
+ mask = repeat(mask, 'b j -> (b h) () j', h=h)
190
+ sim.masked_fill_(~mask, max_neg_value)
191
+
192
+ # attention, what we cannot get enough of
193
+ if os.environ["CUSTOM_SOFTMAX_FLAVOR"] != "0":
194
+ try:
195
+ import habana_frameworks.torch.hpex.kernels as hpu_kernels
196
+ sim = hpu_kernels.CustomSoftmax.apply(sim, int(os.environ["CUSTOM_SOFTMAX_FLAVOR"]))
197
+ except:
198
+ sim = sim.softmax(dim=-1)
199
+ print("Not using custom softmax!")
200
+ else:
201
+ sim = sim.softmax(dim=-1)
202
+
203
+ out = einsum('b i j, b j d -> b i d', sim, v)
204
+ out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
205
+ return self.to_out(out)
206
+
207
+
208
+ class MemoryEfficientCrossAttention(nn.Module):
209
+ # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
210
+ def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
211
+ super().__init__()
212
+ print(f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
213
+ f"{heads} heads.")
214
+ inner_dim = dim_head * heads
215
+ context_dim = default(context_dim, query_dim)
216
+
217
+ self.heads = heads
218
+ self.dim_head = dim_head
219
+
220
+ self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
221
+ self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
222
+ self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
223
+
224
+ self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))
225
+ self.attention_op: Optional[Any] = None
226
+
227
+ def forward(self, x, context=None, mask=None):
228
+ q = self.to_q(x)
229
+ context = default(context, x)
230
+ k = self.to_k(context)
231
+ v = self.to_v(context)
232
+
233
+ b, _, _ = q.shape
234
+ q, k, v = map(
235
+ lambda t: t.unsqueeze(3)
236
+ .reshape(b, t.shape[1], self.heads, self.dim_head)
237
+ .permute(0, 2, 1, 3)
238
+ .reshape(b * self.heads, t.shape[1], self.dim_head)
239
+ .contiguous(),
240
+ (q, k, v),
241
+ )
242
+
243
+ # actually compute the attention, what we cannot get enough of
244
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
245
+
246
+ if exists(mask):
247
+ raise NotImplementedError
248
+ out = (
249
+ out.unsqueeze(0)
250
+ .reshape(b, self.heads, out.shape[1], self.dim_head)
251
+ .permute(0, 2, 1, 3)
252
+ .reshape(b, out.shape[1], self.heads * self.dim_head)
253
+ )
254
+ return self.to_out(out)
255
+
256
+
257
+ class BasicTransformerBlock(nn.Module):
258
+ ATTENTION_MODES = {
259
+ "softmax": CrossAttention, # vanilla attention
260
+ "softmax-xformers": MemoryEfficientCrossAttention
261
+ }
262
+ def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
263
+ disable_self_attn=False):
264
+ super().__init__()
265
+ attn_mode = "softmax-xformers" if XFORMERS_IS_AVAILBLE else "softmax"
266
+ assert attn_mode in self.ATTENTION_MODES
267
+ attn_cls = self.ATTENTION_MODES[attn_mode]
268
+ self.disable_self_attn = disable_self_attn
269
+ self.attn1 = attn_cls(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
270
+ context_dim=context_dim if self.disable_self_attn else None) # is a self-attention if not self.disable_self_attn
271
+ self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
272
+ self.attn2 = attn_cls(query_dim=dim, context_dim=context_dim,
273
+ heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none
274
+ self.norm1 = nn.LayerNorm(dim)
275
+ self.norm2 = nn.LayerNorm(dim)
276
+ self.norm3 = nn.LayerNorm(dim)
277
+ self.checkpoint = checkpoint
278
+
279
+ def forward(self, x, context=None):
280
+ return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
281
+
282
+ def _forward(self, x, context=None):
283
+ x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
284
+ x = self.attn2(self.norm2(x), context=context) + x
285
+ x = self.ff(self.norm3(x)) + x
286
+ return x
287
+
288
+
289
+ class SpatialTransformer(nn.Module):
290
+ """
291
+ Transformer block for image-like data.
292
+ First, project the input (aka embedding)
293
+ and reshape to b, t, d.
294
+ Then apply standard transformer action.
295
+ Finally, reshape to image
296
+ NEW: use_linear for more efficiency instead of the 1x1 convs
297
+ """
298
+ def __init__(self, in_channels, n_heads, d_head,
299
+ depth=1, dropout=0., context_dim=None,
300
+ disable_self_attn=False, use_linear=False,
301
+ use_checkpoint=True):
302
+ super().__init__()
303
+ if exists(context_dim) and not isinstance(context_dim, list):
304
+ context_dim = [context_dim]
305
+ self.in_channels = in_channels
306
+ inner_dim = n_heads * d_head
307
+ self.norm = Normalize(in_channels)
308
+ if not use_linear:
309
+ self.proj_in = nn.Conv2d(in_channels,
310
+ inner_dim,
311
+ kernel_size=1,
312
+ stride=1,
313
+ padding=0)
314
+ else:
315
+ self.proj_in = nn.Linear(in_channels, inner_dim)
316
+
317
+ self.transformer_blocks = nn.ModuleList(
318
+ [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
319
+ disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)
320
+ for d in range(depth)]
321
+ )
322
+ if not use_linear:
323
+ self.proj_out = zero_module(nn.Conv2d(inner_dim,
324
+ in_channels,
325
+ kernel_size=1,
326
+ stride=1,
327
+ padding=0))
328
+ else:
329
+ self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))
330
+ self.use_linear = use_linear
331
+
332
+ def forward(self, x, context=None):
333
+ # note: if no context is given, cross-attention defaults to self-attention
334
+ if not isinstance(context, list):
335
+ context = [context]
336
+ b, c, h, w = x.shape
337
+ x_in = x
338
+ x = self.norm(x)
339
+ if not self.use_linear:
340
+ x = self.proj_in(x)
341
+ x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
342
+ if self.use_linear:
343
+ x = self.proj_in(x)
344
+ for i, block in enumerate(self.transformer_blocks):
345
+ x = block(x, context=context[i])
346
+ if self.use_linear:
347
+ x = self.proj_out(x)
348
+ x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
349
+ if not self.use_linear:
350
+ x = self.proj_out(x)
351
+ return x + x_in
352
+
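
CrossAttention above can be exercised in isolation; note that its forward() reads the CUSTOM_SOFTMAX_FLAVOR environment variable unconditionally, so the sketch sets it before use. The module path, dimensions and the 1024-wide context are illustrative assumptions (1024 matches the OpenCLIP text-embedding width used by Stable Diffusion 2.1):

import os
os.environ.setdefault("CUSTOM_SOFTMAX_FLAVOR", "0")   # forward() looks this key up unconditionally
import torch
from ldm.modules.attention import CrossAttention

# Self-attention: context defaults to x, so context_dim falls back to query_dim.
attn = CrossAttention(query_dim=320, heads=8, dim_head=40)
x = torch.randn(2, 64, 320)             # (batch, tokens, query_dim)
print(attn(x).shape)                    # torch.Size([2, 64, 320])

# Cross-attention against a text-encoder style context of width 1024.
xattn = CrossAttention(query_dim=320, context_dim=1024, heads=8, dim_head=40)
ctx = torch.randn(2, 77, 1024)
print(xattn(x, context=ctx).shape)      # torch.Size([2, 64, 320])
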
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/diffusionmodules/__init__.py ADDED
File without changes
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/diffusionmodules/openaimodel.py ADDED
@@ -0,0 +1,786 @@
1
+ from abc import abstractmethod
2
+ import math
3
+
4
+ import numpy as np
5
+ import torch as th
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+ from ldm.modules.diffusionmodules.util import (
10
+ checkpoint,
11
+ conv_nd,
12
+ linear,
13
+ avg_pool_nd,
14
+ zero_module,
15
+ normalization,
16
+ timestep_embedding,
17
+ )
18
+ from ldm.modules.attention import SpatialTransformer
19
+ from ldm.util import exists
20
+
21
+
22
+ # dummy replace
23
+ def convert_module_to_f16(x):
24
+ pass
25
+
26
+ def convert_module_to_f32(x):
27
+ pass
28
+
29
+
30
+ ## go
31
+ class AttentionPool2d(nn.Module):
32
+ """
33
+ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
34
+ """
35
+
36
+ def __init__(
37
+ self,
38
+ spacial_dim: int,
39
+ embed_dim: int,
40
+ num_heads_channels: int,
41
+ output_dim: int = None,
42
+ ):
43
+ super().__init__()
44
+ self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
45
+ self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
46
+ self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
47
+ self.num_heads = embed_dim // num_heads_channels
48
+ self.attention = QKVAttention(self.num_heads)
49
+
50
+ def forward(self, x):
51
+ b, c, *_spatial = x.shape
52
+ x = x.reshape(b, c, -1) # NC(HW)
53
+ x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
54
+ x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
55
+ x = self.qkv_proj(x)
56
+ x = self.attention(x)
57
+ x = self.c_proj(x)
58
+ return x[:, :, 0]
59
+
60
+
61
+ class TimestepBlock(nn.Module):
62
+ """
63
+ Any module where forward() takes timestep embeddings as a second argument.
64
+ """
65
+
66
+ @abstractmethod
67
+ def forward(self, x, emb):
68
+ """
69
+ Apply the module to `x` given `emb` timestep embeddings.
70
+ """
71
+
72
+
73
+ class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
74
+ """
75
+ A sequential module that passes timestep embeddings to the children that
76
+ support it as an extra input.
77
+ """
78
+
79
+ def forward(self, x, emb, context=None):
80
+ for layer in self:
81
+ if isinstance(layer, TimestepBlock):
82
+ x = layer(x, emb)
83
+ elif isinstance(layer, SpatialTransformer):
84
+ x = layer(x, context)
85
+ else:
86
+ x = layer(x)
87
+ return x
88
+
89
+
90
+ class Upsample(nn.Module):
91
+ """
92
+ An upsampling layer with an optional convolution.
93
+ :param channels: channels in the inputs and outputs.
94
+ :param use_conv: a bool determining if a convolution is applied.
95
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
96
+ upsampling occurs in the inner-two dimensions.
97
+ """
98
+
99
+ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
100
+ super().__init__()
101
+ self.channels = channels
102
+ self.out_channels = out_channels or channels
103
+ self.use_conv = use_conv
104
+ self.dims = dims
105
+ if use_conv:
106
+ self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
107
+
108
+ def forward(self, x):
109
+ assert x.shape[1] == self.channels
110
+ if self.dims == 3:
111
+ x = F.interpolate(
112
+ x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
113
+ )
114
+ else:
115
+ x = F.interpolate(x, scale_factor=2, mode="nearest")
116
+ if self.use_conv:
117
+ x = self.conv(x)
118
+ return x
119
+
120
+ class TransposedUpsample(nn.Module):
121
+ 'Learned 2x upsampling without padding'
122
+ def __init__(self, channels, out_channels=None, ks=5):
123
+ super().__init__()
124
+ self.channels = channels
125
+ self.out_channels = out_channels or channels
126
+
127
+ self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
128
+
129
+ def forward(self,x):
130
+ return self.up(x)
131
+
132
+
133
+ class Downsample(nn.Module):
134
+ """
135
+ A downsampling layer with an optional convolution.
136
+ :param channels: channels in the inputs and outputs.
137
+ :param use_conv: a bool determining if a convolution is applied.
138
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
139
+ downsampling occurs in the inner-two dimensions.
140
+ """
141
+
142
+ def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
143
+ super().__init__()
144
+ self.channels = channels
145
+ self.out_channels = out_channels or channels
146
+ self.use_conv = use_conv
147
+ self.dims = dims
148
+ stride = 2 if dims != 3 else (1, 2, 2)
149
+ if use_conv:
150
+ self.op = conv_nd(
151
+ dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
152
+ )
153
+ else:
154
+ assert self.channels == self.out_channels
155
+ self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
156
+
157
+ def forward(self, x):
158
+ assert x.shape[1] == self.channels
159
+ return self.op(x)
160
+
161
+
162
+ class ResBlock(TimestepBlock):
163
+ """
164
+ A residual block that can optionally change the number of channels.
165
+ :param channels: the number of input channels.
166
+ :param emb_channels: the number of timestep embedding channels.
167
+ :param dropout: the rate of dropout.
168
+ :param out_channels: if specified, the number of out channels.
169
+ :param use_conv: if True and out_channels is specified, use a spatial
170
+ convolution instead of a smaller 1x1 convolution to change the
171
+ channels in the skip connection.
172
+ :param dims: determines if the signal is 1D, 2D, or 3D.
173
+ :param use_checkpoint: if True, use gradient checkpointing on this module.
174
+ :param up: if True, use this block for upsampling.
175
+ :param down: if True, use this block for downsampling.
176
+ """
177
+
178
+ def __init__(
179
+ self,
180
+ channels,
181
+ emb_channels,
182
+ dropout,
183
+ out_channels=None,
184
+ use_conv=False,
185
+ use_scale_shift_norm=False,
186
+ dims=2,
187
+ use_checkpoint=False,
188
+ up=False,
189
+ down=False,
190
+ ):
191
+ super().__init__()
192
+ self.channels = channels
193
+ self.emb_channels = emb_channels
194
+ self.dropout = dropout
195
+ self.out_channels = out_channels or channels
196
+ self.use_conv = use_conv
197
+ self.use_checkpoint = use_checkpoint
198
+ self.use_scale_shift_norm = use_scale_shift_norm
199
+
200
+ self.in_layers = nn.Sequential(
201
+ normalization(channels),
202
+ nn.SiLU(),
203
+ conv_nd(dims, channels, self.out_channels, 3, padding=1),
204
+ )
205
+
206
+ self.updown = up or down
207
+
208
+ if up:
209
+ self.h_upd = Upsample(channels, False, dims)
210
+ self.x_upd = Upsample(channels, False, dims)
211
+ elif down:
212
+ self.h_upd = Downsample(channels, False, dims)
213
+ self.x_upd = Downsample(channels, False, dims)
214
+ else:
215
+ self.h_upd = self.x_upd = nn.Identity()
216
+
217
+ self.emb_layers = nn.Sequential(
218
+ nn.SiLU(),
219
+ linear(
220
+ emb_channels,
221
+ 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
222
+ ),
223
+ )
224
+ self.out_layers = nn.Sequential(
225
+ normalization(self.out_channels),
226
+ nn.SiLU(),
227
+ nn.Dropout(p=dropout),
228
+ zero_module(
229
+ conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
230
+ ),
231
+ )
232
+
233
+ if self.out_channels == channels:
234
+ self.skip_connection = nn.Identity()
235
+ elif use_conv:
236
+ self.skip_connection = conv_nd(
237
+ dims, channels, self.out_channels, 3, padding=1
238
+ )
239
+ else:
240
+ self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
241
+
242
+ def forward(self, x, emb):
243
+ """
244
+ Apply the block to a Tensor, conditioned on a timestep embedding.
245
+ :param x: an [N x C x ...] Tensor of features.
246
+ :param emb: an [N x emb_channels] Tensor of timestep embeddings.
247
+ :return: an [N x C x ...] Tensor of outputs.
248
+ """
249
+ return checkpoint(
250
+ self._forward, (x, emb), self.parameters(), self.use_checkpoint
251
+ )
252
+
253
+
254
+ def _forward(self, x, emb):
255
+ if self.updown:
256
+ in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
257
+ h = in_rest(x)
258
+ h = self.h_upd(h)
259
+ x = self.x_upd(x)
260
+ h = in_conv(h)
261
+ else:
262
+ h = self.in_layers(x)
263
+ emb_out = self.emb_layers(emb).type(h.dtype)
264
+ while len(emb_out.shape) < len(h.shape):
265
+ emb_out = emb_out[..., None]
266
+ if self.use_scale_shift_norm:
267
+ out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
268
+ scale, shift = th.chunk(emb_out, 2, dim=1)
269
+ h = out_norm(h) * (1 + scale) + shift
270
+ h = out_rest(h)
271
+ else:
272
+ h = h + emb_out
273
+ h = self.out_layers(h)
274
+ return self.skip_connection(x) + h
275
+
276
+
277
+ class AttentionBlock(nn.Module):
278
+ """
279
+ An attention block that allows spatial positions to attend to each other.
280
+ Originally ported from here, but adapted to the N-d case.
281
+ https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
282
+ """
283
+
284
+ def __init__(
285
+ self,
286
+ channels,
287
+ num_heads=1,
288
+ num_head_channels=-1,
289
+ use_checkpoint=False,
290
+ use_new_attention_order=False,
291
+ ):
292
+ super().__init__()
293
+ self.channels = channels
294
+ if num_head_channels == -1:
295
+ self.num_heads = num_heads
296
+ else:
297
+ assert (
298
+ channels % num_head_channels == 0
299
+ ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
300
+ self.num_heads = channels // num_head_channels
301
+ self.use_checkpoint = use_checkpoint
302
+ self.norm = normalization(channels)
303
+ self.qkv = conv_nd(1, channels, channels * 3, 1)
304
+ if use_new_attention_order:
305
+ # split qkv before split heads
306
+ self.attention = QKVAttention(self.num_heads)
307
+ else:
308
+ # split heads before split qkv
309
+ self.attention = QKVAttentionLegacy(self.num_heads)
310
+
311
+ self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
312
+
313
+ def forward(self, x):
314
+ return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
315
+ #return pt_checkpoint(self._forward, x) # pytorch
316
+
317
+ def _forward(self, x):
318
+ b, c, *spatial = x.shape
319
+ x = x.reshape(b, c, -1)
320
+ qkv = self.qkv(self.norm(x))
321
+ h = self.attention(qkv)
322
+ h = self.proj_out(h)
323
+ return (x + h).reshape(b, c, *spatial)
324
+
325
+
326
+ def count_flops_attn(model, _x, y):
327
+ """
328
+ A counter for the `thop` package to count the operations in an
329
+ attention operation.
330
+ Meant to be used like:
331
+ macs, params = thop.profile(
332
+ model,
333
+ inputs=(inputs, timestamps),
334
+ custom_ops={QKVAttention: QKVAttention.count_flops},
335
+ )
336
+ """
337
+ b, c, *spatial = y[0].shape
338
+ num_spatial = int(np.prod(spatial))
339
+ # We perform two matmuls with the same number of ops.
340
+ # The first computes the weight matrix, the second computes
341
+ # the combination of the value vectors.
342
+ matmul_ops = 2 * b * (num_spatial ** 2) * c
343
+ model.total_ops += th.DoubleTensor([matmul_ops])
344
+
345
+
346
+ class QKVAttentionLegacy(nn.Module):
347
+ """
348
+ A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
349
+ """
350
+
351
+ def __init__(self, n_heads):
352
+ super().__init__()
353
+ self.n_heads = n_heads
354
+
355
+ def forward(self, qkv):
356
+ """
357
+ Apply QKV attention.
358
+ :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
359
+ :return: an [N x (H * C) x T] tensor after attention.
360
+ """
361
+ bs, width, length = qkv.shape
362
+ assert width % (3 * self.n_heads) == 0
363
+ ch = width // (3 * self.n_heads)
364
+ q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
365
+ scale = 1 / math.sqrt(math.sqrt(ch))
366
+ weight = th.einsum(
367
+ "bct,bcs->bts", q * scale, k * scale
368
+ ) # More stable with f16 than dividing afterwards
369
+ weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
370
+ a = th.einsum("bts,bcs->bct", weight, v)
371
+ return a.reshape(bs, -1, length)
372
+
373
+ @staticmethod
374
+ def count_flops(model, _x, y):
375
+ return count_flops_attn(model, _x, y)
376
+
377
+
378
+ class QKVAttention(nn.Module):
379
+ """
380
+ A module which performs QKV attention and splits in a different order.
381
+ """
382
+
383
+ def __init__(self, n_heads):
384
+ super().__init__()
385
+ self.n_heads = n_heads
386
+
387
+ def forward(self, qkv):
388
+ """
389
+ Apply QKV attention.
390
+ :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
391
+ :return: an [N x (H * C) x T] tensor after attention.
392
+ """
393
+ bs, width, length = qkv.shape
394
+ assert width % (3 * self.n_heads) == 0
395
+ ch = width // (3 * self.n_heads)
396
+ q, k, v = qkv.chunk(3, dim=1)
397
+ scale = 1 / math.sqrt(math.sqrt(ch))
398
+ weight = th.einsum(
399
+ "bct,bcs->bts",
400
+ (q * scale).view(bs * self.n_heads, ch, length),
401
+ (k * scale).view(bs * self.n_heads, ch, length),
402
+ ) # More stable with f16 than dividing afterwards
403
+ weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
404
+ a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
405
+ return a.reshape(bs, -1, length)
406
+
407
+ @staticmethod
408
+ def count_flops(model, _x, y):
409
+ return count_flops_attn(model, _x, y)
410
+
411
+
412
+ class UNetModel(nn.Module):
413
+ """
414
+ The full UNet model with attention and timestep embedding.
415
+ :param in_channels: channels in the input Tensor.
416
+ :param model_channels: base channel count for the model.
417
+ :param out_channels: channels in the output Tensor.
418
+ :param num_res_blocks: number of residual blocks per downsample.
419
+ :param attention_resolutions: a collection of downsample rates at which
420
+ attention will take place. May be a set, list, or tuple.
421
+ For example, if this contains 4, then at 4x downsampling, attention
422
+ will be used.
423
+ :param dropout: the dropout probability.
424
+ :param channel_mult: channel multiplier for each level of the UNet.
425
+ :param conv_resample: if True, use learned convolutions for upsampling and
426
+ downsampling.
427
+ :param dims: determines if the signal is 1D, 2D, or 3D.
428
+ :param num_classes: if specified (as an int), then this model will be
429
+ class-conditional with `num_classes` classes.
430
+ :param use_checkpoint: use gradient checkpointing to reduce memory usage.
431
+ :param num_heads: the number of attention heads in each attention layer.
432
+ :param num_heads_channels: if specified, ignore num_heads and instead use
433
+ a fixed channel width per attention head.
434
+ :param num_heads_upsample: works with num_heads to set a different number
435
+ of heads for upsampling. Deprecated.
436
+ :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
437
+ :param resblock_updown: use residual blocks for up/downsampling.
438
+ :param use_new_attention_order: use a different attention pattern for potentially
439
+ increased efficiency.
440
+ """
441
+
442
+ def __init__(
443
+ self,
444
+ image_size,
445
+ in_channels,
446
+ model_channels,
447
+ out_channels,
448
+ num_res_blocks,
449
+ attention_resolutions,
450
+ dropout=0,
451
+ channel_mult=(1, 2, 4, 8),
452
+ conv_resample=True,
453
+ dims=2,
454
+ num_classes=None,
455
+ use_checkpoint=False,
456
+ use_fp16=False,
457
+ num_heads=-1,
458
+ num_head_channels=-1,
459
+ num_heads_upsample=-1,
460
+ use_scale_shift_norm=False,
461
+ resblock_updown=False,
462
+ use_new_attention_order=False,
463
+ use_spatial_transformer=False, # custom transformer support
464
+ transformer_depth=1, # custom transformer support
465
+ context_dim=None, # custom transformer support
466
+ n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
467
+ legacy=True,
468
+ disable_self_attentions=None,
469
+ num_attention_blocks=None,
470
+ disable_middle_self_attn=False,
471
+ use_linear_in_transformer=False,
472
+ ):
473
+ super().__init__()
474
+ if use_spatial_transformer:
475
+ assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
476
+
477
+ if context_dim is not None:
478
+ assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
479
+ from omegaconf.listconfig import ListConfig
480
+ if type(context_dim) == ListConfig:
481
+ context_dim = list(context_dim)
482
+
483
+ if num_heads_upsample == -1:
484
+ num_heads_upsample = num_heads
485
+
486
+ if num_heads == -1:
487
+ assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
488
+
489
+ if num_head_channels == -1:
490
+ assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
491
+
492
+ self.image_size = image_size
493
+ self.in_channels = in_channels
494
+ self.model_channels = model_channels
495
+ self.out_channels = out_channels
496
+ if isinstance(num_res_blocks, int):
497
+ self.num_res_blocks = len(channel_mult) * [num_res_blocks]
498
+ else:
499
+ if len(num_res_blocks) != len(channel_mult):
500
+ raise ValueError("provide num_res_blocks either as an int (globally constant) or "
501
+ "as a list/tuple (per-level) with the same length as channel_mult")
502
+ self.num_res_blocks = num_res_blocks
503
+ if disable_self_attentions is not None:
504
+ # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
505
+ assert len(disable_self_attentions) == len(channel_mult)
506
+ if num_attention_blocks is not None:
507
+ assert len(num_attention_blocks) == len(self.num_res_blocks)
508
+ assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
509
+ print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
510
+ f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
511
+ f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
512
+ f"attention will still not be set.")
513
+
514
+ self.attention_resolutions = attention_resolutions
515
+ self.dropout = dropout
516
+ self.channel_mult = channel_mult
517
+ self.conv_resample = conv_resample
518
+ self.num_classes = num_classes
519
+ self.use_checkpoint = use_checkpoint
520
+ self.dtype = th.float16 if use_fp16 else th.float32
521
+ self.num_heads = num_heads
522
+ self.num_head_channels = num_head_channels
523
+ self.num_heads_upsample = num_heads_upsample
524
+ self.predict_codebook_ids = n_embed is not None
525
+
526
+ time_embed_dim = model_channels * 4
527
+ self.time_embed = nn.Sequential(
528
+ linear(model_channels, time_embed_dim),
529
+ nn.SiLU(),
530
+ linear(time_embed_dim, time_embed_dim),
531
+ )
532
+
533
+ if self.num_classes is not None:
534
+ if isinstance(self.num_classes, int):
535
+ self.label_emb = nn.Embedding(num_classes, time_embed_dim)
536
+ elif self.num_classes == "continuous":
537
+ print("setting up linear c_adm embedding layer")
538
+ self.label_emb = nn.Linear(1, time_embed_dim)
539
+ else:
540
+ raise ValueError()
541
+
542
+ self.input_blocks = nn.ModuleList(
543
+ [
544
+ TimestepEmbedSequential(
545
+ conv_nd(dims, in_channels, model_channels, 3, padding=1)
546
+ )
547
+ ]
548
+ )
549
+ self._feature_size = model_channels
550
+ input_block_chans = [model_channels]
551
+ ch = model_channels
552
+ ds = 1
553
+ for level, mult in enumerate(channel_mult):
554
+ for nr in range(self.num_res_blocks[level]):
555
+ layers = [
556
+ ResBlock(
557
+ ch,
558
+ time_embed_dim,
559
+ dropout,
560
+ out_channels=mult * model_channels,
561
+ dims=dims,
562
+ use_checkpoint=use_checkpoint,
563
+ use_scale_shift_norm=use_scale_shift_norm,
564
+ )
565
+ ]
566
+ ch = mult * model_channels
567
+ if ds in attention_resolutions:
568
+ if num_head_channels == -1:
569
+ dim_head = ch // num_heads
570
+ else:
571
+ num_heads = ch // num_head_channels
572
+ dim_head = num_head_channels
573
+ if legacy:
574
+ #num_heads = 1
575
+ dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
576
+ if exists(disable_self_attentions):
577
+ disabled_sa = disable_self_attentions[level]
578
+ else:
579
+ disabled_sa = False
580
+
581
+ if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
582
+ layers.append(
583
+ AttentionBlock(
584
+ ch,
585
+ use_checkpoint=use_checkpoint,
586
+ num_heads=num_heads,
587
+ num_head_channels=dim_head,
588
+ use_new_attention_order=use_new_attention_order,
589
+ ) if not use_spatial_transformer else SpatialTransformer(
590
+ ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
591
+ disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
592
+ use_checkpoint=use_checkpoint
593
+ )
594
+ )
595
+ self.input_blocks.append(TimestepEmbedSequential(*layers))
596
+ self._feature_size += ch
597
+ input_block_chans.append(ch)
598
+ if level != len(channel_mult) - 1:
599
+ out_ch = ch
600
+ self.input_blocks.append(
601
+ TimestepEmbedSequential(
602
+ ResBlock(
603
+ ch,
604
+ time_embed_dim,
605
+ dropout,
606
+ out_channels=out_ch,
607
+ dims=dims,
608
+ use_checkpoint=use_checkpoint,
609
+ use_scale_shift_norm=use_scale_shift_norm,
610
+ down=True,
611
+ )
612
+ if resblock_updown
613
+ else Downsample(
614
+ ch, conv_resample, dims=dims, out_channels=out_ch
615
+ )
616
+ )
617
+ )
618
+ ch = out_ch
619
+ input_block_chans.append(ch)
620
+ ds *= 2
621
+ self._feature_size += ch
622
+
623
+ if num_head_channels == -1:
624
+ dim_head = ch // num_heads
625
+ else:
626
+ num_heads = ch // num_head_channels
627
+ dim_head = num_head_channels
628
+ if legacy:
629
+ #num_heads = 1
630
+ dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
631
+ self.middle_block = TimestepEmbedSequential(
632
+ ResBlock(
633
+ ch,
634
+ time_embed_dim,
635
+ dropout,
636
+ dims=dims,
637
+ use_checkpoint=use_checkpoint,
638
+ use_scale_shift_norm=use_scale_shift_norm,
639
+ ),
640
+ AttentionBlock(
641
+ ch,
642
+ use_checkpoint=use_checkpoint,
643
+ num_heads=num_heads,
644
+ num_head_channels=dim_head,
645
+ use_new_attention_order=use_new_attention_order,
646
+ ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn
647
+ ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
648
+ disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
649
+ use_checkpoint=use_checkpoint
650
+ ),
651
+ ResBlock(
652
+ ch,
653
+ time_embed_dim,
654
+ dropout,
655
+ dims=dims,
656
+ use_checkpoint=use_checkpoint,
657
+ use_scale_shift_norm=use_scale_shift_norm,
658
+ ),
659
+ )
660
+ self._feature_size += ch
661
+
662
+ self.output_blocks = nn.ModuleList([])
663
+ for level, mult in list(enumerate(channel_mult))[::-1]:
664
+ for i in range(self.num_res_blocks[level] + 1):
665
+ ich = input_block_chans.pop()
666
+ layers = [
667
+ ResBlock(
668
+ ch + ich,
669
+ time_embed_dim,
670
+ dropout,
671
+ out_channels=model_channels * mult,
672
+ dims=dims,
673
+ use_checkpoint=use_checkpoint,
674
+ use_scale_shift_norm=use_scale_shift_norm,
675
+ )
676
+ ]
677
+ ch = model_channels * mult
678
+ if ds in attention_resolutions:
679
+ if num_head_channels == -1:
680
+ dim_head = ch // num_heads
681
+ else:
682
+ num_heads = ch // num_head_channels
683
+ dim_head = num_head_channels
684
+ if legacy:
685
+ #num_heads = 1
686
+ dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
687
+ if exists(disable_self_attentions):
688
+ disabled_sa = disable_self_attentions[level]
689
+ else:
690
+ disabled_sa = False
691
+
692
+ if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
693
+ layers.append(
694
+ AttentionBlock(
695
+ ch,
696
+ use_checkpoint=use_checkpoint,
697
+ num_heads=num_heads_upsample,
698
+ num_head_channels=dim_head,
699
+ use_new_attention_order=use_new_attention_order,
700
+ ) if not use_spatial_transformer else SpatialTransformer(
701
+ ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
702
+ disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
703
+ use_checkpoint=use_checkpoint
704
+ )
705
+ )
706
+ if level and i == self.num_res_blocks[level]:
707
+ out_ch = ch
708
+ layers.append(
709
+ ResBlock(
710
+ ch,
711
+ time_embed_dim,
712
+ dropout,
713
+ out_channels=out_ch,
714
+ dims=dims,
715
+ use_checkpoint=use_checkpoint,
716
+ use_scale_shift_norm=use_scale_shift_norm,
717
+ up=True,
718
+ )
719
+ if resblock_updown
720
+ else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
721
+ )
722
+ ds //= 2
723
+ self.output_blocks.append(TimestepEmbedSequential(*layers))
724
+ self._feature_size += ch
725
+
726
+ self.out = nn.Sequential(
727
+ normalization(ch),
728
+ nn.SiLU(),
729
+ zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
730
+ )
731
+ if self.predict_codebook_ids:
732
+ self.id_predictor = nn.Sequential(
733
+ normalization(ch),
734
+ conv_nd(dims, model_channels, n_embed, 1),
735
+ #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
736
+ )
737
+
738
+ def convert_to_fp16(self):
739
+ """
740
+ Convert the torso of the model to float16.
741
+ """
742
+ self.input_blocks.apply(convert_module_to_f16)
743
+ self.middle_block.apply(convert_module_to_f16)
744
+ self.output_blocks.apply(convert_module_to_f16)
745
+
746
+ def convert_to_fp32(self):
747
+ """
748
+ Convert the torso of the model to float32.
749
+ """
750
+ self.input_blocks.apply(convert_module_to_f32)
751
+ self.middle_block.apply(convert_module_to_f32)
752
+ self.output_blocks.apply(convert_module_to_f32)
753
+
754
+ def forward(self, x, timesteps=None, context=None, y=None,**kwargs):
755
+ """
756
+ Apply the model to an input batch.
757
+ :param x: an [N x C x ...] Tensor of inputs.
758
+ :param timesteps: a 1-D batch of timesteps.
759
+ :param context: conditioning plugged in via crossattn
760
+ :param y: an [N] Tensor of labels, if class-conditional.
761
+ :return: an [N x C x ...] Tensor of outputs.
762
+ """
763
+ assert (y is not None) == (
764
+ self.num_classes is not None
765
+ ), "must specify y if and only if the model is class-conditional"
766
+ hs = []
767
+ t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
768
+ emb = self.time_embed(t_emb)
769
+
770
+ if self.num_classes is not None:
771
+ assert y.shape[0] == x.shape[0]
772
+ emb = emb + self.label_emb(y)
773
+
774
+ h = x.type(self.dtype)
775
+ for module in self.input_blocks:
776
+ h = module(h, emb, context)
777
+ hs.append(h)
778
+ h = self.middle_block(h, emb, context)
779
+ for module in self.output_blocks:
780
+ h = th.cat([h, hs.pop()], dim=1)
781
+ h = module(h, emb, context)
782
+ h = h.type(x.dtype)
783
+ if self.predict_codebook_ids:
784
+ return self.id_predictor(h)
785
+ else:
786
+ return self.out(h)
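
A minimal sketch of driving the UNetModel above end to end. The import path and the constructor values are assumptions (they follow a Stable Diffusion v2-style configuration rather than anything pinned down by this diff); the random tensors only stand in for a real latent batch and its text conditioning.

import torch
from ldm.modules.diffusionmodules.openaimodel import UNetModel  # assumed module path

# Assumed SD v2-style hyperparameters; adjust to the config actually in use.
unet = UNetModel(
    image_size=32, in_channels=4, model_channels=320, out_channels=4,
    num_res_blocks=2, attention_resolutions=[4, 2, 1], channel_mult=[1, 2, 4, 4],
    num_head_channels=64, use_spatial_transformer=True, transformer_depth=1,
    context_dim=1024, use_linear_in_transformer=True, legacy=False,
)
x = torch.randn(1, 4, 32, 32)         # latent batch, [N x C x H x W]
t = torch.randint(0, 1000, (1,))      # one timestep per batch element
ctx = torch.randn(1, 77, 1024)        # cross-attention conditioning ("context")
eps = unet(x, timesteps=t, context=ctx)   # same spatial shape as x: [1, 4, 32, 32]
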
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/diffusionmodules/util.py ADDED
@@ -0,0 +1,270 @@
1
+ # adapted from
2
+ # https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
3
+ # and
4
+ # https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
5
+ # and
6
+ # https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
7
+ #
8
+ # thanks!
9
+
10
+
11
+ import os
12
+ import math
13
+ import torch
14
+ import torch.nn as nn
15
+ import numpy as np
16
+ from einops import repeat
17
+
18
+ from ldm.util import instantiate_from_config
19
+
20
+
21
+ def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
22
+ if schedule == "linear":
23
+ betas = (
24
+ torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
25
+ )
26
+
27
+ elif schedule == "cosine":
28
+ timesteps = (
29
+ torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
30
+ )
31
+ alphas = timesteps / (1 + cosine_s) * np.pi / 2
32
+ alphas = torch.cos(alphas).pow(2)
33
+ alphas = alphas / alphas[0]
34
+ betas = 1 - alphas[1:] / alphas[:-1]
35
+ betas = np.clip(betas, a_min=0, a_max=0.999)
36
+
37
+ elif schedule == "sqrt_linear":
38
+ betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
39
+ elif schedule == "sqrt":
40
+ betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
41
+ else:
42
+ raise ValueError(f"schedule '{schedule}' unknown.")
43
+ return betas.numpy()
44
+
45
+
46
+ def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
47
+ if ddim_discr_method == 'uniform':
48
+ c = num_ddpm_timesteps // num_ddim_timesteps
49
+ ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
50
+ elif ddim_discr_method == 'quad':
51
+ ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
52
+ else:
53
+ raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
54
+
55
+ # assert ddim_timesteps.shape[0] == num_ddim_timesteps
56
+ # add one to get the final alpha values right (the ones from first scale to data during sampling)
57
+ steps_out = ddim_timesteps + 1
58
+ if verbose:
59
+ print(f'Selected timesteps for ddim sampler: {steps_out}')
60
+ return steps_out
61
+
62
+
63
+ def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
64
+ # select alphas for computing the variance schedule
65
+ alphas = alphacums[ddim_timesteps]
66
+ alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
67
+
68
+ # according to the formula provided in https://arxiv.org/abs/2010.02502
69
+ sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
70
+ if verbose:
71
+ print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
72
+ print(f'For the chosen value of eta, which is {eta}, '
73
+ f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
74
+ return sigmas, alphas, alphas_prev
75
+
76
+
77
+ def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
78
+ """
79
+ Create a beta schedule that discretizes the given alpha_t_bar function,
80
+ which defines the cumulative product of (1-beta) over time from t = [0,1].
81
+ :param num_diffusion_timesteps: the number of betas to produce.
82
+ :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
83
+ produces the cumulative product of (1-beta) up to that
84
+ part of the diffusion process.
85
+ :param max_beta: the maximum beta to use; use values lower than 1 to
86
+ prevent singularities.
87
+ """
88
+ betas = []
89
+ for i in range(num_diffusion_timesteps):
90
+ t1 = i / num_diffusion_timesteps
91
+ t2 = (i + 1) / num_diffusion_timesteps
92
+ betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
93
+ return np.array(betas)
94
+
95
+
96
+ def extract_into_tensor(a, t, x_shape):
97
+ b, *_ = t.shape
98
+ out = a.gather(-1, t)
99
+ return out.reshape(b, *((1,) * (len(x_shape) - 1)))
100
+
101
+
102
+ def checkpoint(func, inputs, params, flag):
103
+ """
104
+ Evaluate a function without caching intermediate activations, allowing for
105
+ reduced memory at the expense of extra compute in the backward pass.
106
+ :param func: the function to evaluate.
107
+ :param inputs: the argument sequence to pass to `func`.
108
+ :param params: a sequence of parameters `func` depends on but does not
109
+ explicitly take as arguments.
110
+ :param flag: if False, disable gradient checkpointing.
111
+ """
112
+ if flag:
113
+ args = tuple(inputs) + tuple(params)
114
+ return CheckpointFunction.apply(func, len(inputs), *args)
115
+ else:
116
+ return func(*inputs)
117
+
118
+
119
+ class CheckpointFunction(torch.autograd.Function):
120
+ @staticmethod
121
+ def forward(ctx, run_function, length, *args):
122
+ ctx.run_function = run_function
123
+ ctx.input_tensors = list(args[:length])
124
+ ctx.input_params = list(args[length:])
125
+ ctx.gpu_autocast_kwargs = {"enabled": torch.is_autocast_enabled(),
126
+ "dtype": torch.get_autocast_gpu_dtype(),
127
+ "cache_enabled": torch.is_autocast_cache_enabled()}
128
+ with torch.no_grad():
129
+ output_tensors = ctx.run_function(*ctx.input_tensors)
130
+ return output_tensors
131
+
132
+ @staticmethod
133
+ def backward(ctx, *output_grads):
134
+ ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
135
+ with torch.enable_grad(), \
136
+ torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
137
+ # Fixes a bug where the first op in run_function modifies the
138
+ # Tensor storage in place, which is not allowed for detach()'d
139
+ # Tensors.
140
+ shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
141
+ output_tensors = ctx.run_function(*shallow_copies)
142
+ input_grads = torch.autograd.grad(
143
+ output_tensors,
144
+ ctx.input_tensors + ctx.input_params,
145
+ output_grads,
146
+ allow_unused=True,
147
+ )
148
+ del ctx.input_tensors
149
+ del ctx.input_params
150
+ del output_tensors
151
+ return (None, None) + input_grads
152
+
153
+
154
+ def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
155
+ """
156
+ Create sinusoidal timestep embeddings.
157
+ :param timesteps: a 1-D Tensor of N indices, one per batch element.
158
+ These may be fractional.
159
+ :param dim: the dimension of the output.
160
+ :param max_period: controls the minimum frequency of the embeddings.
161
+ :return: an [N x dim] Tensor of positional embeddings.
162
+ """
163
+ if not repeat_only:
164
+ half = dim // 2
165
+ freqs = torch.exp(
166
+ -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
167
+ ).to(device=timesteps.device)
168
+ args = timesteps[:, None].float() * freqs[None]
169
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
170
+ if dim % 2:
171
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
172
+ else:
173
+ embedding = repeat(timesteps, 'b -> b d', d=dim)
174
+ return embedding
175
+
176
+
177
+ def zero_module(module):
178
+ """
179
+ Zero out the parameters of a module and return it.
180
+ """
181
+ for p in module.parameters():
182
+ p.detach().zero_()
183
+ return module
184
+
185
+
186
+ def scale_module(module, scale):
187
+ """
188
+ Scale the parameters of a module and return it.
189
+ """
190
+ for p in module.parameters():
191
+ p.detach().mul_(scale)
192
+ return module
193
+
194
+
195
+ def mean_flat(tensor):
196
+ """
197
+ Take the mean over all non-batch dimensions.
198
+ """
199
+ return tensor.mean(dim=list(range(1, len(tensor.shape))))
200
+
201
+
202
+ def normalization(channels):
203
+ """
204
+ Make a standard normalization layer.
205
+ :param channels: number of input channels.
206
+ :return: an nn.Module for normalization.
207
+ """
208
+ return GroupNorm32(32, channels)
209
+
210
+
211
+ # PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
212
+ class SiLU(nn.Module):
213
+ def forward(self, x):
214
+ return x * torch.sigmoid(x)
215
+
216
+
217
+ class GroupNorm32(nn.GroupNorm):
218
+ def forward(self, x):
219
+ return super().forward(x.float()).type(x.dtype)
220
+
221
+ def conv_nd(dims, *args, **kwargs):
222
+ """
223
+ Create a 1D, 2D, or 3D convolution module.
224
+ """
225
+ if dims == 1:
226
+ return nn.Conv1d(*args, **kwargs)
227
+ elif dims == 2:
228
+ return nn.Conv2d(*args, **kwargs)
229
+ elif dims == 3:
230
+ return nn.Conv3d(*args, **kwargs)
231
+ raise ValueError(f"unsupported dimensions: {dims}")
232
+
233
+
234
+ def linear(*args, **kwargs):
235
+ """
236
+ Create a linear module.
237
+ """
238
+ return nn.Linear(*args, **kwargs)
239
+
240
+
241
+ def avg_pool_nd(dims, *args, **kwargs):
242
+ """
243
+ Create a 1D, 2D, or 3D average pooling module.
244
+ """
245
+ if dims == 1:
246
+ return nn.AvgPool1d(*args, **kwargs)
247
+ elif dims == 2:
248
+ return nn.AvgPool2d(*args, **kwargs)
249
+ elif dims == 3:
250
+ return nn.AvgPool3d(*args, **kwargs)
251
+ raise ValueError(f"unsupported dimensions: {dims}")
252
+
253
+
254
+ class HybridConditioner(nn.Module):
255
+
256
+ def __init__(self, c_concat_config, c_crossattn_config):
257
+ super().__init__()
258
+ self.concat_conditioner = instantiate_from_config(c_concat_config)
259
+ self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
260
+
261
+ def forward(self, c_concat, c_crossattn):
262
+ c_concat = self.concat_conditioner(c_concat)
263
+ c_crossattn = self.crossattn_conditioner(c_crossattn)
264
+ return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
265
+
266
+
267
+ def noise_like(shape, device, repeat=False):
268
+ repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
269
+ noise = lambda: torch.randn(shape, device=device)
270
+ return repeat_noise() if repeat else noise()
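
For orientation, a short sketch of the two workhorse helpers in this file: make_beta_schedule builds the per-step betas of the diffusion process, and timestep_embedding turns integer timesteps into the sinusoidal vectors consumed by UNetModel's time_embed MLP. Only the import path comes from this file; the dim=320 value is an assumption chosen to match the model_channels used in the earlier sketch.

import torch
from ldm.modules.diffusionmodules.util import make_beta_schedule, timestep_embedding

# DDPM-style "linear" schedule and the cumulative alphas it implies.
betas = make_beta_schedule("linear", n_timestep=1000)               # numpy array, shape (1000,)
alphas_cumprod = torch.cumprod(1.0 - torch.from_numpy(betas), dim=0)

# Sinusoidal embeddings for a small batch of timesteps.
emb = timestep_embedding(torch.tensor([0, 500, 999]), dim=320)      # shape [3, 320]
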
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/ema.py ADDED
@@ -0,0 +1,80 @@
1
+ import torch
2
+ from torch import nn
3
+
4
+
5
+ class LitEma(nn.Module):
6
+ def __init__(self, model, decay=0.9999, use_num_upates=True):
7
+ super().__init__()
8
+ if decay < 0.0 or decay > 1.0:
9
+ raise ValueError('Decay must be between 0 and 1')
10
+
11
+ self.m_name2s_name = {}
12
+ self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
13
+ self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates
14
+ else torch.tensor(-1, dtype=torch.int))
15
+
16
+ for name, p in model.named_parameters():
17
+ if p.requires_grad:
18
+ # remove as '.'-character is not allowed in buffers
19
+ s_name = name.replace('.', '')
20
+ self.m_name2s_name.update({name: s_name})
21
+ self.register_buffer(s_name, p.clone().detach().data)
22
+
23
+ self.collected_params = []
24
+
25
+ def reset_num_updates(self):
26
+ del self.num_updates
27
+ self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))
28
+
29
+ def forward(self, model):
30
+ decay = self.decay
31
+
32
+ if self.num_updates >= 0:
33
+ self.num_updates += 1
34
+ decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
35
+
36
+ one_minus_decay = 1.0 - decay
37
+
38
+ with torch.no_grad():
39
+ m_param = dict(model.named_parameters())
40
+ shadow_params = dict(self.named_buffers())
41
+
42
+ for key in m_param:
43
+ if m_param[key].requires_grad:
44
+ sname = self.m_name2s_name[key]
45
+ shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
46
+ shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
47
+ else:
48
+ assert not key in self.m_name2s_name
49
+
50
+ def copy_to(self, model):
51
+ m_param = dict(model.named_parameters())
52
+ shadow_params = dict(self.named_buffers())
53
+ for key in m_param:
54
+ if m_param[key].requires_grad:
55
+ m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
56
+ else:
57
+ assert not key in self.m_name2s_name
58
+
59
+ def store(self, parameters):
60
+ """
61
+ Save the current parameters for restoring later.
62
+ Args:
63
+ parameters: Iterable of `torch.nn.Parameter`; the parameters to be
64
+ temporarily stored.
65
+ """
66
+ self.collected_params = [param.clone() for param in parameters]
67
+
68
+ def restore(self, parameters):
69
+ """
70
+ Restore the parameters stored with the `store` method.
71
+ Useful to validate the model with EMA parameters without affecting the
72
+ original optimization process. Store the parameters before the
73
+ `copy_to` method. After validation (or model saving), use this to
74
+ restore the former parameters.
75
+ Args:
76
+ parameters: Iterable of `torch.nn.Parameter`; the parameters to be
77
+ updated with the stored parameters.
78
+ """
79
+ for c_param, param in zip(self.collected_params, parameters):
80
+ param.data.copy_(c_param.data)
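
The store/copy_to/restore trio documented above is meant to bracket evaluation: swap the EMA weights in, validate, then put the raw training weights back. A hedged sketch of that pattern, with a toy nn.Linear standing in for the diffusion model:

import torch
from torch import nn
from ldm.modules.ema import LitEma

model = nn.Linear(8, 8)             # stand-in for the real model
ema = LitEma(model, decay=0.9999)

# ... after each optimizer step in the training loop:
ema(model)                          # update the shadow (EMA) copies of the weights

# Evaluate with EMA weights, then restore the raw training weights.
ema.store(model.parameters())
ema.copy_to(model)
# ... run validation here ...
ema.restore(model.parameters())
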
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/encoders/__init__.py ADDED
File without changes
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/encoders/modules.py ADDED
@@ -0,0 +1,213 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from torch.utils.checkpoint import checkpoint
4
+
5
+ from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel
6
+
7
+ import open_clip
8
+ from ldm.util import default, count_params
9
+
10
+
11
+ class AbstractEncoder(nn.Module):
12
+ def __init__(self):
13
+ super().__init__()
14
+
15
+ def encode(self, *args, **kwargs):
16
+ raise NotImplementedError
17
+
18
+
19
+ class IdentityEncoder(AbstractEncoder):
20
+
21
+ def encode(self, x):
22
+ return x
23
+
24
+
25
+ class ClassEmbedder(nn.Module):
26
+ def __init__(self, embed_dim, n_classes=1000, key='class', ucg_rate=0.1):
27
+ super().__init__()
28
+ self.key = key
29
+ self.embedding = nn.Embedding(n_classes, embed_dim)
30
+ self.n_classes = n_classes
31
+ self.ucg_rate = ucg_rate
32
+
33
+ def forward(self, batch, key=None, disable_dropout=False):
34
+ if key is None:
35
+ key = self.key
36
+ # this is for use in crossattn
37
+ c = batch[key][:, None]
38
+ if self.ucg_rate > 0. and not disable_dropout:
39
+ mask = 1. - torch.bernoulli(torch.ones_like(c) * self.ucg_rate)
40
+ c = mask * c + (1-mask) * torch.ones_like(c)*(self.n_classes-1)
41
+ c = c.long()
42
+ c = self.embedding(c)
43
+ return c
44
+
45
+ def get_unconditional_conditioning(self, bs, device="cuda"):
46
+ uc_class = self.n_classes - 1 # 1000 classes --> 0 ... 999, one extra class for ucg (class 1000)
47
+ uc = torch.ones((bs,), device=device) * uc_class
48
+ uc = {self.key: uc}
49
+ return uc
50
+
51
+
52
+ def disabled_train(self, mode=True):
53
+ """Overwrite model.train with this function to make sure train/eval mode
54
+ does not change anymore."""
55
+ return self
56
+
57
+
58
+ class FrozenT5Embedder(AbstractEncoder):
59
+ """Uses the T5 transformer encoder for text"""
60
+ def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
61
+ super().__init__()
62
+ self.tokenizer = T5Tokenizer.from_pretrained(version)
63
+ self.transformer = T5EncoderModel.from_pretrained(version)
64
+ self.device = device
65
+ self.max_length = max_length # TODO: typical value?
66
+ if freeze:
67
+ self.freeze()
68
+
69
+ def freeze(self):
70
+ self.transformer = self.transformer.eval()
71
+ #self.train = disabled_train
72
+ for param in self.parameters():
73
+ param.requires_grad = False
74
+
75
+ def forward(self, text):
76
+ batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
77
+ return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
78
+ tokens = batch_encoding["input_ids"].to(self.device)
79
+ outputs = self.transformer(input_ids=tokens)
80
+
81
+ z = outputs.last_hidden_state
82
+ return z
83
+
84
+ def encode(self, text):
85
+ return self(text)
86
+
87
+
88
+ class FrozenCLIPEmbedder(AbstractEncoder):
89
+ """Uses the CLIP transformer encoder for text (from huggingface)"""
90
+ LAYERS = [
91
+ "last",
92
+ "pooled",
93
+ "hidden"
94
+ ]
95
+ def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77,
96
+ freeze=True, layer="last", layer_idx=None): # clip-vit-base-patch32
97
+ super().__init__()
98
+ assert layer in self.LAYERS
99
+ self.tokenizer = CLIPTokenizer.from_pretrained(version)
100
+ self.transformer = CLIPTextModel.from_pretrained(version)
101
+ self.device = device
102
+ self.max_length = max_length
103
+ if freeze:
104
+ self.freeze()
105
+ self.layer = layer
106
+ self.layer_idx = layer_idx
107
+ if layer == "hidden":
108
+ assert layer_idx is not None
109
+ assert 0 <= abs(layer_idx) <= 12
110
+
111
+ def freeze(self):
112
+ self.transformer = self.transformer.eval()
113
+ #self.train = disabled_train
114
+ for param in self.parameters():
115
+ param.requires_grad = False
116
+
117
+ def forward(self, text):
118
+ batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
119
+ return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
120
+ tokens = batch_encoding["input_ids"].to(self.device)
121
+ outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer=="hidden")
122
+ if self.layer == "last":
123
+ z = outputs.last_hidden_state
124
+ elif self.layer == "pooled":
125
+ z = outputs.pooler_output[:, None, :]
126
+ else:
127
+ z = outputs.hidden_states[self.layer_idx]
128
+ return z
129
+
130
+ def encode(self, text):
131
+ return self(text)
132
+
133
+
134
+ class FrozenOpenCLIPEmbedder(AbstractEncoder):
135
+ """
136
+ Uses the OpenCLIP transformer encoder for text
137
+ """
138
+ LAYERS = [
139
+ #"pooled",
140
+ "last",
141
+ "penultimate"
142
+ ]
143
+ def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", max_length=77,
144
+ freeze=True, layer="last"):
145
+ super().__init__()
146
+ assert layer in self.LAYERS
147
+ model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'), pretrained=version)
148
+ del model.visual
149
+ self.model = model
150
+
151
+ self.device = device
152
+ self.max_length = max_length
153
+ if freeze:
154
+ self.freeze()
155
+ self.layer = layer
156
+ if self.layer == "last":
157
+ self.layer_idx = 0
158
+ elif self.layer == "penultimate":
159
+ self.layer_idx = 1
160
+ else:
161
+ raise NotImplementedError()
162
+
163
+ def freeze(self):
164
+ self.model = self.model.eval()
165
+ for param in self.parameters():
166
+ param.requires_grad = False
167
+
168
+ def forward(self, text):
169
+ tokens = open_clip.tokenize(text)
170
+ z = self.encode_with_transformer(tokens.to(self.device))
171
+ return z
172
+
173
+ def encode_with_transformer(self, text):
174
+ x = self.model.token_embedding(text) # [batch_size, n_ctx, d_model]
175
+ x = x + self.model.positional_embedding
176
+ x = x.permute(1, 0, 2) # NLD -> LND
177
+ x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask)
178
+ x = x.permute(1, 0, 2) # LND -> NLD
179
+ x = self.model.ln_final(x)
180
+ return x
181
+
182
+ def text_transformer_forward(self, x: torch.Tensor, attn_mask = None):
183
+ for i, r in enumerate(self.model.transformer.resblocks):
184
+ if i == len(self.model.transformer.resblocks) - self.layer_idx:
185
+ break
186
+ if self.model.transformer.grad_checkpointing and not torch.jit.is_scripting():
187
+ x = checkpoint(r, x, attn_mask)
188
+ else:
189
+ x = r(x, attn_mask=attn_mask)
190
+ return x
191
+
192
+ def encode(self, text):
193
+ return self(text)
194
+
195
+
196
+ class FrozenCLIPT5Encoder(AbstractEncoder):
197
+ def __init__(self, clip_version="openai/clip-vit-large-patch14", t5_version="google/t5-v1_1-xl", device="cuda",
198
+ clip_max_length=77, t5_max_length=77):
199
+ super().__init__()
200
+ self.clip_encoder = FrozenCLIPEmbedder(clip_version, device, max_length=clip_max_length)
201
+ self.t5_encoder = FrozenT5Embedder(t5_version, device, max_length=t5_max_length)
202
+ print(f"{self.clip_encoder.__class__.__name__} has {count_params(self.clip_encoder)*1.e-6:.2f} M parameters, "
203
+ f"{self.t5_encoder.__class__.__name__} comes with {count_params(self.t5_encoder)*1.e-6:.2f} M params.")
204
+
205
+ def encode(self, text):
206
+ return self(text)
207
+
208
+ def forward(self, text):
209
+ clip_z = self.clip_encoder.encode(text)
210
+ t5_z = self.t5_encoder.encode(text)
211
+ return [clip_z, t5_z]
212
+
213
+
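
To connect this file back to the UNet sketch earlier: the frozen text encoders here produce the context tensor consumed by the cross-attention layers. A hedged example with FrozenOpenCLIPEmbedder; it requires the open_clip package and downloads the laion2b ViT-H text weights on first use, and device="cpu" is only an assumption for the sketch.

from ldm.modules.encoders.modules import FrozenOpenCLIPEmbedder

encoder = FrozenOpenCLIPEmbedder(arch="ViT-H-14", version="laion2b_s32b_b79k",
                                 device="cpu", layer="penultimate")
z = encoder.encode(["a photograph of an astronaut riding a horse"])
print(z.shape)   # torch.Size([1, 77, 1024]): batch x tokens x transformer width
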
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/__init__.py ADDED
File without changes
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/api.py ADDED
@@ -0,0 +1,170 @@
1
+ # based on https://github.com/isl-org/MiDaS
2
+
3
+ import cv2
4
+ import torch
5
+ import torch.nn as nn
6
+ from torchvision.transforms import Compose
7
+
8
+ from ldm.modules.midas.midas.dpt_depth import DPTDepthModel
9
+ from ldm.modules.midas.midas.midas_net import MidasNet
10
+ from ldm.modules.midas.midas.midas_net_custom import MidasNet_small
11
+ from ldm.modules.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet
12
+
13
+
14
+ ISL_PATHS = {
15
+ "dpt_large": "midas_models/dpt_large-midas-2f21e586.pt",
16
+ "dpt_hybrid": "midas_models/dpt_hybrid-midas-501f0c75.pt",
17
+ "midas_v21": "",
18
+ "midas_v21_small": "",
19
+ }
20
+
21
+
22
+ def disabled_train(self, mode=True):
23
+ """Overwrite model.train with this function to make sure train/eval mode
24
+ does not change anymore."""
25
+ return self
26
+
27
+
28
+ def load_midas_transform(model_type):
29
+ # https://github.com/isl-org/MiDaS/blob/master/run.py
30
+ # load transform only
31
+ if model_type == "dpt_large": # DPT-Large
32
+ net_w, net_h = 384, 384
33
+ resize_mode = "minimal"
34
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
35
+
36
+ elif model_type == "dpt_hybrid": # DPT-Hybrid
37
+ net_w, net_h = 384, 384
38
+ resize_mode = "minimal"
39
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
40
+
41
+ elif model_type == "midas_v21":
42
+ net_w, net_h = 384, 384
43
+ resize_mode = "upper_bound"
44
+ normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
45
+
46
+ elif model_type == "midas_v21_small":
47
+ net_w, net_h = 256, 256
48
+ resize_mode = "upper_bound"
49
+ normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
50
+
51
+ else:
52
+ assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
53
+
54
+ transform = Compose(
55
+ [
56
+ Resize(
57
+ net_w,
58
+ net_h,
59
+ resize_target=None,
60
+ keep_aspect_ratio=True,
61
+ ensure_multiple_of=32,
62
+ resize_method=resize_mode,
63
+ image_interpolation_method=cv2.INTER_CUBIC,
64
+ ),
65
+ normalization,
66
+ PrepareForNet(),
67
+ ]
68
+ )
69
+
70
+ return transform
71
+
72
+
73
+ def load_model(model_type):
74
+ # https://github.com/isl-org/MiDaS/blob/master/run.py
75
+ # load network
76
+ model_path = ISL_PATHS[model_type]
77
+ if model_type == "dpt_large": # DPT-Large
78
+ model = DPTDepthModel(
79
+ path=model_path,
80
+ backbone="vitl16_384",
81
+ non_negative=True,
82
+ )
83
+ net_w, net_h = 384, 384
84
+ resize_mode = "minimal"
85
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
86
+
87
+ elif model_type == "dpt_hybrid": # DPT-Hybrid
88
+ model = DPTDepthModel(
89
+ path=model_path,
90
+ backbone="vitb_rn50_384",
91
+ non_negative=True,
92
+ )
93
+ net_w, net_h = 384, 384
94
+ resize_mode = "minimal"
95
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
96
+
97
+ elif model_type == "midas_v21":
98
+ model = MidasNet(model_path, non_negative=True)
99
+ net_w, net_h = 384, 384
100
+ resize_mode = "upper_bound"
101
+ normalization = NormalizeImage(
102
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
103
+ )
104
+
105
+ elif model_type == "midas_v21_small":
106
+ model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
107
+ non_negative=True, blocks={'expand': True})
108
+ net_w, net_h = 256, 256
109
+ resize_mode = "upper_bound"
110
+ normalization = NormalizeImage(
111
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
112
+ )
113
+
114
+ else:
115
+ print(f"model_type '{model_type}' not implemented, use: --model_type large")
116
+ assert False
117
+
118
+ transform = Compose(
119
+ [
120
+ Resize(
121
+ net_w,
122
+ net_h,
123
+ resize_target=None,
124
+ keep_aspect_ratio=True,
125
+ ensure_multiple_of=32,
126
+ resize_method=resize_mode,
127
+ image_interpolation_method=cv2.INTER_CUBIC,
128
+ ),
129
+ normalization,
130
+ PrepareForNet(),
131
+ ]
132
+ )
133
+
134
+ return model.eval(), transform
135
+
136
+
137
+ class MiDaSInference(nn.Module):
138
+ MODEL_TYPES_TORCH_HUB = [
139
+ "DPT_Large",
140
+ "DPT_Hybrid",
141
+ "MiDaS_small"
142
+ ]
143
+ MODEL_TYPES_ISL = [
144
+ "dpt_large",
145
+ "dpt_hybrid",
146
+ "midas_v21",
147
+ "midas_v21_small",
148
+ ]
149
+
150
+ def __init__(self, model_type):
151
+ super().__init__()
152
+ assert (model_type in self.MODEL_TYPES_ISL)
153
+ model, _ = load_model(model_type)
154
+ self.model = model
155
+ self.model.train = disabled_train
156
+
157
+ def forward(self, x):
158
+ # x in 0..1 as produced by calling self.transform on a 0..1 float64 numpy array
159
+ # NOTE: we expect that the correct transform has been called during dataloading.
160
+ with torch.no_grad():
161
+ prediction = self.model(x)
162
+ prediction = torch.nn.functional.interpolate(
163
+ prediction.unsqueeze(1),
164
+ size=x.shape[2:],
165
+ mode="bicubic",
166
+ align_corners=False,
167
+ )
168
+ assert prediction.shape == (x.shape[0], 1, x.shape[2], x.shape[3])
169
+ return prediction
170
+
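
A sketch of the intended depth pipeline: load_midas_transform prepares an image for the network and MiDaSInference wraps the frozen model. Note that MiDaSInference("dpt_hybrid") loads the checkpoint listed in ISL_PATHS (midas_models/dpt_hybrid-midas-501f0c75.pt), so the weights must already be on disk; the random image below only illustrates the expected shapes.

import numpy as np
import torch
from ldm.modules.midas.api import MiDaSInference, load_midas_transform

transform = load_midas_transform("dpt_hybrid")
img = np.random.rand(384, 384, 3)                     # HWC float64 image in 0..1
sample = transform({"image": img})["image"]           # CHW float32, resized for the net
x = torch.from_numpy(sample).unsqueeze(0)             # [1 x 3 x H x W]
depth = MiDaSInference("dpt_hybrid")(x)               # [1 x 1 x H x W] relative depth
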
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/__init__.py ADDED
File without changes
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/base_model.py ADDED
@@ -0,0 +1,16 @@
1
+ import torch
2
+
3
+
4
+ class BaseModel(torch.nn.Module):
5
+ def load(self, path):
6
+ """Load model from file.
7
+
8
+ Args:
9
+ path (str): file path
10
+ """
11
+ parameters = torch.load(path, map_location=torch.device('cpu'))
12
+
13
+ if "optimizer" in parameters:
14
+ parameters = parameters["model"]
15
+
16
+ self.load_state_dict(parameters)
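
BaseModel.load accepts either a bare state_dict or a full training checkpoint; when the file contains an "optimizer" key it unwraps the nested "model" entry first. A small sketch with a toy subclass (model.pt is just a placeholder path):

import torch
from ldm.modules.midas.midas.base_model import BaseModel

class TinyNet(BaseModel):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 4)

net = TinyNet()
torch.save(net.state_dict(), "model.pt")   # a bare state_dict loads as-is
net.load("model.pt")
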
docker/bloom13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/blocks.py ADDED
@@ -0,0 +1,342 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from .vit import (
5
+ _make_pretrained_vitb_rn50_384,
6
+ _make_pretrained_vitl16_384,
7
+ _make_pretrained_vitb16_384,
8
+ forward_vit,
9
+ )
10
+
11
+ def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
12
+ if backbone == "vitl16_384":
13
+ pretrained = _make_pretrained_vitl16_384(
14
+ use_pretrained, hooks=hooks, use_readout=use_readout
15
+ )
16
+ scratch = _make_scratch(
17
+ [256, 512, 1024, 1024], features, groups=groups, expand=expand
18
+ ) # ViT-L/16 - 85.0% Top1 (backbone)
19
+ elif backbone == "vitb_rn50_384":
20
+ pretrained = _make_pretrained_vitb_rn50_384(
21
+ use_pretrained,
22
+ hooks=hooks,
23
+ use_vit_only=use_vit_only,
24
+ use_readout=use_readout,
25
+ )
26
+ scratch = _make_scratch(
27
+ [256, 512, 768, 768], features, groups=groups, expand=expand
28
+ ) # ViT-Hybrid (ResNet50 + ViT-B/16) backbone
29
+ elif backbone == "vitb16_384":
30
+ pretrained = _make_pretrained_vitb16_384(
31
+ use_pretrained, hooks=hooks, use_readout=use_readout
32
+ )
33
+ scratch = _make_scratch(
34
+ [96, 192, 384, 768], features, groups=groups, expand=expand
35
+ ) # ViT-B/16 - 84.6% Top1 (backbone)
36
+ elif backbone == "resnext101_wsl":
37
+ pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
38
+ scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # resnext101_wsl
39
+ elif backbone == "efficientnet_lite3":
40
+ pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
41
+ scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3
42
+ else:
43
+ print(f"Backbone '{backbone}' not implemented")
44
+ assert False
45
+
46
+ return pretrained, scratch
47
+
48
+
49
+ def _make_scratch(in_shape, out_shape, groups=1, expand=False):
50
+ scratch = nn.Module()
51
+
52
+ out_shape1 = out_shape
53
+ out_shape2 = out_shape
54
+ out_shape3 = out_shape
55
+ out_shape4 = out_shape
56
+ if expand==True:
57
+ out_shape1 = out_shape
58
+ out_shape2 = out_shape*2
59
+ out_shape3 = out_shape*4
60
+ out_shape4 = out_shape*8
61
+
62
+ scratch.layer1_rn = nn.Conv2d(
63
+ in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
64
+ )
65
+ scratch.layer2_rn = nn.Conv2d(
66
+ in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
67
+ )
68
+ scratch.layer3_rn = nn.Conv2d(
69
+ in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
70
+ )
71
+ scratch.layer4_rn = nn.Conv2d(
72
+ in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
73
+ )
74
+
75
+ return scratch
76
+
77
+
78
+ def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
79
+ efficientnet = torch.hub.load(
80
+ "rwightman/gen-efficientnet-pytorch",
81
+ "tf_efficientnet_lite3",
82
+ pretrained=use_pretrained,
83
+ exportable=exportable
84
+ )
85
+ return _make_efficientnet_backbone(efficientnet)
86
+
87
+
88
+ def _make_efficientnet_backbone(effnet):
89
+ pretrained = nn.Module()
90
+
91
+ pretrained.layer1 = nn.Sequential(
92
+ effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
93
+ )
94
+ pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
95
+ pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
96
+ pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
97
+
98
+ return pretrained
99
+
100
+
101
+ def _make_resnet_backbone(resnet):
102
+ pretrained = nn.Module()
103
+ pretrained.layer1 = nn.Sequential(
104
+ resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
105
+ )
106
+
107
+ pretrained.layer2 = resnet.layer2
108
+ pretrained.layer3 = resnet.layer3
109
+ pretrained.layer4 = resnet.layer4
110
+
111
+ return pretrained
112
+
113
+
114
+ def _make_pretrained_resnext101_wsl(use_pretrained):
115
+ resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
116
+ return _make_resnet_backbone(resnet)
117
+
118
+
119
+
120
+ class Interpolate(nn.Module):
121
+ """Interpolation module.
122
+ """
123
+
124
+ def __init__(self, scale_factor, mode, align_corners=False):
125
+ """Init.
126
+
127
+ Args:
128
+ scale_factor (float): scaling
129
+ mode (str): interpolation mode
130
+ """
131
+ super(Interpolate, self).__init__()
132
+
133
+ self.interp = nn.functional.interpolate
134
+ self.scale_factor = scale_factor
135
+ self.mode = mode
136
+ self.align_corners = align_corners
137
+
138
+ def forward(self, x):
139
+ """Forward pass.
140
+
141
+ Args:
142
+ x (tensor): input
143
+
144
+ Returns:
145
+ tensor: interpolated data
146
+ """
147
+
148
+ x = self.interp(
149
+ x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
150
+ )
151
+
152
+ return x
153
+
154
+
155
+ class ResidualConvUnit(nn.Module):
156
+ """Residual convolution module.
157
+ """
158
+
159
+ def __init__(self, features):
160
+ """Init.
161
+
162
+ Args:
163
+ features (int): number of features
164
+ """
165
+ super().__init__()
166
+
167
+ self.conv1 = nn.Conv2d(
168
+ features, features, kernel_size=3, stride=1, padding=1, bias=True
169
+ )
170
+
171
+ self.conv2 = nn.Conv2d(
172
+ features, features, kernel_size=3, stride=1, padding=1, bias=True
173
+ )
174
+
175
+ self.relu = nn.ReLU(inplace=True)
176
+
177
+ def forward(self, x):
178
+ """Forward pass.
179
+
180
+ Args:
181
+ x (tensor): input
182
+
183
+ Returns:
184
+ tensor: output
185
+ """
186
+ out = self.relu(x)
187
+ out = self.conv1(out)
188
+ out = self.relu(out)
189
+ out = self.conv2(out)
190
+
191
+ return out + x
192
+
193
+
194
+ class FeatureFusionBlock(nn.Module):
195
+ """Feature fusion block.
196
+ """
197
+
198
+ def __init__(self, features):
199
+ """Init.
200
+
201
+ Args:
202
+ features (int): number of features
203
+ """
204
+ super(FeatureFusionBlock, self).__init__()
205
+
206
+ self.resConfUnit1 = ResidualConvUnit(features)
207
+ self.resConfUnit2 = ResidualConvUnit(features)
208
+
209
+ def forward(self, *xs):
210
+ """Forward pass.
211
+
212
+ Returns:
213
+ tensor: output
214
+ """
215
+ output = xs[0]
216
+
217
+ if len(xs) == 2:
218
+ output += self.resConfUnit1(xs[1])
219
+
220
+ output = self.resConfUnit2(output)
221
+
222
+ output = nn.functional.interpolate(
223
+ output, scale_factor=2, mode="bilinear", align_corners=True
224
+ )
225
+
226
+ return output
227
+
228
+
229
+
230
+
231
+ class ResidualConvUnit_custom(nn.Module):
232
+ """Residual convolution module.
233
+ """
234
+
235
+ def __init__(self, features, activation, bn):
236
+ """Init.
237
+
238
+ Args:
239
+ features (int): number of features
240
+ """
241
+ super().__init__()
242
+
243
+ self.bn = bn
244
+
245
+ self.groups=1
246
+
247
+ self.conv1 = nn.Conv2d(
248
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
249
+ )
250
+
251
+ self.conv2 = nn.Conv2d(
252
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
253
+ )
254
+
255
+ if self.bn==True:
256
+ self.bn1 = nn.BatchNorm2d(features)
257
+ self.bn2 = nn.BatchNorm2d(features)
258
+
259
+ self.activation = activation
260
+
261
+ self.skip_add = nn.quantized.FloatFunctional()
262
+
263
+ def forward(self, x):
264
+ """Forward pass.
265
+
266
+ Args:
267
+ x (tensor): input
268
+
269
+ Returns:
270
+ tensor: output
271
+ """
272
+
273
+ out = self.activation(x)
274
+ out = self.conv1(out)
275
+ if self.bn==True:
276
+ out = self.bn1(out)
277
+
278
+ out = self.activation(out)
279
+ out = self.conv2(out)
280
+ if self.bn==True:
281
+ out = self.bn2(out)
282
+
283
+ if self.groups > 1:
284
+ out = self.conv_merge(out)
285
+
286
+ return self.skip_add.add(out, x)
287
+
288
+ # return out + x
289
+
290
+
291
+ class FeatureFusionBlock_custom(nn.Module):
292
+ """Feature fusion block.
293
+ """
294
+
295
+ def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
296
+ """Init.
297
+
298
+ Args:
299
+ features (int): number of features
300
+ """
301
+ super(FeatureFusionBlock_custom, self).__init__()
302
+
303
+ self.deconv = deconv
304
+ self.align_corners = align_corners
305
+
306
+ self.groups=1
307
+
308
+ self.expand = expand
309
+ out_features = features
310
+ if self.expand==True:
311
+ out_features = features//2
312
+
313
+ self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
314
+
315
+ self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
316
+ self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
317
+
318
+ self.skip_add = nn.quantized.FloatFunctional()
319
+
320
+ def forward(self, *xs):
321
+ """Forward pass.
322
+
323
+ Returns:
324
+ tensor: output
325
+ """
326
+ output = xs[0]
327
+
328
+ if len(xs) == 2:
329
+ res = self.resConfUnit1(xs[1])
330
+ output = self.skip_add.add(output, res)
331
+ # output += res
332
+
333
+ output = self.resConfUnit2(output)
334
+
335
+ output = nn.functional.interpolate(
336
+ output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
337
+ )
338
+
339
+ output = self.out_conv(output)
340
+
341
+ return output
342
+
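
Finally, a sketch of the decoder-side building block defined last: FeatureFusionBlock_custom adds a lateral (skip) feature map to the incoming decoder features, refines the sum with two residual conv units, upsamples by 2x, and projects through out_conv (halving the channels when expand=True). The tensor sizes below are illustrative only.

import torch
from torch import nn
from ldm.modules.midas.midas.blocks import FeatureFusionBlock_custom

fuse = FeatureFusionBlock_custom(features=256, activation=nn.ReLU(False),
                                 bn=False, expand=False, align_corners=True)
decoder_feat = torch.randn(1, 256, 24, 24)   # from the previous fusion stage
skip_feat = torch.randn(1, 256, 24, 24)      # lateral connection from the encoder
out = fuse(decoder_feat, skip_feat)          # [1, 256, 48, 48]: fused and upsampled 2x
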