Update src/flux/pipeline_tools.py
src/flux/pipeline_tools.py  (CHANGED, +619 −619)
@@ -29,657 +29,657 @@ from diffusers.models.transformers.transformer_flux import (
    unscale_lora_layers,
    logger,
)
from torchvision.transforms import ToPILImage
from peft.tuners.tuners_utils import BaseTunerLayer
# from optimum.quanto import (
#     freeze, quantize, QTensor, qfloat8, qint8, qint4, qint2,
# )
import re
import safetensors
from src.adapters.mod_adapters import CLIPModAdapter
from peft import LoraConfig, set_peft_model_state_dict
from transformers import CLIPProcessor, CLIPModel, CLIPVisionModelWithProjection, CLIPVisionModel


def encode_vae_images(pipeline: FluxPipeline, images: Tensor):
    images = pipeline.image_processor.preprocess(images)
    images = images.to(pipeline.device).to(pipeline.dtype)
    images = pipeline.vae.encode(images).latent_dist.sample()
    images = (
        images - pipeline.vae.config.shift_factor
    ) * pipeline.vae.config.scaling_factor
    images_tokens = pipeline._pack_latents(images, *images.shape)
    images_ids = pipeline._prepare_latent_image_ids(
        images.shape[0],
        images.shape[2],
        images.shape[3],
        pipeline.device,
        pipeline.dtype,
    )
    if images_tokens.shape[1] != images_ids.shape[0]:
        images_ids = pipeline._prepare_latent_image_ids(
            images.shape[0],
            images.shape[2] // 2,
            images.shape[3] // 2,
            pipeline.device,
            pipeline.dtype,
        )
    return images_tokens, images_ids

def decode_vae_images(pipeline: FluxPipeline, latents: Tensor, height, width, output_type: Optional[str] = "pil"):
    latents = pipeline._unpack_latents(latents, height, width, pipeline.vae_scale_factor)
    latents = (latents / pipeline.vae.config.scaling_factor) + pipeline.vae.config.shift_factor
    image = pipeline.vae.decode(latents, return_dict=False)[0]
    return pipeline.image_processor.postprocess(image, output_type=output_type)


def _get_clip_prompt_embeds(
    self,
    prompt: Union[str, List[str]],
    num_images_per_prompt: int = 1,
    device: Optional[torch.device] = None,
):
    device = device or self._execution_device

    prompt = [prompt] if isinstance(prompt, str) else prompt
    batch_size = len(prompt)

    if isinstance(self, TextualInversionLoaderMixin):
        prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

    text_inputs = self.tokenizer(
        prompt,
        padding="max_length",
        max_length=self.tokenizer_max_length,
        truncation=True,
        return_overflowing_tokens=False,
        return_length=False,
        return_tensors="pt",
    )

    text_input_ids = text_inputs.input_ids

    prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False)

    # Use pooled output of CLIPTextModel
    prompt_embeds = prompt_embeds.pooler_output
    prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)

    # duplicate text embeddings for each generation per prompt, using mps friendly method
    prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt)
    prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1)

    return prompt_embeds

def encode_prompt_with_clip_t5(
    self,
    prompt: Union[str, List[str]],
    prompt_2: Union[str, List[str]],
    device: Optional[torch.device] = None,
    num_images_per_prompt: int = 1,
    prompt_embeds: Optional[torch.FloatTensor] = None,
    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
    max_sequence_length: int = 512,
    lora_scale: Optional[float] = None,
):
    r"""

    Args:
        prompt (`str` or `List[str]`, *optional*):
            prompt to be encoded
        prompt_2 (`str` or `List[str]`, *optional*):
            The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
            used in all text-encoders
        device: (`torch.device`):
            torch device
        num_images_per_prompt (`int`):
            number of images that should be generated per prompt
        prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
            provided, text embeddings will be generated from `prompt` input argument.
        pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
            If not provided, pooled text embeddings will be generated from `prompt` input argument.
        lora_scale (`float`, *optional*):
            A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
    """
    device = device or self._execution_device

    # set lora scale so that monkey patched LoRA
    # function of text encoder can correctly access it
    if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin):
        self._lora_scale = lora_scale

        # dynamically adjust the LoRA scale
        if self.text_encoder is not None and USE_PEFT_BACKEND:
            scale_lora_layers(self.text_encoder, lora_scale)
        if self.text_encoder_2 is not None and USE_PEFT_BACKEND:
            scale_lora_layers(self.text_encoder_2, lora_scale)

    prompt = [prompt] if isinstance(prompt, str) else prompt

    if prompt_embeds is None:
        prompt_2 = prompt_2 or prompt
        prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2

        # We only use the pooled prompt output from the CLIPTextModel
        pooled_prompt_embeds = _get_clip_prompt_embeds(
            self=self,
            prompt=prompt,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
        )
        if self.text_encoder_2 is not None:
            prompt_embeds = self._get_t5_prompt_embeds(
                prompt=prompt_2,
                num_images_per_prompt=num_images_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
            )

    if self.text_encoder is not None:
        if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
            # Retrieve the original scale by scaling back the LoRA layers
            unscale_lora_layers(self.text_encoder, lora_scale)

    if self.text_encoder_2 is not None:
        if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
            # Retrieve the original scale by scaling back the LoRA layers
            unscale_lora_layers(self.text_encoder_2, lora_scale)

    dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype
    if self.text_encoder_2 is not None:
        text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype)
    else:
        text_ids = None

    return prompt_embeds, pooled_prompt_embeds, text_ids


def prepare_text_input(
    pipeline: FluxPipeline,
    prompts,
    max_sequence_length=512,
):
    # Turn off warnings (CLIP overflow)
    logger.setLevel(logging.ERROR)
    (
        t5_prompt_embeds,
        pooled_prompt_embeds,
        text_ids,
    ) = encode_prompt_with_clip_t5(
        self=pipeline,
        prompt=prompts,
        prompt_2=None,
        prompt_embeds=None,
        pooled_prompt_embeds=None,
        device=pipeline.device,
        num_images_per_prompt=1,
        max_sequence_length=max_sequence_length,
        lora_scale=None,
    )
    # Turn on warnings
    logger.setLevel(logging.WARNING)
    return t5_prompt_embeds, pooled_prompt_embeds, text_ids

def prepare_t5_input(
    pipeline: FluxPipeline,
    prompts,
    max_sequence_length=512,
):
    # Turn off warnings (CLIP overflow)
    logger.setLevel(logging.ERROR)
    (
        t5_prompt_embeds,
        pooled_prompt_embeds,
        text_ids,
    ) = encode_prompt_with_clip_t5(
        self=pipeline,
        prompt=prompts,
        prompt_2=None,
        prompt_embeds=None,
        pooled_prompt_embeds=None,
        device=pipeline.device,
        num_images_per_prompt=1,
        max_sequence_length=max_sequence_length,
        lora_scale=None,
    )
    # Turn on warnings
    logger.setLevel(logging.WARNING)
    return t5_prompt_embeds, pooled_prompt_embeds, text_ids

def tokenize_t5_prompt(pipe, input_prompt, max_length, **kargs):
    return pipe.tokenizer_2(
        input_prompt,
        padding="max_length",
        max_length=max_length,
        truncation=True,
        return_length=False,
        return_overflowing_tokens=False,
        return_tensors="pt",
        **kargs,
    )

def clear_attn_maps(transformer):
    for i, block in enumerate(transformer.transformer_blocks):
        if hasattr(block.attn, "attn_maps"):
            del block.attn.attn_maps
            del block.attn.timestep
    for i, block in enumerate(transformer.single_transformer_blocks):
        if hasattr(block.attn, "cond2latents"):
            del block.attn.cond2latents

def gather_attn_maps(transformer, clear=False):
    t2i_attn_maps = {}
    i2t_attn_maps = {}
    for i, block in enumerate(transformer.transformer_blocks):
        name = f"block_{i}"
        if hasattr(block.attn, "attn_maps"):
            attention_maps = block.attn.attn_maps
            timesteps = block.attn.timestep  # (B,)
            for (timestep, (t2i_attn_map, i2t_attn_map)) in zip(timesteps, attention_maps):
                timestep = str(timestep.item())

                t2i_attn_maps[timestep] = t2i_attn_maps.get(timestep, dict())
                t2i_attn_maps[timestep][name] = t2i_attn_maps[timestep].get(name, [])
                t2i_attn_maps[timestep][name].append(t2i_attn_map.cpu())

                i2t_attn_maps[timestep] = i2t_attn_maps.get(timestep, dict())
                i2t_attn_maps[timestep][name] = i2t_attn_maps[timestep].get(name, [])
                i2t_attn_maps[timestep][name].append(i2t_attn_map.cpu())

            if clear:
                del block.attn.attn_maps

    for timestep in t2i_attn_maps:
        for name in t2i_attn_maps[timestep]:
            t2i_attn_maps[timestep][name] = torch.cat(t2i_attn_maps[timestep][name], dim=0)
            i2t_attn_maps[timestep][name] = torch.cat(i2t_attn_maps[timestep][name], dim=0)

    return t2i_attn_maps, i2t_attn_maps

def process_token(token, startofword):
    if '</w>' in token:
        token = token.replace('</w>', '')
        if startofword:
            token = '<' + token + '>'
        else:
            token = '-' + token + '>'
            startofword = True
    elif token not in ['<|startoftext|>', '<|endoftext|>']:
        if startofword:
            token = '<' + token + '-'
            startofword = False
        else:
            token = '-' + token + '-'
    return token, startofword

def save_attention_image(attn_map, tokens, batch_dir, to_pil):
    startofword = True
    for i, (token, a) in enumerate(zip(tokens, attn_map[:len(tokens)])):
        token, startofword = process_token(token, startofword)
        token = token.replace("/", "-")
        if token == '-<pad>-':
            continue
        a = a.to(torch.float32)
        a = a / a.max() * 255 / 256
        to_pil(a).save(os.path.join(batch_dir, f'{i}-{token}.png'))

def save_attention_maps(attn_maps, pipe, prompts, base_dir='attn_maps'):
    to_pil = ToPILImage()

    token_ids = tokenize_t5_prompt(pipe, prompts, 512).input_ids  # (B, 512)
    token_ids = [x for x in token_ids]
    total_tokens = [pipe.tokenizer_2.convert_ids_to_tokens(token_id) for token_id in token_ids]

    os.makedirs(base_dir, exist_ok=True)

    total_attn_map_shape = (256, 256)
    total_attn_map_number = 0

    # (B, 24, H, W, 512) -> (B, H, W, 512) -> (B, 512, H, W)
    print(attn_maps.keys())
    total_attn_map = list(list(attn_maps.values())[0].values())[0].sum(1)
    total_attn_map = total_attn_map.permute(0, 3, 1, 2)
    total_attn_map = torch.zeros_like(total_attn_map)
    total_attn_map = F.interpolate(total_attn_map, size=total_attn_map_shape, mode='bilinear', align_corners=False)

    for timestep, layers in attn_maps.items():
        timestep_dir = os.path.join(base_dir, f'{timestep}')
        os.makedirs(timestep_dir, exist_ok=True)

        for layer, attn_map in layers.items():
            layer_dir = os.path.join(timestep_dir, f'{layer}')
            os.makedirs(layer_dir, exist_ok=True)

            attn_map = attn_map.sum(1).squeeze(1).permute(0, 3, 1, 2)

            resized_attn_map = F.interpolate(attn_map, size=total_attn_map_shape, mode='bilinear', align_corners=False)
            total_attn_map += resized_attn_map
            total_attn_map_number += 1

            for batch, (attn_map, tokens) in enumerate(zip(resized_attn_map, total_tokens)):
                save_attention_image(attn_map, tokens, layer_dir, to_pil)

            # for batch, (tokens, attn) in enumerate(zip(total_tokens, attn_map)):
            #     batch_dir = os.path.join(layer_dir, f'batch-{batch}')
            #     os.makedirs(batch_dir, exist_ok=True)
            #     save_attention_image(attn, tokens, batch_dir, to_pil)

    total_attn_map /= total_attn_map_number
    for batch, (attn_map, tokens) in enumerate(zip(total_attn_map, total_tokens)):
        batch_dir = os.path.join(base_dir, f'batch-{batch}')
        os.makedirs(batch_dir, exist_ok=True)
        save_attention_image(attn_map, tokens, batch_dir, to_pil)

def gather_cond2latents(transformer, clear=False):
    c2l_attn_maps = {}
    # for i, block in enumerate(transformer.transformer_blocks):
    for i, block in enumerate(transformer.single_transformer_blocks):
        name = f"block_{i}"
        if hasattr(block.attn, "cond2latents"):
            attention_maps = block.attn.cond2latents
            timesteps = block.attn.cond_timesteps  # (B,)
            for (timestep, c2l_attn_map) in zip(timesteps, attention_maps):
                timestep = str(timestep.item())

                c2l_attn_maps[timestep] = c2l_attn_maps.get(timestep, dict())
                c2l_attn_maps[timestep][name] = c2l_attn_maps[timestep].get(name, [])
                c2l_attn_maps[timestep][name].append(c2l_attn_map.cpu())

            if clear:
                # del block.attn.attn_maps
                del block.attn.cond2latents
                del block.attn.cond_timesteps

    for timestep in c2l_attn_maps:
        for name in c2l_attn_maps[timestep]:
            c2l_attn_maps[timestep][name] = torch.cat(c2l_attn_maps[timestep][name], dim=0)

    return c2l_attn_maps

def save_cond2latent_image(attn_map, batch_dir, to_pil):
    for i, a in enumerate(attn_map):  # (N, H, W)
        a = a.to(torch.float32)
        a = a / a.max() * 255 / 256
        to_pil(a).save(os.path.join(batch_dir, f'{i}.png'))

def save_cond2latent(attn_maps, base_dir='attn_maps'):
    to_pil = ToPILImage()

    os.makedirs(base_dir, exist_ok=True)

    total_attn_map_shape = (256, 256)
    total_attn_map_number = 0

    # (N, H, W) -> (1, N, H, W)
    total_attn_map = list(list(attn_maps.values())[0].values())[0].unsqueeze(0)
    total_attn_map = torch.zeros_like(total_attn_map)
    total_attn_map = F.interpolate(total_attn_map, size=total_attn_map_shape, mode='bilinear', align_corners=False)

    for timestep, layers in attn_maps.items():
        cur_ts_attn_map = torch.zeros_like(total_attn_map)
        cur_ts_attn_map_number = 0

        timestep_dir = os.path.join(base_dir, f'{timestep}')
        os.makedirs(timestep_dir, exist_ok=True)

        for layer, attn_map in layers.items():
            # layer_dir = os.path.join(timestep_dir, f'{layer}')
            # os.makedirs(layer_dir, exist_ok=True)

            attn_map = attn_map.unsqueeze(0)  # (1, N, H, W)
            resized_attn_map = F.interpolate(attn_map, size=total_attn_map_shape, mode='bilinear', align_corners=False)

            cur_ts_attn_map += resized_attn_map
            cur_ts_attn_map_number += 1

        for batch, attn_map in enumerate(cur_ts_attn_map / cur_ts_attn_map_number):
            save_cond2latent_image(attn_map, timestep_dir, to_pil)

        total_attn_map += cur_ts_attn_map
        total_attn_map_number += cur_ts_attn_map_number

    total_attn_map /= total_attn_map_number
    for batch, attn_map in enumerate(total_attn_map):
        batch_dir = os.path.join(base_dir, f'batch-{batch}')
        os.makedirs(batch_dir, exist_ok=True)
        save_cond2latent_image(attn_map, batch_dir, to_pil)

def quantization(pipe, qtype):
    if qtype != "None" and qtype != "":
        if qtype.endswith("quanto"):
            if qtype == "int2-quanto":
                quant_level = qint2
            elif qtype == "int4-quanto":
                quant_level = qint4
            elif qtype == "int8-quanto":
                quant_level = qint8
            elif qtype == "fp8-quanto":
                quant_level = qfloat8
            else:
                raise ValueError(f"Invalid quantisation level: {qtype}")

            extra_quanto_args = {}
            extra_quanto_args["exclude"] = [
                "*.norm",
                "*.norm1",
                "*.norm2",
                "*.norm2_context",
                "proj_out",
                "x_embedder",
                "norm_out",
                "context_embedder",
            ]
            try:
                quantize(pipe.transformer, weights=quant_level, **extra_quanto_args)
                quantize(pipe.text_encoder_2, weights=quant_level, **extra_quanto_args)
                print("[Quantization] Start freezing")
                freeze(pipe.transformer)
                freeze(pipe.text_encoder_2)
                print("[Quantization] Finished")
            except Exception as e:
                if "out of memory" in str(e).lower():
                    print(
                        "GPU ran out of memory during quantisation. Use --quantize_via=cpu to use the slower CPU method."
                    )
                raise e
        else:
            assert qtype == "fp8-ao"
            from torchao.float8 import convert_to_float8_training, Float8LinearConfig
            def module_filter_fn(mod: torch.nn.Module, fqn: str):
                # don't convert the output module
                if fqn == "proj_out":
                    return False
                # don't convert linear modules with weight dimensions not divisible by 16
                if isinstance(mod, torch.nn.Linear):
                    if mod.in_features % 16 != 0 or mod.out_features % 16 != 0:
                        return False
                return True
            convert_to_float8_training(
                pipe.transformer, module_filter_fn=module_filter_fn, config=Float8LinearConfig(pad_inner_dim=True)
            )

class CustomFluxPipeline:
    def __init__(
        self,
        config,
        device="cuda",
        ckpt_root=None,
        ckpt_root_condition=None,
        torch_dtype=torch.bfloat16,
    ):
        model_path = os.getenv("FLUX_MODEL_PATH", "black-forest-labs/FLUX.1-dev")
        print("[CustomFluxPipeline] Loading FLUX Pipeline")
        self.pipe = FluxPipeline.from_pretrained(model_path, torch_dtype=torch_dtype).to(device)

        self.config = config
        self.device = device
        self.dtype = torch_dtype
        if config["model"].get("dit_quant", "None") != "None":
            quantization(self.pipe, config["model"]["dit_quant"])

        self.modulation_adapters = []
        self.pipe.modulation_adapters = []

        try:
            if config["model"]["modulation"]["use_clip"]:
                load_clip(self, config, torch_dtype, device, None, is_training=False)
        except Exception as e:
            print(e)

        if config["model"]["use_dit_lora"] or config["model"]["use_condition_dblock_lora"] or config["model"]["use_condition_sblock_lora"]:
            if ckpt_root_condition is None and (config["model"]["use_condition_dblock_lora"] or config["model"]["use_condition_sblock_lora"]):
                ckpt_root_condition = ckpt_root
            load_dit_lora(self, self.pipe, config, torch_dtype, device, f"{ckpt_root}", f"{ckpt_root_condition}", is_training=False)

    def add_modulation_adapter(self, modulation_adapter):
        self.modulation_adapters.append(modulation_adapter)
        self.pipe.modulation_adapters.append(modulation_adapter)

    def clear_modulation_adapters(self):
        self.modulation_adapters = []
        self.pipe.modulation_adapters = []
        torch.cuda.empty_cache()

def load_clip(self, config, torch_dtype, device, ckpt_dir=None, is_training=False):
    model_path = os.getenv("CLIP_MODEL_PATH", "openai/clip-vit-large-patch14")
    clip_model = CLIPVisionModelWithProjection.from_pretrained(model_path).to(device, dtype=torch_dtype)
    clip_processor = CLIPProcessor.from_pretrained(model_path)
    self.pipe.clip_model = clip_model
    self.pipe.clip_processor = clip_processor

def load_dit_lora(self, pipe, config, torch_dtype, device, ckpt_dir=None, condition_ckpt_dir=None, is_training=False):

    if not config["model"]["use_condition_dblock_lora"] and not config["model"]["use_condition_sblock_lora"] and not config["model"]["use_dit_lora"]:
        print("[load_dit_lora] no dit lora, no condition lora")
        return []

    adapter_names = ["default", "condition"]

    if condition_ckpt_dir is None:
        condition_ckpt_dir = ckpt_dir

    if not config["model"]["use_condition_dblock_lora"] and not config["model"]["use_condition_sblock_lora"]:
        print("[load_dit_lora] no condition lora")
        adapter_names.pop(1)
    elif condition_ckpt_dir is not None and os.path.exists(os.path.join(condition_ckpt_dir, "pytorch_lora_weights_condition.safetensors")):
        assert "condition" in adapter_names
        print(f"[load_dit_lora] load condition lora from {condition_ckpt_dir}")
        pipe.transformer.load_lora_adapter(condition_ckpt_dir, use_safetensors=True, adapter_name="condition", weight_name="pytorch_lora_weights_condition.safetensors")  # TODO: check if they are trainable
    else:
        assert is_training
        assert "condition" in adapter_names
        print("[load_dit_lora] init new condition lora")
        pipe.transformer.add_adapter(LoraConfig(**config["model"]["condition_lora_config"]), adapter_name="condition")

    if not config["model"]["use_dit_lora"]:
        print("[load_dit_lora] no dit lora")
        adapter_names.pop(0)
    elif ckpt_dir is not None and os.path.exists(os.path.join(ckpt_dir, "pytorch_lora_weights.safetensors")):
        assert "default" in adapter_names
        print(f"[load_dit_lora] load dit lora from {ckpt_dir}")
        lora_file = os.path.join(ckpt_dir, "pytorch_lora_weights.safetensors")
        lora_state_dict = safetensors.torch.load_file(lora_file, device="cpu")

        single_lora_pattern = "(.*single_transformer_blocks\\.[0-9]+\\.norm\\.linear|.*single_transformer_blocks\\.[0-9]+\\.proj_mlp|.*single_transformer_blocks\\.[0-9]+\\.proj_out|.*single_transformer_blocks\\.[0-9]+\\.attn.to_k|.*single_transformer_blocks\\.[0-9]+\\.attn.to_q|.*single_transformer_blocks\\.[0-9]+\\.attn.to_v|.*single_transformer_blocks\\.[0-9]+\\.attn.to_out)"
        latent_lora_pattern = "(.*(?<!single_)transformer_blocks\\.[0-9]+\\.norm1\\.linear|.*(?<!single_)transformer_blocks\\.[0-9]+\\.attn\\.to_k|.*(?<!single_)transformer_blocks\\.[0-9]+\\.attn\\.to_q|.*(?<!single_)transformer_blocks\\.[0-9]+\\.attn\\.to_v|.*(?<!single_)transformer_blocks\\.[0-9]+\\.attn\\.to_out\\.0|.*(?<!single_)transformer_blocks\\.[0-9]+\\.ff\\.net\\.2)"
        use_pretrained_dit_single_lora = config["model"].get("use_pretrained_dit_single_lora", True)
        use_pretrained_dit_latent_lora = config["model"].get("use_pretrained_dit_latent_lora", True)
        if not use_pretrained_dit_single_lora or not use_pretrained_dit_latent_lora:
            lora_state_dict_keys = list(lora_state_dict.keys())
            for layer_name in lora_state_dict_keys:
                if not use_pretrained_dit_single_lora:
                    if re.search(single_lora_pattern, layer_name):
                        del lora_state_dict[layer_name]
                if not use_pretrained_dit_latent_lora:
                    if re.search(latent_lora_pattern, layer_name):
                        del lora_state_dict[layer_name]
            pipe.transformer.add_adapter(LoraConfig(**config["model"]["dit_lora_config"]), adapter_name="default")
            set_peft_model_state_dict(pipe.transformer, lora_state_dict, adapter_name="default")
        else:
            pipe.transformer.load_lora_adapter(ckpt_dir, use_safetensors=True, adapter_name="default", weight_name="pytorch_lora_weights.safetensors")  # TODO: check if they are trainable
    else:
        assert is_training
        assert "default" in adapter_names
        print("[load_dit_lora] init new dit lora")
        pipe.transformer.add_adapter(LoraConfig(**config["model"]["dit_lora_config"]), adapter_name="default")

    assert len(adapter_names) <= 2 and len(adapter_names) > 0
    for name, module in pipe.transformer.named_modules():
        if isinstance(module, BaseTunerLayer):
            module.set_adapter(adapter_names)

    if "default" in adapter_names: assert config["model"]["use_dit_lora"]
    if "condition" in adapter_names: assert config["model"]["use_condition_dblock_lora"] or config["model"]["use_condition_sblock_lora"]

    lora_layers = list(filter(
        lambda p: p[1].requires_grad, pipe.transformer.named_parameters()
    ))

    lora_layers = [l[1] for l in lora_layers]
    return lora_layers

def load_modulation_adapter(self, config, torch_dtype, device, ckpt_dir=None, is_training=False):
    adapter_type = config["model"]["modulation"]["adapter_type"]

    if ckpt_dir is not None and os.path.exists(ckpt_dir):
        print(f"loading modulation adapter from {ckpt_dir}")
        modulation_adapter = CLIPModAdapter.from_pretrained(
            ckpt_dir, subfolder="modulation_adapter", strict=False,
            low_cpu_mem_usage=False, device_map=None,
        ).to(device)
    else:
        print(f"Init new modulation adapter")
        adapter_layers = config["model"]["modulation"]["adapter_layers"]
        adapter_width = config["model"]["modulation"]["adapter_width"]
        pblock_adapter_layers = config["model"]["modulation"]["per_block_adapter_layers"]
        pblock_adapter_width = config["model"]["modulation"]["per_block_adapter_width"]
        pblock_adapter_single_blocks = config["model"]["modulation"]["per_block_adapter_single_blocks"]
        use_text_mod = config["model"]["modulation"]["use_text_mod"]
        use_img_mod = config["model"]["modulation"]["use_img_mod"]

        out_dim = config["model"]["modulation"]["out_dim"]
        if adapter_type == "clip_adapter":
            modulation_adapter = CLIPModAdapter(
                out_dim=out_dim,
                width=adapter_width,
                pblock_width=pblock_adapter_width,
                layers=adapter_layers,
                pblock_layers=pblock_adapter_layers,
                heads=8,
                input_text_dim=4096,
                input_image_dim=1024,
                pblock_single_blocks=pblock_adapter_single_blocks,
            )
        else:
            raise NotImplementedError()

    if is_training:
        modulation_adapter.train()
        try:
            modulation_adapter.enable_gradient_checkpointing()
        except Exception as e:
            print(e)
        if not config["model"]["modulation"]["use_perblock_adapter"]:
            try:
                modulation_adapter.net2.requires_grad_(False)
            except Exception as e:
                print(e)
    else:
        modulation_adapter.requires_grad_(False)

    modulation_adapter.to(device, dtype=torch_dtype)
    return modulation_adapter


def load_ckpt(self, ckpt_dir, is_training=False):
    if self.config["model"]["use_dit_lora"]:
        self.pipe.transformer.delete_adapters(["subject"])
        lora_path = f"{ckpt_dir}/pytorch_lora_weights.safetensors"
        print(f"Loading DIT Lora from {lora_path}")
        self.pipe.load_lora_weights(lora_path, adapter_name="subject")
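For orientation, a minimal usage sketch of the helpers added in this hunk (not part of the commit). The config keys are assumptions mirroring what CustomFluxPipeline and load_dit_lora read, the import path assumes the repo root is on PYTHONPATH, and the prompt/image inputs are placeholders.

# Hedged sketch: load the wrapped pipeline, encode a prompt, and pack a dummy image into latent tokens.
import torch
from src.flux.pipeline_tools import CustomFluxPipeline, prepare_text_input, encode_vae_images

# Placeholder config covering only the keys this module actually reads.
config = {
    "model": {
        "dit_quant": "None",                # skip quantization()
        "modulation": {"use_clip": True},   # triggers load_clip() for the CLIP vision tower
        "use_dit_lora": False,              # no LoRA checkpoints in this sketch
        "use_condition_dblock_lora": False,
        "use_condition_sblock_lora": False,
    }
}

# Downloads FLUX.1-dev unless FLUX_MODEL_PATH points at a local copy.
wrapper = CustomFluxPipeline(config, device="cuda", torch_dtype=torch.bfloat16)
pipe = wrapper.pipe

# CLIP pooled embeddings + T5 embeddings + text ids; CLIP overflow warnings are silenced inside.
t5_embeds, pooled_embeds, text_ids = prepare_text_input(pipe, ["a photo of a cat"])

# Pack a reference image batch (values in [0, 1]) into FLUX latent tokens, e.g. for conditioning.
images = torch.rand(1, 3, 512, 512)
image_tokens, image_ids = encode_vae_images(pipe, images)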