Lifeinhockey committed on
Commit
af39a85
·
verified ·
1 Parent(s): c866609

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -57
app.py CHANGED
@@ -9,8 +9,9 @@ from diffusers import (
9
  AutoPipelineForImage2Image,
10
  DDIMScheduler,
11
  UniPCMultistepScheduler,
12
- LCMScheduler,
13
- AutoPipelineForText2Image)
 
14
  from transformers import pipeline
15
  from diffusers.utils import load_image, make_image_grid
16
  from peft import PeftModel, LoraConfig
@@ -493,62 +494,62 @@ def infer(
493
  else:
494
  # Генерация изображений с LCM_Adapter ---------------------------------------------------------------------------------------------
495
 
496
- if use_LCM_adapter:
497
 
498
- if LCM_adapter == "lcm-lora-sdv1-5":
499
- adapter_id = "latent-consistency/lcm-lora-sdv1-5"
500
-
501
- generator = torch.Generator(device).manual_seed(seed)
502
-
503
- pipe_default = get_lora_sd_pipeline(lora_dir='lora_man_animestyle', base_model_name_or_path=model_default, dtype=torch_dtype).to(device)
504
- pipe = pipe_default
505
- pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
506
- pipe.to(device)
507
-
508
- pipe.load_lora_weights(adapter_id)
509
- pipe.fuse_lora()
510
-
511
- prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
512
- negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
513
- prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
514
-
515
- params = {
516
- 'prompt_embeds': prompt_embeds,
517
- 'negative_prompt_embeds': negative_prompt_embeds,
518
- 'guidance_scale': guidance_scale,
519
- 'num_inference_steps': num_inference_steps,
520
- 'width': width,
521
- 'height': height,
522
- 'generator': generator,
523
- }
524
-
525
- image = pipe(**params).images[0]
526
- else:
527
- # Генерация изображений с DDIMScheduler ---------------------------------------------------------------------------------------------
528
-
529
- if use_DDIMScheduler:
530
-
531
- generator = torch.Generator(device).manual_seed(seed)
532
-
533
- pipe = StableDiffusionPipeline.from_pretrained(model_default, torch_dtype=torch_dtype).to(device)
534
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
535
-
536
- prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
537
- negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
538
- prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
539
-
540
- params = {
541
- 'prompt_embeds': prompt_embeds,
542
- 'negative_prompt_embeds': negative_prompt_embeds,
543
- 'guidance_scale': guidance_scale,
544
- 'num_inference_steps': num_inference_steps,
545
- 'width': width,
546
- 'height': height,
547
- 'generator': generator,
548
- }
549
-
550
- image = pipe(**params).images[0]
551
- else:
552
  # Генерация изображений с LORA без ControlNet и IP_Adapter ---------------------------------------------------------------------------------------------
553
 
554
  # Инициализация ControlNet
 
9
  AutoPipelineForImage2Image,
10
  DDIMScheduler,
11
  UniPCMultistepScheduler,
12
+ # LCMScheduler,
13
+ # AutoPipelineForText2Image
14
+ )
15
  from transformers import pipeline
16
  from diffusers.utils import load_image, make_image_grid
17
  from peft import PeftModel, LoraConfig
 
494
  else:
495
  # Генерация изображений с LCM_Adapter ---------------------------------------------------------------------------------------------
496
 
497
+ # if use_LCM_adapter:
498
 
499
+ # if LCM_adapter == "lcm-lora-sdv1-5":
500
+ # adapter_id = "latent-consistency/lcm-lora-sdv1-5"
501
+
502
+ # generator = torch.Generator(device).manual_seed(seed)
503
+
504
+ # pipe_default = get_lora_sd_pipeline(lora_dir='lora_man_animestyle', base_model_name_or_path=model_default, dtype=torch_dtype).to(device)
505
+ # pipe = pipe_default
506
+ # pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
507
+ # pipe.to(device)
508
+
509
+ # pipe.load_lora_weights(adapter_id)
510
+ # pipe.fuse_lora()
511
+
512
+ # prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
513
+ # negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
514
+ # prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
515
+
516
+ # params = {
517
+ # 'prompt_embeds': prompt_embeds,
518
+ # 'negative_prompt_embeds': negative_prompt_embeds,
519
+ # 'guidance_scale': guidance_scale,
520
+ # 'num_inference_steps': num_inference_steps,
521
+ # 'width': width,
522
+ # 'height': height,
523
+ # 'generator': generator,
524
+ # }
525
+
526
+ # image = pipe(**params).images[0]
527
+ # else:
528
+ # # Генерация изображений с DDIMScheduler ---------------------------------------------------------------------------------------------
529
+
530
+ # if use_DDIMScheduler:
531
+
532
+ # generator = torch.Generator(device).manual_seed(seed)
533
+
534
+ # pipe = StableDiffusionPipeline.from_pretrained(model_default, torch_dtype=torch_dtype).to(device)
535
+ # pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
536
+
537
+ # prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
538
+ # negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
539
+ # prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
540
+
541
+ # params = {
542
+ # 'prompt_embeds': prompt_embeds,
543
+ # 'negative_prompt_embeds': negative_prompt_embeds,
544
+ # 'guidance_scale': guidance_scale,
545
+ # 'num_inference_steps': num_inference_steps,
546
+ # 'width': width,
547
+ # 'height': height,
548
+ # 'generator': generator,
549
+ # }
550
+
551
+ # image = pipe(**params).images[0]
552
+ # else:
553
  # Генерация изображений с LORA без ControlNet и IP_Adapter ---------------------------------------------------------------------------------------------
554
 
555
  # Инициализация ControlNet