Lifeinhockey committed on
Commit
503e3c8
·
verified ·
1 Parent(s): af39a85

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -55
app.py CHANGED
@@ -494,62 +494,62 @@ def infer(
494
  else:
495
  # Генерация изображений с LCM_Adapter ---------------------------------------------------------------------------------------------
496
 
497
- # if use_LCM_adapter:
498
 
499
- # if LCM_adapter == "lcm-lora-sdv1-5":
500
- # adapter_id = "latent-consistency/lcm-lora-sdv1-5"
501
-
502
- # generator = torch.Generator(device).manual_seed(seed)
503
-
504
- # pipe_default = get_lora_sd_pipeline(lora_dir='lora_man_animestyle', base_model_name_or_path=model_default, dtype=torch_dtype).to(device)
505
- # pipe = pipe_default
506
- # pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
507
- # pipe.to(device)
508
-
509
- # pipe.load_lora_weights(adapter_id)
510
- # pipe.fuse_lora()
511
-
512
- # prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
513
- # negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
514
- # prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
515
-
516
- # params = {
517
- # 'prompt_embeds': prompt_embeds,
518
- # 'negative_prompt_embeds': negative_prompt_embeds,
519
- # 'guidance_scale': guidance_scale,
520
- # 'num_inference_steps': num_inference_steps,
521
- # 'width': width,
522
- # 'height': height,
523
- # 'generator': generator,
524
- # }
525
-
526
- # image = pipe(**params).images[0]
527
- # else:
528
- # # Генерация изображений с DDIMScheduler ---------------------------------------------------------------------------------------------
529
-
530
- # if use_DDIMScheduler:
531
-
532
- # generator = torch.Generator(device).manual_seed(seed)
533
-
534
- # pipe = StableDiffusionPipeline.from_pretrained(model_default, torch_dtype=torch_dtype).to(device)
535
- # pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
536
-
537
- # prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
538
- # negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
539
- # prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
540
-
541
- # params = {
542
- # 'prompt_embeds': prompt_embeds,
543
- # 'negative_prompt_embeds': negative_prompt_embeds,
544
- # 'guidance_scale': guidance_scale,
545
- # 'num_inference_steps': num_inference_steps,
546
- # 'width': width,
547
- # 'height': height,
548
- # 'generator': generator,
549
- # }
550
-
551
- # image = pipe(**params).images[0]
552
- # else:
553
  # Генерация изображений с LORA без ControlNet и IP_Adapter ---------------------------------------------------------------------------------------------
554
 
555
  # Инициализация ControlNet
 
494
  else:
495
  # Генерация изображений с LCM_Adapter ---------------------------------------------------------------------------------------------
496
 
497
+ if use_LCM_adapter:
498
 
499
+ if LCM_adapter == "lcm-lora-sdv1-5":
500
+ adapter_id = "latent-consistency/lcm-lora-sdv1-5"
501
+
502
+ generator = torch.Generator(device).manual_seed(seed)
503
+
504
+ pipe_LCM_default = get_lora_sd_pipeline(lora_dir='lora_man_animestyle', base_model_name_or_path=model_default, dtype=torch_dtype).to(device)
505
+ pipe_LCM = pipe_LCM_default
506
+ pipe_LCM.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
507
+ pipe_LCM.to(device)
508
+
509
+ pipe_LCM.load_lora_weights(adapter_id)
510
+ pipe_LCM.fuse_lora()
511
+
512
+ prompt_embeds = long_prompt_encoder(prompt, pipe_LCM.tokenizer, pipe_LCM.text_encoder)
513
+ negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe_LCM.tokenizer, pipe_LCM.text_encoder)
514
+ prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
515
+
516
+ params = {
517
+ 'prompt_embeds': prompt_embeds,
518
+ 'negative_prompt_embeds': negative_prompt_embeds,
519
+ 'guidance_scale': guidance_scale,
520
+ 'num_inference_steps': num_inference_steps,
521
+ 'width': width,
522
+ 'height': height,
523
+ 'generator': generator,
524
+ }
525
+
526
+ image = pipe_LCM(**params).images[0]
527
+ else:
528
+ # Генерация изображений с DDIMScheduler ---------------------------------------------------------------------------------------------
529
+
530
+ if use_DDIMScheduler:
531
+
532
+ generator = torch.Generator(device).manual_seed(seed)
533
+
534
+ pipe_DDIMS = StableDiffusionPipeline.from_pretrained(model_default, torch_dtype=torch_dtype).to(device)
535
+ pipe_DDIMS.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
536
+
537
+ prompt_embeds = long_prompt_encoder(prompt, pipe_DDIMS.tokenizer, pipe_DDIMS.text_encoder)
538
+ negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe_DDIMS.tokenizer, pipe_DDIMS.text_encoder)
539
+ prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
540
+
541
+ params = {
542
+ 'prompt_embeds': prompt_embeds,
543
+ 'negative_prompt_embeds': negative_prompt_embeds,
544
+ 'guidance_scale': guidance_scale,
545
+ 'num_inference_steps': num_inference_steps,
546
+ 'width': width,
547
+ 'height': height,
548
+ 'generator': generator,
549
+ }
550
+
551
+ image = pipe_DDIMS(**params).images[0]
552
+ else:
553
  # Генерация изображений с LORA без ControlNet и IP_Adapter ---------------------------------------------------------------------------------------------
554
 
555
  # Инициализация ControlNet