r3gm committed on
Commit
58aa405
·
verified ·
1 Parent(s): 4d6abc6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -35
app.py CHANGED
@@ -378,7 +378,7 @@ class GuiSD:
378
  face_restoration_visibility,
379
  face_restoration_weight,
380
  ):
381
- info_state = html_template_message("Navigating latent space...")
382
  yield info_state, gr.update(), gr.update()
383
 
384
  vae_model = vae_model if vae_model != "None" else None
@@ -562,42 +562,40 @@ class GuiSD:
562
 
563
  actual_progress = 0
564
  info_images = gr.update()
565
- for img, [seed, image_path, metadata] in self.model(**pipe_params):
566
- info_state = progress_step_bar(actual_progress, steps)
567
- actual_progress += concurrency
568
- if image_path:
569
- info_images = f"Seeds: {str(seed)}"
570
- if vae_msg:
571
- info_images = info_images + "<br>" + vae_msg
572
-
573
- if "Cannot copy out of meta tensor; no data!" in self.model.last_lora_error:
574
- msg_ram = "Unable to process the LoRAs due to high RAM usage; please try again later."
575
- print(msg_ram)
576
- msg_lora += f"<br>{msg_ram}"
577
-
578
- for status, lora in zip(self.model.lora_status, self.model.lora_memory):
579
- if status:
580
- msg_lora += f"<br>Loaded: {lora}"
581
- elif status is not None:
582
- msg_lora += f"<br>Error with: {lora}"
583
-
584
- if msg_lora:
585
- info_images += msg_lora
586
-
587
- info_images = info_images + "<br>" + "GENERATION DATA:<br>" + escape_html(metadata[-1]) + "<br>-------<br>"
588
-
589
- download_links = "<br>".join(
590
- [
591
- f'<a href="{path.replace("/images/", "/file=/home/user/app/images/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
592
- for i, path in enumerate(image_path)
593
- ]
594
- )
595
- if save_generated_images:
596
- info_images += f"<br>{download_links}"
597
 
598
- info_state = "COMPLETE"
599
 
600
- yield info_state, img, info_images
601
 
602
 
603
  def dynamic_gpu_duration(func, duration, *args):
 
378
  face_restoration_visibility,
379
  face_restoration_weight,
380
  ):
381
+ info_state = html_template_message("Processing...")
382
  yield info_state, gr.update(), gr.update()
383
 
384
  vae_model = vae_model if vae_model != "None" else None
 
562
 
563
  actual_progress = 0
564
  info_images = gr.update()
565
+ img, [seed, image_path, metadata] = self.model(**pipe_params)
566
+
567
+ info_images = f"Seeds: {str(seed)}"
568
+ if vae_msg:
569
+ info_images = info_images + "<br>" + vae_msg
570
+
571
+ if "Cannot copy out of meta tensor; no data!" in self.model.last_lora_error:
572
+ msg_ram = "Unable to process the LoRAs due to high RAM usage; please try again later."
573
+ print(msg_ram)
574
+ msg_lora += f"<br>{msg_ram}"
575
+
576
+ for status, lora in zip(self.model.lora_status, self.model.lora_memory):
577
+ if status:
578
+ msg_lora += f"<br>Loaded: {lora}"
579
+ elif status is not None:
580
+ msg_lora += f"<br>Error with: {lora}"
581
+
582
+ if msg_lora:
583
+ info_images += msg_lora
584
+
585
+ info_images = info_images + "<br>" + "GENERATION DATA:<br>" + escape_html(metadata[-1]) + "<br>-------<br>"
586
+
587
+ download_links = "<br>".join(
588
+ [
589
+ f'<a href="{path.replace("/images/", "/file=/home/user/app/images/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
590
+ for i, path in enumerate(image_path)
591
+ ]
592
+ )
593
+ if save_generated_images:
594
+ info_images += f"<br>{download_links}"
 
 
595
 
596
+ info_state = "COMPLETE"
597
 
598
+ return info_state, img, info_images
599
 
600
 
601
  def dynamic_gpu_duration(func, duration, *args):