Update app.py

app.py CHANGED
```diff
@@ -86,6 +86,9 @@ for url_embed in DOWNLOAD_EMBEDS:
 
 # Build list models
 embed_list = get_model_list(DIRECTORY_EMBEDS)
+embed_list = [
+    (os.path.splitext(os.path.basename(emb))[0], emb) for emb in embed_list
+]
 model_list = get_model_list(DIRECTORY_MODELS)
 model_list = LOAD_DIFFUSERS_FORMAT_MODEL + model_list
 lora_model_list = get_model_list(DIRECTORY_LORAS)
```
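The added comprehension turns each raw embedding path into a (name, path) pair, so downstream consumers get both a human-readable token name and the file to load. A minimal sketch of what it produces, with hypothetical paths:

```python
import os

# Hypothetical paths of the kind get_model_list(DIRECTORY_EMBEDS) might return.
embed_list = ["./embeds/EasyNegative.safetensors", "./embeds/bad-hands-5.pt"]

# Pair a display name (file name minus extension) with the full path.
embed_list = [
    (os.path.splitext(os.path.basename(emb))[0], emb) for emb in embed_list
]
print(embed_list)
# [('EasyNegative', './embeds/EasyNegative.safetensors'),
#  ('bad-hands-5', './embeds/bad-hands-5.pt')]
```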
```diff
@@ -424,7 +427,7 @@ class GuiSD:
             "lora_scale_D": lora_scale4,
             "lora_E": lora5 if lora5 != "None" else None,
             "lora_scale_E": lora_scale5,
-            "textual_inversion": embed_list if textual_inversion
+            "textual_inversion": embed_list if textual_inversion else [],
             "syntax_weights": syntax_weights,  # "Classic"
             "sampler": sampler,
             "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
```
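The updated line makes the fallback explicit: `"textual_inversion"` is always a list, holding the (name, path) pairs when the toggle is on and nothing otherwise. The pattern in isolation (names are stand-ins):

```python
def build_params(textual_inversion: bool, embed_list: list) -> dict:
    # An empty list cleanly disables textual inversion downstream.
    return {"textual_inversion": embed_list if textual_inversion else []}

pairs = [("EasyNegative", "./embeds/EasyNegative.safetensors")]
print(build_params(True, pairs))   # {'textual_inversion': [('EasyNegative', ...)]}
print(build_params(False, pairs))  # {'textual_inversion': []}
```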
```diff
@@ -484,6 +487,11 @@ class GuiSD:
         if vae_msg:
             info_images = info_images + "<br>" + vae_msg
 
+        if "Cannot copy out of meta tensor; no data!" in self.model.last_lora_error:
+            msg_ram = "Unable to process the LoRAs due to high RAM usage; please try again later."
+            print(msg_ram)
+            msg_lora += f"<br>{msg_ram}"
+
         for status, lora in zip(self.model.lora_status, self.model.lora_memory):
             if status:
                 msg_lora += f"<br>Loaded: {lora}"
```
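The new guard translates PyTorch's opaque meta-tensor failure, which this commit treats as a sign that the LoRA weights never got materialized under memory pressure, into a readable status line. A sketch of the pattern with a stand-in for the model wrapper:

```python
class _Model:
    # Stand-in for the app's model object; last_lora_error is assumed to hold
    # the text of the most recent LoRA-loading exception ("" when none).
    last_lora_error = "Cannot copy out of meta tensor; no data!"

model = _Model()
msg_lora = ""
if "Cannot copy out of meta tensor; no data!" in model.last_lora_error:
    msg_ram = "Unable to process the LoRAs due to high RAM usage; please try again later."
    print(msg_ram)                # shows up in the Space logs
    msg_lora += f"<br>{msg_ram}"  # shows up in the rendered HTML status
```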
```diff
@@ -567,11 +575,11 @@ def sd_gen_generate_pipeline(*args):
     )
     gr.Info(f"LoRAs in cache: {lora_cache_msg}")
 
-    msg_request = f"Requesting {gpu_duration_arg}s. of GPU time
+    msg_request = f"Requesting {gpu_duration_arg}s. of GPU time.\nModel: {sd_gen.model.base_model_id}"
     if verbose_arg:
         gr.Info(msg_request)
         print(msg_request)
-    yield msg_request, gr.update(), gr.update()
+    yield msg_request.replace("\n", "<br>"), gr.update(), gr.update()
 
     start_time = time.time()
 
```
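The request message now spans two lines, with the model id on the second. The console and gr.Info copies keep the literal newline, while the yielded copy feeds an HTML output, so `\n` becomes `<br>`:

```python
gpu_duration_arg = 59              # hypothetical duration
base_model_id = "some/base-model"  # hypothetical model id

msg_request = f"Requesting {gpu_duration_arg}s. of GPU time.\nModel: {base_model_id}"
print(msg_request)                        # two lines on the console
print(msg_request.replace("\n", "<br>"))  # single HTML string with a <br> break
# Requesting 59s. of GPU time.<br>Model: some/base-model
```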
```diff
@@ -585,7 +593,7 @@ def sd_gen_generate_pipeline(*args):
     end_time = time.time()
     execution_time = end_time - start_time
     msg_task_complete = (
-        f"GPU task complete in: {round(execution_time, 0) + 1} seconds"
+        f"GPU task complete in: {int(round(execution_time, 0) + 1)} seconds"
     )
 
     if verbose_arg:
```
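This is a display fix: round(x, 0) with an explicit ndigits returns a float, so the old message read like "5.0 seconds"; wrapping the result in int() drops the trailing .0:

```python
execution_time = 3.84  # hypothetical elapsed time in seconds

print(f"GPU task complete in: {round(execution_time, 0) + 1} seconds")
# GPU task complete in: 5.0 seconds
print(f"GPU task complete in: {int(round(execution_time, 0) + 1)} seconds")
# GPU task complete in: 5 seconds
```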
```diff
@@ -595,7 +603,7 @@ def sd_gen_generate_pipeline(*args):
     yield msg_task_complete, gr.update(), gr.update()
 
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=15)
 def esrgan_upscale(image, upscaler_name, upscaler_size):
     if image is None: return None
 
```
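On ZeroGPU Spaces, the spaces.GPU decorator acquires a GPU for each call, and duration caps the reservation in seconds (the default is 60); a small cap like 15 suits a quick upscale and tends to queue faster. A minimal sketch:

```python
import spaces  # Hugging Face ZeroGPU helper, preinstalled on Spaces

@spaces.GPU(duration=15)  # request ~15s of GPU per call instead of the 60s default
def quick_gpu_task(image):
    # Lightweight work such as ESRGAN upscaling fits well inside this window.
    ...
```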
```diff
@@ -809,11 +817,13 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
 
             with gr.Accordion("From URL", open=False, visible=True):
                 text_lora = gr.Textbox(label="LoRA URL", placeholder="https://civitai.com/api/download/models/28907", lines=1)
-
+                romanize_text = gr.Checkbox(value=False, label="Transliterate name")
+                button_lora = gr.Button("Obtain and refresh the LoRAs lists")
+                new_lora_status = gr.HTML()
                 button_lora.click(
                     get_my_lora,
-                    [text_lora],
-                    [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui]
+                    [text_lora, romanize_text],
+                    [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui, new_lora_status]
                 )
 
         with gr.Accordion("IP-Adapter", open=False, visible=True):
```