Spaces: Running on Zero
Upload 3 files
- app.py +13 -6
- modutils.py +18 -0
- requirements.txt +1 -0
app.py
CHANGED
@@ -167,7 +167,7 @@ from modutils import (list_uniq, download_private_repo, get_model_id_list, get_t
     get_my_lora, upload_file_lora, move_file_lora, search_civitai_lora, select_civitai_lora,
     set_textual_inversion_prompt, get_model_pipeline, change_interface_mode, get_t2i_model_info,
     get_tupled_model_list, save_gallery_images, set_optimization, set_sampler_settings,
-    set_quick_presets, process_style_prompt, optimization_list,
+    set_quick_presets, process_style_prompt, optimization_list, save_images,
     preset_styles, preset_quality, preset_sampler_setting, translate_to_en)
 from env import (hf_token, CIVITAI_API_KEY, HF_LORA_ESSENTIAL_PRIVATE_REPO, HF_VAE_PRIVATE_REPO,
     HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO,

@@ -556,16 +556,17 @@ class GuiSD:
         model_precision = torch.float16
         self.model.device = torch.device("cuda:0")
         if not self.model:
-            from
+            from modelstream import Model_Diffusers2

             print("Loading model...")
-            self.model =
+            self.model = Model_Diffusers2(
                 base_model_id=model_name,
                 task_name=task,
                 vae_model=vae_model if vae_model != "None" else None,
                 type_model_precision=model_precision,
                 retain_task_model_in_cache=retain_task_cache_gui,
             )
+            self.model.stream_config(concurrency=5, latent_resize_by=1, vae_decoding=False)

         if task != "txt2img" and not image_control:
             raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")

@@ -710,18 +711,24 @@ class GuiSD:
         }

         # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!''
-        self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
+        #self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")

         #progress(0, desc="Preparation completed. Starting inference...")
+
         info_state = f"PROCESSING "
-        for img, seed,
+        for img, seed, image_path, metadata in self.model(**pipe_params):
             info_state += ">"
-            if
+            if image_path:
                 info_state = f"COMPLETED. Seeds: {str(seed)}"
                 if vae_msg:
                     info_state = info_state + "<br>" + vae_msg
                 if msg_lora:
                     info_state = info_state + "<br>" + "<br>".join(msg_lora)
+
+                info_state = info_state + "<br>" + "GENERATION DATA:<br>" + "<br>-------<br>".join(metadata).replace("\n", "<br>")
+
+                img = save_images(img, metadata)
+
             yield img, info_state

 sd_gen = GuiSD()
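The rewritten loop treats self.model(**pipe_params) as a generator that streams intermediate results and only reports file paths and metadata on the final yield. The sketch below is a minimal, self-contained illustration of that assumed contract; fake_stream() and relay() are hypothetical stand-ins written for this note and are not part of the Space or of the modelstream module:

# Minimal sketch of the streaming contract the new loop appears to rely on:
# each yield is an (img, seed, image_path, metadata) tuple, and image_path
# stays empty until the final images are ready. fake_stream() and relay()
# are hypothetical stand-ins, not part of the Space or of modelstream.

def fake_stream(steps=3):
    # Mimics the assumed output shape of self.model(**pipe_params).
    for i in range(steps):
        final = i == steps - 1
        yield (
            [f"preview-{i}.png"],                         # img (PIL images in the real app)
            [42],                                         # seeds for this batch
            ["result.png"] if final else [],              # image_path only on the last yield
            ["prompt: ...\nsteps: 30"] if final else [],  # metadata strings
        )

def relay(stream):
    # Mirrors how the Gradio handler builds its status string and yields updates.
    info_state = "PROCESSING "
    for img, seed, image_path, metadata in stream:
        info_state += ">"
        if image_path:
            info_state = f"COMPLETED. Seeds: {seed}"
        yield img, info_state

for img, status in relay(fake_stream()):
    print(status)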
modutils.py
CHANGED
@@ -100,6 +100,24 @@ def safe_float(input):
     return output


+from PIL import Image
+def save_images(images: list[Image.Image], metadatas: list[str]):
+    from PIL import PngImagePlugin
+    import uuid
+    try:
+        output_images = []
+        for image, metadata in zip(images, metadatas):
+            info = PngImagePlugin.PngInfo()
+            info.add_text("metadata", metadata)
+            savefile = f"{str(uuid.uuid4())}.png"
+            image.save(savefile, "PNG", pnginfo=info)
+            output_images.append(str(Path(savefile).resolve()))
+        return output_images
+    except Exception as e:
+        print(f"Failed to save image file: {e}")
+        raise Exception(f"Failed to save image file:") from e
+
+
 def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
     from datetime import datetime, timezone, timedelta
     progress(0, desc="Updating gallery...")
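The new save_images helper writes each metadata string into a PNG text chunk via PngImagePlugin.PngInfo and returns absolute file paths; it assumes Path (pathlib) is already imported at module level in modutils.py. For reference, a minimal sketch of how that embedded metadata can be read back with Pillow; the filename is a placeholder for one of the returned paths:

# Reading back the text chunk that save_images embeds under the "metadata" key.
# A minimal sketch using only Pillow; "some-uuid.png" is a placeholder for a
# path returned by save_images.
from PIL import Image

def read_embedded_metadata(path):
    with Image.open(path) as im:
        # tEXt chunks written with PngInfo.add_text() are exposed in im.info
        return im.info.get("metadata")

# Example: print(read_embedded_metadata("some-uuid.png"))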
requirements.txt
CHANGED
@@ -2,6 +2,7 @@ git+https://github.com/R3gm/stablepy.git
 torch==2.2.0
 gdown
 opencv-python
+yt-dlp
 torchvision
 accelerate
 transformers