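# Gradio Space: CPU-only Stable Diffusion XL demo. A user-selected SDXL
# checkpoint is combined with an LCM LoRA and the LCM scheduler so images
# can be sampled in only a few steps on CPU hardware.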
import gradio as gr
import torch
import os
import gc
import random
from huggingface_hub import snapshot_download, HfApi
from diffusers import StableDiffusionXLPipeline, LCMScheduler
from PIL import Image
# Secrets are injected as Space environment variables.
newcv = os.getenv("newcv")
newhf = os.getenv("newhf")
api = HfApi(token=newhf)

# Redirect the Hugging Face caches to a user-writable location.
os.environ["XDG_CACHE_HOME"] = "/home/user/.cache"
os.environ["TRANSFORMERS_CACHE"] = "/home/user/.cache/huggingface/transformers"
os.environ["HF_HOME"] = "/home/user/.cache/huggingface"
models = [
    "Niggendar/fastPhotoPony_v80MixB",
    "Niggendar/realisticPonyPhoto_v10",
    "Niggendar/realmix_v10",
    "Niggendar/realmixpony_v01",
    "Niggendar/realmixpony_v02",
    "Niggendar/recondiff_v10",
    "Niggendar/Regro",
    "Niggendar/relhCheckpoint_v20",
]
loras = ["openskyml/lcm-lora-sdxl-turbo"]

pipe = None          # currently active pipeline; only one is kept alive at a time
cached = {}          # (model_id, lora_id) -> pipeline
cached_loras = {}    # lora_id -> local path of the downloaded weight file
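# Download a LoRA repo once and memoize the path to its weight file.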
def get_lora(lora_id):
    if lora_id in cached_loras:
        return cached_loras[lora_id]
    lora_dir = snapshot_download(
        repo_id=lora_id,
        token=newhf,
        local_files_only=False,
        allow_patterns=["*.safetensors", "*.bin"],
    )
    lora_files = [f for f in os.listdir(lora_dir) if f.endswith((".safetensors", ".bin"))]
    if not lora_files:
        raise FileNotFoundError(f"No LoRA weight file found in {lora_id}")
    lora_path = os.path.join(lora_dir, lora_files[0])
    cached_loras[lora_id] = lora_path
    return lora_path
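# Swap the active pipeline when the user picks a different model. The old
# pipeline's large submodules are released first so the Space's limited CPU
# RAM can hold the new checkpoint; only one (model, LoRA) pair is cached.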
def load_pipe(model_id, lora_id):
    global pipe
    if (model_id, lora_id) in cached:
        pipe = cached[(model_id, lora_id)]
        return gr.update(value='-')
    if pipe is not None:
        # Move weights to the meta device so their CPU memory is released.
        pipe.to("meta")
        pipe.unet = None
        pipe.vae = None
        pipe.text_encoder = None
        del pipe
        gc.collect()
        cached.clear()
    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float32,
        low_cpu_mem_usage=True,
    )
    # LCM scheduling lets the LCM LoRA produce usable images in ~4 steps.
    pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
    pipe.load_lora_weights(get_lora(lora_id))
    pipe.to("cpu", dtype=torch.float32)
    pipe.enable_attention_slicing()
    cached[(model_id, lora_id)] = pipe
    return gr.update(value='-')
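# Generator callback: yields a gray placeholder so the UI responds at once,
# then the rendered image when the pipeline finishes.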
def infer(model_id, lora_id, prompt, seed=None, steps=4, guid=0.1):
    if pipe is None:
        raise gr.Error("Pick a model from the dropdown first.")
    if seed is None or seed == "":
        seed = random.randint(0, 2**32 - 1)
    yield Image.new("RGB", (512, 512), color="gray"), gr.update(value='-')
    image = pipe(prompt, generator=torch.Generator().manual_seed(int(seed)),
                 num_inference_steps=steps, guidance_scale=guid,
                 width=384, height=384,  # small output keeps CPU runs quick
                 added_cond_kwargs={}).images[0]
    yield image, gr.update(value='-')
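# UI: controls (model, prompt, settings) in the left column; the timer
# textbox and generated image in the right column.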
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=2):
            modeldrop = gr.Dropdown(models, label="Model", container=False)
            # Only one LCM LoRA is offered, so the dropdown is hidden and
            # preset to it; load_pipe still receives a valid repo id.
            loradrop = gr.Dropdown(loras, value=loras[0], label="LCM LoRA",
                                   container=False, interactive=False, visible=False)
            text = gr.Textbox(label="Prompt", container=False, placeholder="Prompt", value='')
            gbtn = gr.Button(value="Generate")
            with gr.Accordion(label="Settings", open=False):
                seed = gr.Textbox(label="Seed", visible=False)
                steps = gr.Slider(1, 15, value=4, step=1, label="Steps")
                guidance = gr.Slider(0.0, 2.0, value=0.1, step=0.1, label="Guidance Scale")
        with gr.Column(scale=3):
            text2 = gr.Textbox(label="Time", placeholder="timer", container=False, value='-')
            imageout = gr.Image()
    modeldrop.change(fn=load_pipe, inputs=[modeldrop, loradrop], outputs=[text2])
    gbtn.click(fn=infer, inputs=[modeldrop, loradrop, text, seed, steps, guidance],
               outputs=[imageout, text2])
demo.queue()  # queuing must be enabled for generator callbacks like infer
demo.launch(server_name="0.0.0.0", server_port=7860)