Spaces:
Sleeping
Sleeping
File size: 2,920 Bytes
7c5aece 1fc8d06 fc29d6e 184daa2 fc29d6e 7c5aece 19c2aa1 d9f072c 19c2aa1 d9f072c 6ae079b fc29d6e 6ae079b d9f072c fc29d6e 6ae079b 7c5aece 19c2aa1 d9f072c fc29d6e d9f072c fc29d6e d9f072c fc29d6e d9f072c fc29d6e d9f072c 6ae079b fc29d6e 19c2aa1 d9f072c 19c2aa1 d9f072c fc29d6e d9f072c 19c2aa1 d9f072c e20d060 7c5aece 56a99b7 7c5aece fc29d6e d9f072c fc29d6e d9f072c fc29d6e d9f072c 3315876 56a99b7 7c5aece d9f072c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 |
import gc, random
import gradio as gr
import torch, spaces
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
# ---- config ----
# Hugging Face Hub repo id the pipeline is loaded from.
MODEL_ID = "runwayml/stable-diffusion-v1-5"
DTYPE = torch.float16 # ZeroGPU slice runs fp16 nicely
# lazy cache
# Module-level singleton; populated by the first get_pipe() call so the
# heavyweight model download/load happens at most once per process.
_PIPE = None
def get_pipe():
    """Return the shared SD 1.5 pipeline, building it on first use.

    The constructed pipeline is cached in the module-level ``_PIPE``
    singleton so subsequent calls are free.
    """
    global _PIPE
    if _PIPE is not None:
        return _PIPE

    built = StableDiffusionPipeline.from_pretrained(
        MODEL_ID,
        torch_dtype=DTYPE,
        safety_checker=None,
        use_safetensors=True,
        low_cpu_mem_usage=True,
    )
    # Swap in DPM-Solver++ with Karras sigmas — fast, stable sampling.
    built.scheduler = DPMSolverMultistepScheduler.from_config(
        built.scheduler.config, use_karras_sigmas=True, algorithm_type="dpmsolver++"
    )
    # Memory savers (great for Spaces/ZeroGPU).
    built.enable_attention_slicing()
    built.enable_vae_slicing()
    built.enable_model_cpu_offload()

    _PIPE = built
    return _PIPE
def snap8(x: int, *, lo: int = 256, hi: int = 1024, multiple: int = 8) -> int:
    """Clamp *x* into ``[lo, hi]`` and round down to a multiple of *multiple*.

    SD 1.5's UNet requires spatial dimensions divisible by 8, and the UI
    sliders range 256-1024; defaults preserve the original behavior while
    the keyword-only parameters let other callers reuse the helper.

    Args:
        x: Requested dimension in pixels (coerced to int).
        lo: Inclusive lower clamp bound (keyword-only).
        hi: Inclusive upper clamp bound (keyword-only).
        multiple: Snap granularity; result is the largest multiple of this
            value that is <= the clamped input (keyword-only).

    Returns:
        The clamped, snapped dimension.
    """
    x = max(lo, min(hi, int(x)))
    return x - (x % multiple)
@spaces.GPU(duration=120)
def generate(prompt: str, negative: str, steps: int, cfg: float, width: int, height: int, seed: int):
    """Run one text-to-image generation and return the resulting PIL image.

    Args mirror the Gradio inputs: width/height are snapped to valid
    multiples of 8, and ``seed < 0`` means "pick a fresh random seed".

    Returns:
        The first (and only) generated ``PIL.Image``.
    """
    pipe = get_pipe()
    w, h = snap8(width), snap8(height)

    # Resolve the seed: negative -> random seed for this call.
    seed = int(seed)
    if seed < 0:
        seed = random.randint(0, 2**31 - 1)
    # FIX: the original constructed Generator(device="cuda") unconditionally,
    # which crashes on any CPU-only run (e.g. a local smoke test) even though
    # CUDA availability was checked just below. Pick the device first.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    gen = torch.Generator(device=device).manual_seed(seed)

    # Release VRAM left over from previous requests before a new run.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    gc.collect()

    # FIX: dropped the torch.autocast wrapper — the pipeline is already
    # loaded in fp16 (DTYPE), and diffusers explicitly discourages
    # combining autocast with fp16 weights (redundant casts, can yield
    # black images). Running the pipe directly is the documented path.
    out = pipe(
        prompt=str(prompt),
        negative_prompt=str(negative or ""),
        num_inference_steps=int(steps),
        guidance_scale=float(cfg),
        width=w, height=h,
        generator=gen,
    )
    return out.images[0]
# -------- UI --------
# Declarative Gradio layout: two columns — controls on the left, the
# generated image on the right. Component creation order defines layout.
with gr.Blocks() as demo:
    gr.Markdown("# 🎨 Stable Diffusion 1.5 — ZeroGPU (public, minimal)")
    with gr.Row():
        with gr.Column():
            # Generation controls; defaults give a reasonable first render.
            prompt = gr.Textbox(label="Prompt", value="a cozy reading nook, warm sunlight, cinematic lighting, highly detailed")
            negative = gr.Textbox(label="Negative (optional)", value="lowres, blurry, watermark, text")
            steps = gr.Slider(8, 40, value=28, step=1, label="Steps")
            cfg = gr.Slider(1.0, 12.0, value=7.0, step=0.5, label="CFG")
            # Slider step is 16, but generate() snaps to multiples of 8 anyway.
            width = gr.Slider(256, 1024, value=640, step=16, label="Width")
            height = gr.Slider(256, 1024, value=640, step=16, label="Height")
            seed = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
            btn = gr.Button("Generate", variant="primary")
        with gr.Column():
            out = gr.Image(label="Result", interactive=False)
    # Wire the button to the @spaces.GPU-decorated generate() above;
    # inputs are passed positionally in generate()'s parameter order.
    btn.click(generate, [prompt, negative, steps, cfg, width, height, seed], out)
if __name__ == "__main__":
    # Keep it plain so the Space builds cleanly
    demo.launch()
|