Spaces: Running on Zero

Tanut committed · d9f072c
1 Parent(s): d6fcceb

Fix

Files changed:
- app.py +37 -55
- requirements.txt +0 -4
app.py CHANGED

@@ -1,21 +1,20 @@
 import os, gc, random
 import gradio as gr
 import torch, spaces
-from PIL import Image
 from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
 
 # ---- config ----
 MODEL_ID = "runwayml/stable-diffusion-v1-5"
-DTYPE = torch.float16
-HF_TOKEN = os.getenv("HF_TOKEN") # optional (only …
+DTYPE = torch.float16  # ZeroGPU slice supports fp16
+HF_TOKEN = os.getenv("HF_TOKEN")  # optional (only for private models)
 AUTH = {"token": HF_TOKEN} if HF_TOKEN else {}
 
-# …
-_PIPE = …
+# lazy cache
+_PIPE = None
 
-def _get_pipe():
-    …
-    if _PIPE …
+def get_pipe():
+    global _PIPE
+    if _PIPE is None:
         pipe = StableDiffusionPipeline.from_pretrained(
             MODEL_ID,
             torch_dtype=DTYPE,
@@ -24,38 +23,30 @@ def _get_pipe():
             low_cpu_mem_usage=True,
             **AUTH
         )
+        # fast, stable scheduler
         pipe.scheduler = DPMSolverMultistepScheduler.from_config(
             pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="dpmsolver++"
         )
+        # memory savers (good for Spaces/ZeroGPU)
        pipe.enable_attention_slicing()
         pipe.enable_vae_slicing()
         pipe.enable_model_cpu_offload()
-        _PIPE …
-        return _PIPE
+        _PIPE = pipe
+    return _PIPE
 
-def …
-    # diffusers likes multiples of 8; stay within safe VRAM for ZeroGPU
+def snap8(x: int) -> int:
     x = max(256, min(1024, int(x)))
     return x - (x % 8)
 
-@spaces.GPU(duration=120)
-def generate(prompt: str,
-             …
-             guidance_scale: float,
-             width: int,
-             height: int,
-             seed: int):
-    pipe = _get_pipe()
+@spaces.GPU(duration=120)
+def generate(prompt: str, negative: str, steps: int, cfg: float, width: int, height: int, seed: int):
+    pipe = get_pipe()
+    w, h = snap8(width), snap8(height)
 
-    # seed handling (reproducible on CUDA)
-    g = torch.Generator(device="cuda")
-    if int(seed) == -1:
+    # seed
+    if int(seed) < 0:
         seed = random.randint(0, 2**31 - 1)
-    …
+    gen = torch.Generator(device="cuda").manual_seed(int(seed))
 
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
@@ -64,42 +55,33 @@ def generate(prompt: str,
     with torch.autocast(device_type="cuda", dtype=DTYPE):
         out = pipe(
             prompt=str(prompt),
-            negative_prompt=str(…
+            negative_prompt=str(negative or ""),
             num_inference_steps=int(steps),
-            guidance_scale=float(…
+            guidance_scale=float(cfg),
             width=w, height=h,
-            generator=…
+            generator=gen,
         )
-    …
-    return img, seed
+    return out.images[0]
 
-# …
+# -------- UI (one output, zero fancy) --------
 with gr.Blocks() as demo:
-    gr.Markdown("# …
+    gr.Markdown("# 🎨 Stable Diffusion 1.5 — ZeroGPU (minimal)")
 
     with gr.Row():
         with gr.Column():
-            prompt …
-            )
-            …
-            )
-            steps = gr.Slider(4, 50, value=28, step=1, label="Steps")
-            cfg = gr.Slider(1.0, 12.0, value=7.0, step=0.5, label="CFG scale")
-            width = gr.Slider(256, 1024, value=640, step=16, label="Width")
-            height = gr.Slider(256, 1024, value=640, step=16, label="Height")
-            seed = gr.Number(value=-1, precision=0, label="Seed (-1 = random)")
-
-            btn = gr.Button("Generate", variant="primary")
+            prompt = gr.Textbox(label="Prompt", value="a cozy reading nook, warm sunlight, cinematic lighting, highly detailed")
+            negative = gr.Textbox(label="Negative (optional)", value="lowres, blurry, watermark, text")
+            steps = gr.Slider(8, 40, value=28, step=1, label="Steps")
+            cfg = gr.Slider(1.0, 12.0, value=7.0, step=0.5, label="CFG")
+            width = gr.Slider(256, 1024, value=640, step=16, label="Width")
+            height = gr.Slider(256, 1024, value=640, step=16, label="Height")
+            seed = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
+            btn = gr.Button("Generate", variant="primary")
         with gr.Column():
-            …
-            out_seed = gr.Number(label="Used seed", interactive=False)
+            out = gr.Image(label="Result", interactive=False)
 
-    btn.click(generate, [prompt, negative, steps, cfg, width, height, seed], …
+    btn.click(generate, [prompt, negative, steps, cfg, width, height, seed], out)
 
 if __name__ == "__main__":
-    …
+    # keep it plain so the Space "just builds"
+    demo.launch()
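Once the Space builds, it can also be driven programmatically. Below is a minimal sketch using `gradio_client`; the Space ID is a placeholder, and the `/generate` endpoint name assumes Gradio's default of exposing a click handler under its function name:

```python
from gradio_client import Client

# Placeholder Space ID; substitute the real <user>/<space> once deployed.
client = Client("Tanut/sd15-zerogpu")

# Positional args mirror the inputs list wired into btn.click:
# prompt, negative, steps, cfg, width, height, seed (-1 = random)
result = client.predict(
    "a cozy reading nook, warm sunlight",
    "lowres, blurry, watermark, text",
    28,    # steps
    7.0,   # CFG
    640,   # width
    640,   # height
    -1,    # seed
    api_name="/generate",
)
print(result)  # local path to the generated image file
```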
requirements.txt CHANGED

@@ -5,7 +5,3 @@ accelerate>=0.31.0
 safetensors
 gradio==4.44.1
 huggingface-hub
-Pillow
-spaces
-numpy
-mediapipe
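The dropped packages are presumably redundant on Spaces: `spaces` is preinstalled on ZeroGPU hardware, `Pillow` and `numpy` arrive transitively with `diffusers` and `gradio`, and nothing imports `mediapipe` after this commit. For a quick sanity check of the same pipeline configuration outside the Space, a sketch assuming a local CUDA GPU (not part of the commit):

```python
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

# Same model and scheduler settings as the Space, minus the ZeroGPU decorator.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="dpmsolver++"
)
pipe.enable_model_cpu_offload()  # same memory saver the Space uses

gen = torch.Generator(device="cuda").manual_seed(42)  # fixed seed for reproducibility
image = pipe(
    "a cozy reading nook, warm sunlight",
    negative_prompt="lowres, blurry, watermark, text",
    num_inference_steps=28,
    guidance_scale=7.0,
    width=640,
    height=640,
    generator=gen,
).images[0]
image.save("smoke_test.png")
```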