Tanut committed on
Commit d9f072c · 1 Parent(s): d6fcceb
Files changed (2)
  1. app.py +37 -55
  2. requirements.txt +0 -4
app.py CHANGED
@@ -1,21 +1,20 @@
 import os, gc, random
 import gradio as gr
 import torch, spaces
-from PIL import Image
 from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
 
 # ---- config ----
 MODEL_ID = "runwayml/stable-diffusion-v1-5"
-DTYPE = torch.float16
-HF_TOKEN = os.getenv("HF_TOKEN")  # optional (only needed for private models)
+DTYPE = torch.float16  # ZeroGPU slice supports fp16
+HF_TOKEN = os.getenv("HF_TOKEN")  # optional (only for private models)
 AUTH = {"token": HF_TOKEN} if HF_TOKEN else {}
 
-# cache for lazy loading
-_PIPE = {"sd": None}
+# lazy cache
+_PIPE = None
 
-def _get_pipe():
-    """Lazy-load SD1.5 and enable memory savers for ZeroGPU."""
-    if _PIPE["sd"] is None:
+def get_pipe():
+    global _PIPE
+    if _PIPE is None:
         pipe = StableDiffusionPipeline.from_pretrained(
             MODEL_ID,
             torch_dtype=DTYPE,
@@ -24,38 +23,30 @@ def _get_pipe():
             low_cpu_mem_usage=True,
             **AUTH
         )
+        # fast, stable scheduler
         pipe.scheduler = DPMSolverMultistepScheduler.from_config(
             pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="dpmsolver++"
         )
+        # memory savers (good for Spaces/ZeroGPU)
        pipe.enable_attention_slicing()
        pipe.enable_vae_slicing()
        pipe.enable_model_cpu_offload()
-        _PIPE["sd"] = pipe
-    return _PIPE["sd"]
+        _PIPE = pipe
+    return _PIPE
 
-def _snap_dim(x: int) -> int:
-    # diffusers likes multiples of 8; stay within safe VRAM for ZeroGPU
+def snap8(x: int) -> int:
     x = max(256, min(1024, int(x)))
     return x - (x % 8)
 
-@spaces.GPU(duration=120)  # allocate a ZeroGPU slice only during generation
-def generate(prompt: str,
-             negative_prompt: str,
-             steps: int,
-             guidance_scale: float,
-             width: int,
-             height: int,
-             seed: int):
-    pipe = _get_pipe()
+@spaces.GPU(duration=120)
+def generate(prompt: str, negative: str, steps: int, cfg: float, width: int, height: int, seed: int):
+    pipe = get_pipe()
+    w, h = snap8(width), snap8(height)
 
-    w = _snap_dim(width)
-    h = _snap_dim(height)
-
-    # seed handling (reproducible on CUDA)
-    g = torch.Generator(device="cuda")
-    if int(seed) == -1:
+    # seed
+    if int(seed) < 0:
         seed = random.randint(0, 2**31 - 1)
-    g = g.manual_seed(int(seed))
+    gen = torch.Generator(device="cuda").manual_seed(int(seed))
 
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
@@ -64,42 +55,33 @@ def generate(prompt: str,
     with torch.autocast(device_type="cuda", dtype=DTYPE):
         out = pipe(
             prompt=str(prompt),
-            negative_prompt=str(negative_prompt or ""),
+            negative_prompt=str(negative or ""),
             num_inference_steps=int(steps),
-            guidance_scale=float(guidance_scale),
+            guidance_scale=float(cfg),
             width=w, height=h,
-            generator=g,
+            generator=gen,
         )
-    img: Image.Image = out.images[0]
-    return img, seed
+    return out.images[0]
 
-# ---------- UI ----------
+# -------- UI (one output, zero fancy) --------
 with gr.Blocks() as demo:
-    gr.Markdown("# 🧩 Stable Diffusion 1.5 (ZeroGPU)\nText prompt → image, lean & fast.")
+    gr.Markdown("# 🎨 Stable Diffusion 1.5 — ZeroGPU (minimal)")
 
     with gr.Row():
         with gr.Column():
-            prompt = gr.Textbox(
-                label="Prompt",
-                value="a cozy reading nook with warm sunlight, soft textures, cinematic lighting, highly detailed"
-            )
-            negative = gr.Textbox(
-                label="Negative prompt",
-                value="lowres, blurry, watermark, text, logo, nsfw"
-            )
-            steps = gr.Slider(4, 50, value=28, step=1, label="Steps")
-            cfg = gr.Slider(1.0, 12.0, value=7.0, step=0.5, label="CFG scale")
-            width = gr.Slider(256, 1024, value=640, step=16, label="Width")
-            height = gr.Slider(256, 1024, value=640, step=16, label="Height")
-            seed = gr.Number(value=-1, precision=0, label="Seed (-1 = random)")
-
-            btn = gr.Button("Generate", variant="primary")
+            prompt = gr.Textbox(label="Prompt", value="a cozy reading nook, warm sunlight, cinematic lighting, highly detailed")
+            negative = gr.Textbox(label="Negative (optional)", value="lowres, blurry, watermark, text")
+            steps = gr.Slider(8, 40, value=28, step=1, label="Steps")
+            cfg = gr.Slider(1.0, 12.0, value=7.0, step=0.5, label="CFG")
+            width = gr.Slider(256, 1024, value=640, step=16, label="Width")
+            height = gr.Slider(256, 1024, value=640, step=16, label="Height")
+            seed = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
+            btn = gr.Button("Generate", variant="primary")
         with gr.Column():
-            out_img = gr.Image(label="Result", interactive=False)
-            out_seed = gr.Number(label="Used seed", interactive=False)
+            out = gr.Image(label="Result", interactive=False)
 
-    btn.click(generate, [prompt, negative, steps, cfg, width, height, seed], [out_img, out_seed])
+    btn.click(generate, [prompt, negative, steps, cfg, width, height, seed], out)
 
 if __name__ == "__main__":
-    demo.queue(max_size=8).launch()
-
+    # keep it plain so the Space "just builds"
+    demo.launch()
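Two details of the rewritten generate() matter for anyone testing this outside the Space. ZeroGPU only attaches a CUDA device inside functions decorated with @spaces.GPU, which is why the commit builds torch.Generator(device="cuda") inside generate() rather than at module scope; the new code also stops returning the resolved seed to the UI. The sketch below is illustrative, not part of the commit (make_generator is a hypothetical name): a device-agnostic variant of the same seed logic, plus a quick check of the snap8() clamp/round rules shown in the diff.

import random
import torch

# Hypothetical helper (not in the commit): device-agnostic version of the
# seed handling in generate(), so the logic can be exercised on a machine
# without CUDA. Also returns the resolved seed, which the new generate()
# no longer surfaces.
def make_generator(seed: int) -> tuple[torch.Generator, int]:
    if int(seed) < 0:  # -1 (or any negative) = random, as in the diff
        seed = random.randint(0, 2**31 - 1)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    gen = torch.Generator(device=device).manual_seed(int(seed))
    return gen, int(seed)

# Sanity checks for snap8() as defined in the diff: clamp to [256, 1024],
# then round down to a multiple of 8.
def snap8(x: int) -> int:
    x = max(256, min(1024, int(x)))
    return x - (x % 8)

assert snap8(100) == 256    # clamped up to the 256 floor
assert snap8(2000) == 1024  # clamped down to the 1024 ceiling
assert snap8(641) == 640    # rounded down to a multiple of 8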
requirements.txt CHANGED
@@ -5,7 +5,3 @@ accelerate>=0.31.0
 safetensors
 gradio==4.44.1
 huggingface-hub
-Pillow
-spaces
-numpy
-mediapipe
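The removals appear to lean on what the Space image already provides: the spaces package is preinstalled on ZeroGPU hardware, Pillow and numpy arrive transitively with diffusers and gradio, and mediapipe was never imported by app.py. For running app.py outside Spaces, where spaces may be absent, a no-op stand-in (illustrative only, not part of the commit) keeps the import and decorator working:

# Illustrative fallback for local runs (not in the commit): mimic the
# spaces.GPU decorator as a pass-through when the package is missing.
try:
    import spaces
except ImportError:
    class spaces:  # minimal stand-in with the same call shape
        @staticmethod
        def GPU(*args, **kwargs):
            def wrap(fn):
                return fn  # run the function on whatever device exists
            return wrap

With this stub, @spaces.GPU(duration=120) still resolves to a decorator call and returns generate() unchanged, so the rest of the file imports and runs as-is.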