Spaces · Tanut committed · commit 708ab1d · 1 parent: 0e6feda

Testing 2 Stable Diffusion

Files changed:
- app.py (+30 −41)
- requirements.txt (+11 −8)
app.py
CHANGED
@@ -12,22 +12,31 @@ from diffusers import (
     DPMSolverMultistepScheduler,
 )
 
+# --- gradio_client bool-schema hotfix (prevents blank page on Spaces) ---
+try:
+    import gradio_client.utils as _gcu
+    _orig_get_type = _gcu.get_type
+    def _get_type_safe(schema):
+        if isinstance(schema, bool):  # handle JSON Schema True/False
+            return "any"
+        return _orig_get_type(schema)
+    _gcu.get_type = _get_type_safe
+except Exception:
+    pass
+# -----------------------------------------------------------------------
+
 # Quiet matplotlib cache warning on Spaces
 os.environ.setdefault("MPLCONFIGDIR", "/tmp/mpl")
 
-#
+# Token helper: add a Secret in your Space named HUGGINGFACE_HUB_TOKEN
 def _hf_auth():
-    # Add this in Space Settings → Variables → New secret
-    # Name: HUGGINGFACE_HUB_TOKEN   Value: <your HF token>
     tok = os.getenv("HUGGINGFACE_HUB_TOKEN") or os.getenv("HF_TOKEN")
-    if not tok:
-        return {}
-    return {"token": tok, "use_auth_token": tok}
+    return {"token": tok, "use_auth_token": tok} if tok else {}
 
 # ---- base models for the two tabs ----
 BASE_MODELS = {
     "stable-diffusion-v1-5": "runwayml/stable-diffusion-v1-5",
-    "dream":
+    "dream": "Lykon/dreamshaper-8",
 }
 
 # ControlNet (QR Monster v2 for SD15)
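Note (not part of the commit): the hotfix works because `True` and `False` are themselves valid JSON Schemas ("accept anything" / "accept nothing"), and `gradio_client.utils.get_type` assumes a dict, so a bool schema raises TypeError and blanks the whole Space UI. A quick sanity check of the patched behavior:

    import gradio_client.utils as _gcu
    print(_gcu.get_type(True))  # -> "any" after the patch; TypeError before it

The rewritten `_hf_auth()` returns kwargs that can be splatted into any `from_pretrained` call; with no token configured it degrades to `{}`, so public models still load anonymously.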
@@ -56,9 +65,6 @@ def normalize_color(c):
     return "white"
 
 def make_qr(url="https://example.com", size=768, border=12, back_color="#FFFFFF", blur_radius=0.0):
-    """
-    IMPORTANT for Method 1: give ControlNet a sharp, black-on-WHITE QR (no blur).
-    """
     qr = qrcode.QRCode(version=None, error_correction=ERROR_CORRECT_H, box_size=10, border=int(border))
     qr.add_data(url.strip()); qr.make(fit=True)
     img = qr.make_image(fill_color="black", back_color=normalize_color(back_color)).convert("RGB")
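Usage sketch (not in the commit): the generator can be exercised on its own; `size` and `blur_radius` handling sits in lines elided from this diff, and `ERROR_CORRECT_H` is presumably imported from `qrcode.constants` near the top of app.py:

    ctrl = make_qr("https://example.com", size=768, border=12)
    ctrl.save("control_qr.png")  # sharp black-on-white control image, no blur

`ERROR_CORRECT_H` keeps up to ~30% of the codewords redundant, which is what lets heavy stylization stay scannable.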
@@ -68,7 +74,6 @@ def make_qr(url="https://example.com", size=768, border=12, back_color="#FFFFFF"
     return img
 
 def enforce_qr_contrast(stylized: Image.Image, qr_img: Image.Image, strength: float = 0.0, feather: float = 1.0) -> Image.Image:
-    """Optional gentle repair. Default OFF for Method 1."""
     if strength <= 0: return stylized
     q = qr_img.convert("L")
     black_mask = q.point(lambda p: 255 if p < 128 else 0).filter(ImageFilter.GaussianBlur(radius=float(feather)))
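Only the docstring is deleted here; the repair itself thresholds the control QR into a mask and feathers it. A standalone sketch of the idea (hypothetical code, not the commit's):

    from PIL import Image, ImageFilter
    q = Image.open("control_qr.png").convert("L")
    # bright where QR modules are black; GaussianBlur feathers the edges
    black_mask = q.point(lambda p: 255 if p < 128 else 0).filter(ImageFilter.GaussianBlur(radius=1.0))
    # compositing the stylized image toward black under this mask, scaled by
    # `strength`, nudges damaged modules back toward scannable contrast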
@@ -97,12 +102,7 @@ def _base_scheduler_for(pipe):
 def get_cn():
     global _CN
     if _CN is None:
-        _CN = ControlNetModel.from_pretrained(
-            CN_QRMON,
-            torch_dtype=DTYPE,
-            use_safetensors=True,
-            **_hf_auth()
-        )
+        _CN = ControlNetModel.from_pretrained(CN_QRMON, torch_dtype=DTYPE, use_safetensors=True, **_hf_auth())
     return _CN
 
 def get_qrmon_txt2img_pipe(model_id: str):
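Collapsing `from_pretrained` to one line changes nothing behaviorally: `_CN` stays a lazy module-level singleton, so the ControlNet weights download once and later calls reuse them. The same pattern, with hypothetical names:

    _CACHE = {}
    def get_model(model_id):
        if model_id not in _CACHE:                      # first call pays the load cost
            _CACHE[model_id] = load_weights(model_id)   # hypothetical loader
        return _CACHE[model_id]                         # subsequent calls are free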
@@ -114,7 +114,7 @@ def get_qrmon_txt2img_pipe(model_id: str):
         safety_checker=None,
         use_safetensors=True,
         low_cpu_mem_usage=True,
-        **_hf_auth()
+        **_hf_auth(),
     )
     _CN_TXT2IMG[model_id] = _base_scheduler_for(pipe)
     return _CN_TXT2IMG[model_id]
@@ -128,7 +128,7 @@ def get_qrmon_img2img_pipe(model_id: str):
         safety_checker=None,
         use_safetensors=True,
         low_cpu_mem_usage=True,
-        **_hf_auth()
+        **_hf_auth(),
     )
     _CN_IMG2IMG[model_id] = _base_scheduler_for(pipe)
     return _CN_IMG2IMG[model_id]
@@ -143,15 +143,12 @@ def _qr_txt2img_core(model_id: str,
 
     s = snap8(size)
 
-    # Control image: crisp black-on-white QR
     qr_img = make_qr(url=url, size=s, border=int(border), back_color="#FFFFFF", blur_radius=0.0)
 
-    # Seed / generator
     if int(seed) < 0:
         seed = random.randint(0, 2**31 - 1)
     gen = torch.Generator(device="cuda").manual_seed(int(seed))
 
-    # --- Stage A: txt2img with ControlNet
     pipe = get_qrmon_txt2img_pipe(model_id)
     if torch.cuda.is_available(): torch.cuda.empty_cache()
     gc.collect()
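`snap8` is defined outside this diff; SD-1.5 UNets need height and width divisible by 8, so a typical implementation would be (an assumption, not the commit's code):

    def snap8(x):
        return max(64, (int(x) // 8) * 8)  # hypothetical: round down to a multiple of 8

Re-running with the same non-negative seed through the seeded CUDA generator should reproduce the same composition.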
@@ -159,8 +156,8 @@ def _qr_txt2img_core(model_id: str,
     out = pipe(
         prompt=str(style_prompt),
         negative_prompt=str(negative or ""),
-        image=qr_img,
-        controlnet_conditioning_scale=float(qr_weight),
+        image=qr_img,
+        controlnet_conditioning_scale=float(qr_weight),
         control_guidance_start=0.0,
         control_guidance_end=1.0,
         num_inference_steps=int(steps),
@@ -171,7 +168,6 @@ def _qr_txt2img_core(model_id: str,
     lowres = out.images[0]
     lowres = enforce_qr_contrast(lowres, qr_img, strength=float(repair_strength), feather=float(feather))
 
-    # --- Optional Stage B: Hi-Res Fix (img2img with same QR)
     final = lowres
     if use_hires:
         up = max(1.0, min(2.0, float(hires_upscale)))
@@ -183,9 +179,9 @@ def _qr_txt2img_core(model_id: str,
         out2 = pipe2(
             prompt=str(style_prompt),
             negative_prompt=str(negative or ""),
-            image=lowres,
-            control_image=qr_img,
-            strength=float(hires_strength),
+            image=lowres,
+            control_image=qr_img,
+            strength=float(hires_strength),
             controlnet_conditioning_scale=float(qr_weight),
             control_guidance_start=0.0,
             control_guidance_end=1.0,
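The three marked lines change only in whitespace, but they carry the Hi-Res Fix design: `image=lowres` is the init image being re-denoised, `control_image=qr_img` re-applies the same QR conditioning at the larger size, and `strength` sets how much of the upscale gets re-imagined. The upscale itself sits in lines elided from this diff; a sketch of what presumably precedes the call (an assumption, PIL only):

    up = max(1.0, min(2.0, float(hires_upscale)))          # clamp, as in the visible code
    new_w = int(lowres.width * up) // 8 * 8                # keep dims divisible by 8
    new_h = int(lowres.height * up) // 8 * 8
    lowres = lowres.resize((new_w, new_h), Image.LANCZOS)  # hypothetical resize step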
@@ -199,7 +195,6 @@ def _qr_txt2img_core(model_id: str,
     final = enforce_qr_contrast(final, qr_img, strength=float(repair_strength), feather=float(feather))
     return final, lowres, qr_img
 
-# Wrappers for each tab (so Gradio can bind without passing the model id)
 @spaces.GPU(duration=120)
 def qr_txt2img_anything(*args):
     return _qr_txt2img_core(BASE_MODELS["stable-diffusion-v1-5"], *args)
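The matching DreamShaper wrapper sits in lines elided here; given the hunk header below, it presumably mirrors this one:

    @spaces.GPU(duration=120)
    def qr_txt2img_dream(*args):
        return _qr_txt2img_core(BASE_MODELS["dream"], *args)

`@spaces.GPU` is what makes ZeroGPU attach a GPU for the call; `duration=120` requests up to two minutes per generation.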
@@ -212,7 +207,6 @@ def qr_txt2img_dream(*args):
 with gr.Blocks() as demo:
     gr.Markdown("# ZeroGPU • Method 1: QR Control (two base models)")
 
-    # ---- Tab 1: stable-diffusion-v1-5 ----
     with gr.Tab("stable-diffusion-v1-5"):
         url1 = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
         s_prompt1 = gr.Textbox(label="Style prompt", value="japanese painting, elegant shrine and torii, distant mount fuji, autumn maple trees, warm sunlight, 1girl in kimono, highly detailed, intricate patterns, anime key visual, dramatic composition")
@@ -223,18 +217,14 @@ with gr.Blocks() as demo:
         border1 = gr.Slider(2, 16, value=4, step=1, label="QR border (quiet zone)")
         qr_w1 = gr.Slider(0.6, 1.6, value=1.5, step=0.05, label="QR control weight")
         seed1 = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
-
         use_hires1 = gr.Checkbox(value=True, label="Hi-Res Fix (img2img upscale)")
         hires_up1 = gr.Slider(1.0, 2.0, value=2.0, step=0.25, label="Hi-Res upscale (×)")
         hires_str1 = gr.Slider(0.3, 0.9, value=0.7, step=0.05, label="Hi-Res denoise strength")
-
         repair1 = gr.Slider(0.0, 1.0, value=0.0, step=0.05, label="Post repair strength (optional)")
         feather1 = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)")
-
         final_img1 = gr.Image(label="Final (or Hi-Res) image")
         low_img1 = gr.Image(label="Low-res (Stage A) preview")
         ctrl_img1 = gr.Image(label="Control QR used")
-
         gr.Button("Generate with stable-diffusion-v1-5").click(
             qr_txt2img_anything,
             [url1, s_prompt1, s_negative1, steps1, cfg1, size1, border1, qr_w1, seed1,
@@ -242,7 +232,6 @@ with gr.Blocks() as demo:
             [final_img1, low_img1, ctrl_img1]
         )
 
-    # ---- Tab 2: DreamShaper (general art/painterly) ----
     with gr.Tab("DreamShaper 8"):
         url2 = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
         s_prompt2 = gr.Textbox(label="Style prompt", value="ornate baroque palace interior, gilded details, chandeliers, volumetric light, ultra detailed, cinematic")
@@ -253,18 +242,14 @@ with gr.Blocks() as demo:
         border2 = gr.Slider(2, 16, value=8, step=1, label="QR border (quiet zone)")
         qr_w2 = gr.Slider(0.6, 1.6, value=1.5, step=0.05, label="QR control weight")
         seed2 = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
-
         use_hires2 = gr.Checkbox(value=True, label="Hi-Res Fix (img2img upscale)")
         hires_up2 = gr.Slider(1.0, 2.0, value=2.0, step=0.25, label="Hi-Res upscale (×)")
         hires_str2 = gr.Slider(0.3, 0.9, value=0.7, step=0.05, label="Hi-Res denoise strength")
-
         repair2 = gr.Slider(0.0, 1.0, value=0.0, step=0.05, label="Post repair strength (optional)")
         feather2 = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)")
-
         final_img2 = gr.Image(label="Final (or Hi-Res) image")
         low_img2 = gr.Image(label="Low-res (Stage A) preview")
         ctrl_img2 = gr.Image(label="Control QR used")
-
         gr.Button("Generate with DreamShaper 8").click(
             qr_txt2img_dream,
             [url2, s_prompt2, s_negative2, steps2, cfg2, size2, border2, qr_w2, seed2,
@@ -273,5 +258,9 @@ with gr.Blocks() as demo:
     )
 
 if __name__ == "__main__":
-    #
-    demo.queue(max_size=12).launch(
+    # Keep launch simple on Spaces
+    demo.queue(max_size=12).launch(
+        server_name="0.0.0.0",
+        server_port=int(os.environ.get("PORT", 7860)),
+        show_error=True,
+    )
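Note (not part of the commit): `server_name="0.0.0.0"` is what lets the Spaces proxy reach the app from outside the container, `PORT` is the env var Spaces injects (7860 is Gradio's default for local runs), and `show_error=True` surfaces Python tracebacks in the browser instead of a silent blank failure.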
requirements.txt
CHANGED
@@ -1,13 +1,16 @@
+# Core
 numpy==1.26.4
 torch==2.2.0
 diffusers==0.30.2
-accelerate
-transformers
-safetensors
-gradio>=4.44.1
-pydantic==2.10.6
+accelerate==0.25.0
+transformers==4.40.2
+safetensors==0.4.3
 huggingface_hub==0.29.3
-spaces
-qrcode[pil]
-Pillow
+
+# UI / HF Spaces
+gradio==4.44.1
+spaces==0.27.1
+
+# QR + imaging
+qrcode[pil]==7.4.2
+Pillow==10.3.0