Spaces: Running on Zero
Tanut committed
Commit · 6400e55
Parent(s): 223fbde
Testing 2 Stable Diffusion
app.py CHANGED
@@ -7,22 +7,21 @@ import qrcode
 from qrcode.constants import ERROR_CORRECT_H
 from diffusers import (
     StableDiffusionControlNetPipeline,
-    StableDiffusionControlNetImg2ImgPipeline,
-    StableDiffusionPipeline,
+    StableDiffusionControlNetImg2ImgPipeline,  # for Hi-Res Fix
     ControlNetModel,
     DPMSolverMultistepScheduler,
 )
-from huggingface_hub import hf_hub_download
-from huggingface_hub.errors import EntryNotFoundError
 
 # Quiet matplotlib cache warning on Spaces
 os.environ.setdefault("MPLCONFIGDIR", "/tmp/mpl")
 
+# ---- base models for the two tabs ----
 BASE_MODELS = {
-    "
-    "dream": 
+    "stable-diffusion-v1-5": "runwayml/stable-diffusion-v1-5",
+    "dream": "Lykon/dreamshaper-8",
 }
 
+# ControlNet (QR Monster v2 for SD15)
 CN_QRMON = "monster-labs/control_v1p_sd15_qrcode_monster"
 DTYPE = torch.float16
 
@@ -48,6 +47,9 @@ def normalize_color(c):
     return "white"
 
 def make_qr(url="https://example.com", size=768, border=12, back_color="#FFFFFF", blur_radius=0.0):
+    """
+    IMPORTANT for Method 1: give ControlNet a sharp, black-on-WHITE QR (no blur).
+    """
     qr = qrcode.QRCode(version=None, error_correction=ERROR_CORRECT_H, box_size=10, border=int(border))
     qr.add_data(url.strip()); qr.make(fit=True)
     img = qr.make_image(fill_color="black", back_color=normalize_color(back_color)).convert("RGB")
@@ -57,6 +59,7 @@ def make_qr(url="https://example.com", size=768, border=12, back_color="#FFFFFF"
     return img
 
 def enforce_qr_contrast(stylized: Image.Image, qr_img: Image.Image, strength: float = 0.0, feather: float = 1.0) -> Image.Image:
+    """Optional gentle repair. Default OFF for Method 1."""
     if strength <= 0: return stylized
     q = qr_img.convert("L")
     black_mask = q.point(lambda p: 255 if p < 128 else 0).filter(ImageFilter.GaussianBlur(radius=float(feather)))
@@ -69,10 +72,9 @@ def enforce_qr_contrast(stylized: Image.Image, qr_img: Image.Image, strength: fl
     return Image.fromarray((s * 255.0).astype(np.uint8), mode="RGB")
 
 # ---------- lazy pipelines (CPU-offloaded for ZeroGPU) ----------
-_CN = None
-_CN_TXT2IMG = {}
-_CN_IMG2IMG = {}
-ACTIVE_MODEL_ID = None
+_CN = None         # shared ControlNet QR Monster
+_CN_TXT2IMG = {}   # per-base-model txt2img pipes
+_CN_IMG2IMG = {}   # per-base-model img2img pipes
 
 def _base_scheduler_for(pipe):
     pipe.scheduler = DPMSolverMultistepScheduler.from_config(
@@ -83,142 +85,35 @@ def _base_scheduler_for(pipe):
     pipe.enable_model_cpu_offload()
     return pipe
 
-def unload_all_pipes(keep_id=None):
-    for mid in list(_CN_TXT2IMG.keys()):
-        if mid != keep_id:
-            try: _CN_TXT2IMG[mid].to("cpu")
-            except: pass
-            del _CN_TXT2IMG[mid]
-    for mid in list(_CN_IMG2IMG.keys()):
-        if mid != keep_id:
-            try: _CN_IMG2IMG[mid].to("cpu")
-            except: pass
-            del _CN_IMG2IMG[mid]
-    if torch.cuda.is_available(): torch.cuda.empty_cache()
-    gc.collect()
-
 def get_cn():
     global _CN
     if _CN is None:
         _CN = ControlNetModel.from_pretrained(CN_QRMON, torch_dtype=DTYPE, use_safetensors=True)
     return _CN
 
-# ----- single-file fallback for Counterfeit (assemble ControlNet pipelines from components) -----
-CF_CANDIDATES = [
-    "Counterfeit-V3.0_fp16.safetensors",
-    "Counterfeit-V3.0.safetensors",
-    "Counterfeit-V3.0-pruned.safetensors",
-    "Counterfeit-V3.0.ckpt",
-]
-
-def _download_counterfeit_single_file():
-    last_err = None
-    for fname in CF_CANDIDATES:
-        try:
-            path = hf_hub_download(repo_id=BASE_MODELS["counterfeit"], filename=fname)
-            return path
-        except Exception as e:
-            last_err = e
-    raise last_err or RuntimeError("No suitable Counterfeit file found.")
-
-def _build_controlnet_pipes_from_single_file():
-    """Load Counterfeit via single .safetensors/.ckpt and assemble ControlNet txt2img/img2img pipelines."""
-    # 1) base SD pipeline from single file
-    ckpt_path = _download_counterfeit_single_file()
-    base = StableDiffusionPipeline.from_single_file(
-        ckpt_path,
-        torch_dtype=DTYPE,
-        safety_checker=None,
-        use_safetensors=True,
-    )
-    # 2) ControlNet
-    cn = get_cn()
-    # 3) Common kwargs (handle both image_processor/feature_extractor)
-    common = dict(
-        vae=base.vae,
-        text_encoder=base.text_encoder,
-        tokenizer=base.tokenizer,
-        unet=base.unet,
-        controlnet=cn,
-        scheduler=DPMSolverMultistepScheduler.from_config(
-            base.scheduler.config, use_karras_sigmas=True, algorithm_type="dpmsolver++"
-        ),
-        safety_checker=None,
-    )
-    if hasattr(base, "image_processor") and base.image_processor is not None:
-        common["image_processor"] = base.image_processor
-    if hasattr(base, "feature_extractor") and base.feature_extractor is not None:
-        common["feature_extractor"] = base.feature_extractor
-
-    txt = StableDiffusionControlNetPipeline(**common)
-    img = StableDiffusionControlNetImg2ImgPipeline(**common)
-    # memory-savers
-    for p in (txt, img):
-        p.enable_attention_slicing()
-        p.enable_vae_slicing()
-        p.enable_model_cpu_offload()
-    return txt, img
-
 def get_qrmon_txt2img_pipe(model_id: str):
-    global ACTIVE_MODEL_ID
-    if ACTIVE_MODEL_ID != model_id:
-        unload_all_pipes(keep_id=model_id)
-        ACTIVE_MODEL_ID = model_id
     if model_id not in _CN_TXT2IMG:
-        try:
-            pipe = StableDiffusionControlNetPipeline.from_pretrained(
-                model_id,
-                controlnet=get_cn(),
-                torch_dtype=DTYPE,
-                safety_checker=None,
-                use_safetensors=True,
-                low_cpu_mem_usage=True,
-            )
-        except EntryNotFoundError:
-            # Fallback: assemble from single file (Counterfeit)
-            txt, img = _build_controlnet_pipes_from_single_file()
-            _CN_TXT2IMG[model_id] = _base_scheduler_for(txt)
-            _CN_IMG2IMG[model_id] = _base_scheduler_for(img)
-            return _CN_TXT2IMG[model_id]
+        pipe = StableDiffusionControlNetPipeline.from_pretrained(
+            model_id,
+            controlnet=get_cn(),
+            torch_dtype=DTYPE,
+            safety_checker=None,
+            use_safetensors=True,
+            low_cpu_mem_usage=True,
+        )
     _CN_TXT2IMG[model_id] = _base_scheduler_for(pipe)
     return _CN_TXT2IMG[model_id]
 
 def get_qrmon_img2img_pipe(model_id: str):
-    global ACTIVE_MODEL_ID
-    if ACTIVE_MODEL_ID != model_id:
-        unload_all_pipes(keep_id=model_id)
-        ACTIVE_MODEL_ID = model_id
     if model_id not in _CN_IMG2IMG:
-        try:
-            pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-                model_id,
-                controlnet=get_cn(),
-                torch_dtype=DTYPE,
-                safety_checker=None,
-                use_safetensors=True,
-                low_cpu_mem_usage=True,
-            )
-        except EntryNotFoundError:
-            # Fallback already built in get_qrmon_txt2img_pipe; reuse if exists
-            if model_id not in _CN_TXT2IMG:
-                txt, img = _build_controlnet_pipes_from_single_file()
-                _CN_TXT2IMG[model_id] = _base_scheduler_for(txt)
-                _CN_IMG2IMG[model_id] = _base_scheduler_for(img)
-                return _CN_IMG2IMG[model_id]
-            else:
-                # Build only img2img from base components again
-                txt = _CN_TXT2IMG[model_id]
-                # Pull components from txt to create img2img
-                common = dict(
-                    vae=txt.vae, text_encoder=txt.text_encoder, tokenizer=txt.tokenizer,
-                    unet=txt.unet, controlnet=txt.controlnet, scheduler=txt.scheduler,
-                    safety_checker=None,
-                )
-                if hasattr(txt, "image_processor"):
-                    common["image_processor"] = txt.image_processor
-                if hasattr(txt, "feature_extractor"):
-                    common["feature_extractor"] = txt.feature_extractor
-                pipe = StableDiffusionControlNetImg2ImgPipeline(**common)
+        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
+            model_id,
+            controlnet=get_cn(),
+            torch_dtype=DTYPE,
+            safety_checker=None,
+            use_safetensors=True,
+            low_cpu_mem_usage=True,
+        )
     _CN_IMG2IMG[model_id] = _base_scheduler_for(pipe)
     return _CN_IMG2IMG[model_id]
 
@@ -231,12 +126,16 @@ def _qr_txt2img_core(model_id: str,
                      repair_strength: float, feather: float):
 
     s = snap8(size)
+
+    # Control image: crisp black-on-white QR
     qr_img = make_qr(url=url, size=s, border=int(border), back_color="#FFFFFF", blur_radius=0.0)
 
+    # Seed / generator
     if int(seed) < 0:
         seed = random.randint(0, 2**31 - 1)
     gen = torch.Generator(device="cuda").manual_seed(int(seed))
 
+    # --- Stage A: txt2img with ControlNet
    pipe = get_qrmon_txt2img_pipe(model_id)
    if torch.cuda.is_available(): torch.cuda.empty_cache()
    gc.collect()
@@ -244,8 +143,8 @@ def _qr_txt2img_core(model_id: str,
     out = pipe(
         prompt=str(style_prompt),
         negative_prompt=str(negative or ""),
-        image=qr_img,
-        controlnet_conditioning_scale=float(qr_weight),
+        image=qr_img,                                    # control image for txt2img
+        controlnet_conditioning_scale=float(qr_weight),  # ~1.0–1.2 works well
         control_guidance_start=0.0,
         control_guidance_end=1.0,
         num_inference_steps=int(steps),
@@ -256,6 +155,7 @@ def _qr_txt2img_core(model_id: str,
     lowres = out.images[0]
     lowres = enforce_qr_contrast(lowres, qr_img, strength=float(repair_strength), feather=float(feather))
 
+    # --- Optional Stage B: Hi-Res Fix (img2img with same QR)
     final = lowres
     if use_hires:
         up = max(1.0, min(2.0, float(hires_upscale)))
@@ -267,9 +167,9 @@ def _qr_txt2img_core(model_id: str,
         out2 = pipe2(
             prompt=str(style_prompt),
             negative_prompt=str(negative or ""),
-            image=lowres,
-            control_image=qr_img,
-            strength=float(hires_strength),
+            image=lowres,                       # init image
+            control_image=qr_img,               # same QR
+            strength=float(hires_strength),     # ~0.7 like "Hires Fix"
             controlnet_conditioning_scale=float(qr_weight),
             control_guidance_start=0.0,
             control_guidance_end=1.0,
@@ -283,9 +183,10 @@ def _qr_txt2img_core(model_id: str,
     final = enforce_qr_contrast(final, qr_img, strength=float(repair_strength), feather=float(feather))
     return final, lowres, qr_img
 
+# Wrappers for each tab (so Gradio can bind without passing the model id)
 @spaces.GPU(duration=120)
-def 
-    return _qr_txt2img_core(BASE_MODELS["
+def qr_txt2img_anything(*args):
+    return _qr_txt2img_core(BASE_MODELS["stable-diffusion-v1-5"], *args)
 
 @spaces.GPU(duration=120)
 def qr_txt2img_dream(*args):
@@ -293,19 +194,18 @@ def qr_txt2img_dream(*args):
 
 # ---------- UI ----------
 with gr.Blocks() as demo:
-    gr.Markdown("# ZeroGPU • Method 1: QR Control (
+    gr.Markdown("# ZeroGPU • Method 1: QR Control (two base models)")
 
-
+    # ---- Tab 1: stable-diffusion-v1-5 (anime/illustration) ----
+    with gr.Tab("stable-diffusion-v1-5"):
         url1 = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
-        s_prompt1 = gr.Textbox(label="Style prompt",
-
-        s_negative1= gr.Textbox(label="Negative prompt",
-            value="ugly, low quality, blurry, nsfw, watermark, text, low contrast, deformed, extra digits")
+        s_prompt1 = gr.Textbox(label="Style prompt", value="japanese painting, elegant shrine and torii, distant mount fuji, autumn maple trees, warm sunlight, 1girl in kimono, highly detailed, intricate patterns, anime key visual, dramatic composition")
+        s_negative1= gr.Textbox(label="Negative prompt", value="ugly, low quality, blurry, nsfw, watermark, text, low contrast, deformed, extra digits")
         size1 = gr.Slider(384, 1024, value=512, step=64, label="Canvas (px)")
-        steps1 = gr.Slider(10, 50, value=
+        steps1 = gr.Slider(10, 50, value=20, step=1, label="Steps")
         cfg1 = gr.Slider(1.0, 12.0, value=7.0, step=0.1, label="CFG")
-        border1 = gr.Slider(2, 16, value=
-        qr_w1 = gr.Slider(0.
+        border1 = gr.Slider(2, 16, value=4, step=1, label="QR border (quiet zone)")
+        qr_w1 = gr.Slider(0.6, 1.6, value=1.5, step=0.05, label="QR control weight")
         seed1 = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
 
         use_hires1 = gr.Checkbox(value=True, label="Hi-Res Fix (img2img upscale)")
@@ -319,22 +219,23 @@ with gr.Blocks() as demo:
         low_img1 = gr.Image(label="Low-res (Stage A) preview")
         ctrl_img1 = gr.Image(label="Control QR used")
 
-        gr.Button("Generate with 
-
+        gr.Button("Generate with Anything v4.5").click(
+            qr_txt2img_anything,
             [url1, s_prompt1, s_negative1, steps1, cfg1, size1, border1, qr_w1, seed1,
              use_hires1, hires_up1, hires_str1, repair1, feather1],
             [final_img1, low_img1, ctrl_img1]
         )
 
-
+    # ---- Tab 2: DreamShaper (general art/painterly) ----
+    with gr.Tab("DreamShaper 8"):
         url2 = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
-        s_prompt2 = gr.Textbox(label="Style prompt", value="baroque palace interior, gilded details, chandeliers, volumetric 
+        s_prompt2 = gr.Textbox(label="Style prompt", value="ornate baroque palace interior, gilded details, chandeliers, volumetric light, ultra detailed, cinematic")
         s_negative2= gr.Textbox(label="Negative prompt", value="lowres, low contrast, blurry, jpeg artifacts, watermark, text, bad anatomy")
         size2 = gr.Slider(384, 1024, value=512, step=64, label="Canvas (px)")
         steps2 = gr.Slider(10, 50, value=24, step=1, label="Steps")
         cfg2 = gr.Slider(1.0, 12.0, value=6.8, step=0.1, label="CFG")
         border2 = gr.Slider(2, 16, value=8, step=1, label="QR border (quiet zone)")
-        qr_w2 = gr.Slider(0.
+        qr_w2 = gr.Slider(0.6, 1.6, value=1.5, step=0.05, label="QR control weight")
         seed2 = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
 
         use_hires2 = gr.Checkbox(value=True, label="Hi-Res Fix (img2img upscale)")
@@ -356,5 +257,4 @@ with gr.Blocks() as demo:
     )
 
 if __name__ == "__main__":
-    gr.set_static_paths(["/tmp"])
     demo.queue(max_size=12).launch()
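
The diff elides the middle of enforce_qr_contrast, so here is a minimal standalone sketch of the "gentle repair" idea for readers: blend the stylized image back toward black/white only under feathered masks of the QR modules. The repair_qr name, the white-module mask, and the blend arithmetic are assumptions for illustration, not the Space's exact code.

import numpy as np
from PIL import Image, ImageFilter

def repair_qr(stylized, qr_img, strength=0.25, feather=1.0):
    # strength <= 0 disables the repair, matching the Space's default-off behavior
    if strength <= 0:
        return stylized
    q = qr_img.convert("L")
    # Feathered masks for the dark and light QR modules
    black = q.point(lambda p: 255 if p < 128 else 0).filter(ImageFilter.GaussianBlur(float(feather)))
    white = q.point(lambda p: 255 if p >= 128 else 0).filter(ImageFilter.GaussianBlur(float(feather)))
    s = np.asarray(stylized.convert("RGB"), dtype=np.float32) / 255.0
    b = np.asarray(black, dtype=np.float32)[..., None] / 255.0
    w = np.asarray(white, dtype=np.float32)[..., None] / 255.0
    s = s * (1.0 - strength * b)            # pull dark modules toward black
    s = s + (1.0 - s) * (strength * w)      # pull light modules toward white
    return Image.fromarray((s * 255.0).astype(np.uint8), mode="RGB")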
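
End-to-end, the commit's Method 1 is: render a crisp black-on-white QR, run ControlNet txt2img against it (Stage A), then optionally re-run the result through ControlNet img2img as a "Hi-Res Fix" (Stage B). Below is a condensed, self-contained sketch of that flow; the model id, prompt, sizes and weights are example values, and the Space itself additionally wires in the DPM++ scheduler, CPU offload, per-model caching and the contrast repair above.

import qrcode, torch
from qrcode.constants import ERROR_CORRECT_H
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    StableDiffusionControlNetImg2ImgPipeline,
)

# Shared ControlNet (QR Monster for SD15), as in the commit
cn = ControlNetModel.from_pretrained(
    "monster-labs/control_v1p_sd15_qrcode_monster", torch_dtype=torch.float16)

# Crisp black-on-white QR as the control image
qr = qrcode.QRCode(version=None, error_correction=ERROR_CORRECT_H, box_size=10, border=4)
qr.add_data("https://example.com"); qr.make(fit=True)
control = qr.make_image(fill_color="black", back_color="white").convert("RGB").resize((512, 512))

# Stage A: txt2img guided by the QR
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "Lykon/dreamshaper-8", controlnet=cn, torch_dtype=torch.float16, safety_checker=None
).to("cuda")
lowres = pipe(
    prompt="baroque palace interior, gilded details, chandeliers",
    negative_prompt="lowres, blurry, watermark, text",
    image=control,
    controlnet_conditioning_scale=1.2,
    num_inference_steps=24,
).images[0]

# Stage B ("Hi-Res Fix"): img2img upscale with the same QR as control
pipe2 = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "Lykon/dreamshaper-8", controlnet=cn, torch_dtype=torch.float16, safety_checker=None
).to("cuda")
final = pipe2(
    prompt="baroque palace interior, gilded details, chandeliers",
    image=lowres.resize((768, 768)),           # init image
    control_image=control.resize((768, 768)),  # same QR
    strength=0.7,
    controlnet_conditioning_scale=1.2,
).images[0]
final.save("qr_art.png")

The second from_pretrained call reuses the locally cached base weights and the single ControlNet instance; the Space goes further and keeps one pipeline per base model with enable_model_cpu_offload() for ZeroGPU.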