Tanut committed
Commit 51276d0 · 1 Parent(s): e8943d1

Testing img2img

Files changed (1):
  1. app.py +88 -180
app.py CHANGED
@@ -7,17 +7,18 @@ import qrcode
 from qrcode.constants import ERROR_CORRECT_H
 from diffusers import (
     StableDiffusionPipeline,
-    StableDiffusionControlNetPipeline,          # TXT2IMG (Method 1)
-    StableDiffusionControlNetImg2ImgPipeline,   # two-stage img2img
+    StableDiffusionControlNetPipeline,
+    StableDiffusionControlNetImg2ImgPipeline,   # for Hi-Res Fix
     ControlNetModel,
     DPMSolverMultistepScheduler,
 )
 
-# Optional: silence matplotlib cache warning in Spaces
+# Quiet matplotlib cache warning on Spaces
 os.environ.setdefault("MPLCONFIGDIR", "/tmp/mpl")
 
 MODEL_ID = "runwayml/stable-diffusion-v1-5"
-CN_QRMON = "monster-labs/control_v1p_sd15_qrcode_monster"
+# You can swap to a QR-Pattern-v2 repo if you know one on HF.
+CN_QRMON = "monster-labs/control_v1p_sd15_qrcode_monster"
 DTYPE = torch.float16
 
 # ---------- helpers ----------
@@ -41,8 +42,11 @@ def normalize_color(c):
         return s
     return "white"
 
-def make_qr(url="http://www.mybirdfire.com", size=768, border=12, back_color="#808080", blur_radius=1.2):
-    # Mid-gray background improves blending & scan rate with QR-Monster.
+def make_qr(url="http://www.mybirdfire.com", size=768, border=12, back_color="#FFFFFF", blur_radius=0.0):
+    """
+    IMPORTANT for Method 1: give ControlNet a sharp, black-on-WHITE QR.
+    (No blur. Pixel-perfect.)
+    """
     qr = qrcode.QRCode(version=None, error_correction=ERROR_CORRECT_H, box_size=10, border=int(border))
     qr.add_data(url.strip()); qr.make(fit=True)
     img = qr.make_image(fill_color="black", back_color=normalize_color(back_color)).convert("RGB")
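Reviewer note: `border` here is the QR quiet zone measured in modules; the spec minimum is 4, which is what the new UI default further down uses. For a feel of the control image's native resolution, presumably before it is resized to `size`, a rough calculation (illustrative only; the version-3 module count is an assumption, since `version=None` with `fit=True` lets the library pick):

```python
# Not part of the diff: approximate native width of the rendered QR.
modules = 17 + 4 * 3        # a version-3 symbol is 29 modules wide (assumed version)
box_size, border = 10, 4    # box_size as in make_qr; border per the new UI default
print((modules + 2 * border) * box_size)  # -> 370 px, before any resize to `size`
```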
@@ -51,16 +55,16 @@ def make_qr(url="http://www.mybirdfire.com", size=768, border=12, back_color="#8
         img = img.filter(ImageFilter.GaussianBlur(radius=float(blur_radius)))
     return img
 
-def enforce_qr_contrast(stylized: Image.Image, qr_img: Image.Image, strength: float = 0.6, feather: float = 1.0) -> Image.Image:
-    """Gently push ControlNet-required blacks/whites for scannability."""
+def enforce_qr_contrast(stylized: Image.Image, qr_img: Image.Image, strength: float = 0.0, feather: float = 1.0) -> Image.Image:
+    """Optional gentle repair. Default OFF for Method 1."""
     if strength <= 0: return stylized
     q = qr_img.convert("L")
     black_mask = q.point(lambda p: 255 if p < 128 else 0).filter(ImageFilter.GaussianBlur(radius=float(feather)))
     black = np.asarray(black_mask, dtype=np.float32) / 255.0
     white = 1.0 - black
     s = np.asarray(stylized.convert("RGB"), dtype=np.float32) / 255.0
-    s = s * (1.0 - float(strength) * black[..., None])                 # deepen blacks
-    s = s + (1.0 - s) * (float(strength) * 0.85 * white[..., None])    # lift whites
+    s = s * (1.0 - float(strength) * black[..., None])
+    s = s + (1.0 - s) * (float(strength) * 0.85 * white[..., None])
    s = np.clip(s, 0.0, 1.0)
     return Image.fromarray((s * 255.0).astype(np.uint8), mode="RGB")
 
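Reviewer note on the math this hunk keeps: under black QR modules the pixel is multiplied toward 0, and elsewhere it is pushed linearly toward 1, both in proportion to `strength`. Worked through for a mid-gray pixel at the old default `strength=0.6` (illustrative arithmetic only):

```python
# Not part of the diff: effect of enforce_qr_contrast on a single pixel.
s, strength = 0.5, 0.6
under_black = s * (1.0 - strength)               # 0.5 -> 0.20 (deepened)
under_white = s + (1.0 - s) * (strength * 0.85)  # 0.5 -> 0.755 (lifted)
```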
@@ -80,43 +84,29 @@ def get_sd_pipe():
     global _SD
     if _SD is None:
         pipe = StableDiffusionPipeline.from_pretrained(
-            MODEL_ID,
-            torch_dtype=DTYPE,
-            safety_checker=None,
-            use_safetensors=True,
-            low_cpu_mem_usage=True,
+            MODEL_ID, torch_dtype=DTYPE, safety_checker=None, use_safetensors=True, low_cpu_mem_usage=True
         )
         _SD = _base_scheduler_for(pipe)
     return _SD
 
 def get_qrmon_txt2img_pipe():
-    """Method 1 (TXT2IMG): SD + ControlNet QR-Monster, no init image, only conditioning image."""
     global _CN_TXT2IMG
     if _CN_TXT2IMG is None:
         cn = ControlNetModel.from_pretrained(CN_QRMON, torch_dtype=DTYPE, use_safetensors=True)
         pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            MODEL_ID,
-            controlnet=cn,
-            torch_dtype=DTYPE,
-            safety_checker=None,
-            use_safetensors=True,
-            low_cpu_mem_usage=True,
+            MODEL_ID, controlnet=cn, torch_dtype=DTYPE, safety_checker=None,
+            use_safetensors=True, low_cpu_mem_usage=True
         )
         _CN_TXT2IMG = _base_scheduler_for(pipe)
     return _CN_TXT2IMG
 
 def get_qrmon_img2img_pipe():
-    """Two-stage B: SD img2img with ControlNet QR-Monster (kept so you can compare)."""
     global _CN_IMG2IMG
     if _CN_IMG2IMG is None:
         cn = ControlNetModel.from_pretrained(CN_QRMON, torch_dtype=DTYPE, use_safetensors=True)
         pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-            MODEL_ID,
-            controlnet=cn,
-            torch_dtype=DTYPE,
-            safety_checker=None,
-            use_safetensors=True,
-            low_cpu_mem_usage=True,
+            MODEL_ID, controlnet=cn, torch_dtype=DTYPE, safety_checker=None,
+            use_safetensors=True, low_cpu_mem_usage=True
         )
         _CN_IMG2IMG = _base_scheduler_for(pipe)
     return _CN_IMG2IMG
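Reviewer note: `snap8` is defined outside this diff. Judging from its call sites (`s = snap8(size)` and `W = snap8(int(s * up))` in the next hunk), it presumably rounds a dimension to a multiple of 8, which SD1.5 requires because its VAE downsamples by a factor of 8. A minimal sketch, assuming that behavior:

```python
def snap8(x: int) -> int:
    # Hypothetical stand-in for the real helper, which is not shown in this diff.
    return max(8, (int(x) // 8) * 8)
```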
@@ -142,196 +132,114 @@ def txt2img(prompt: str, negative: str, steps: int, cfg: float, width: int, heig
     )
     return out.images[0]
 
-# ---- Method 1: TXT2IMG ControlNet (no init image; QR as conditioning only) ----
+# -------- Method 1: QR control model in text-to-image (+ optional Hi-Res Fix) --------
 @spaces.GPU(duration=120)
-def qr_txt2img(url: str, style_prompt: str, negative: str, steps: int, cfg: float,
-               size: int, border: int, back_color: str, blur: float,
-               qr_weight: float, start: float, end: float, seed: int,
+def qr_txt2img(url: str, style_prompt: str, negative: str,
+               steps: int, cfg: float, size: int, border: int,
+               qr_weight: float, seed: int,
+               use_hires: bool, hires_upscale: float, hires_strength: float,
                repair_strength: float, feather: float):
+
     s = snap8(size)
-    qr_img = make_qr(url=url, size=s, border=int(border), back_color=back_color, blur_radius=float(blur))
 
+    # Control image: crisp black-on-white QR
+    qr_img = make_qr(url=url, size=s, border=int(border), back_color="#FFFFFF", blur_radius=0.0)
+
+    # Seed / generator
     if int(seed) < 0:
         seed = random.randint(0, 2**31 - 1)
     gen = torch.Generator(device="cuda").manual_seed(int(seed))
 
+    # --- Stage A: txt2img with ControlNet (the actual "Method 1")
     pipe = get_qrmon_txt2img_pipe()
     if torch.cuda.is_available(): torch.cuda.empty_cache()
     gc.collect()
-    with torch.autocast(device_type="cuda", dtype=DTYPE):
-        try:
-            out = pipe(
-                prompt=str(style_prompt),
-                negative_prompt=str(negative or ""),
-                image=qr_img,                     # ControlNet conditioning
-                controlnet_conditioning_scale=float(qr_weight),
-                control_guidance_start=float(start),
-                control_guidance_end=float(end),
-                num_inference_steps=int(steps),
-                guidance_scale=float(cfg),
-                width=s, height=s,
-                generator=gen,
-            )
-        except TypeError:
-            # Fallback for older diffusers param names
-            out = pipe(
-                prompt=str(style_prompt),
-                negative_prompt=str(negative or ""),
-                control_image=qr_img,
-                controlnet_conditioning_scale=float(qr_weight),
-                controlnet_start=float(start),
-                controlnet_end=float(end),
-                num_inference_steps=int(steps),
-                guidance_scale=float(cfg),
-                width=s, height=s,
-                generator=gen,
-            )
-
-    img = out.images[0]
-    img = enforce_qr_contrast(img, qr_img, strength=float(repair_strength), feather=float(feather))
-    return img, qr_img
-
-# ---- Two-stage (your previous Method-1 variant using IMG2IMG) ----
-@spaces.GPU(duration=120)
-def qr_stylize(url: str, style_prompt: str, negative: str, steps: int, cfg: float,
-               size: int, border: int, back_color: str, blur: float,
-               qr_weight: float, repair_strength: float, feather: float, seed: int,
-               denoise: float = 0.45):
-    s = snap8(size)
-
-    # Stage A: base art (txt2img)
-    sd = get_sd_pipe()
-    if int(seed) < 0:
-        seed = random.randint(0, 2**31 - 1)
-    gen = torch.Generator(device="cuda").manual_seed(int(seed))
 
-    if torch.cuda.is_available(): torch.cuda.empty_cache()
-    gc.collect()
     with torch.autocast(device_type="cuda", dtype=DTYPE):
-        base = sd(
+        # diffusers ≥ 0.30.x uses `image=` for control image
+        out = pipe(
             prompt=str(style_prompt),
             negative_prompt=str(negative or ""),
-            num_inference_steps=max(int(steps)//2, 12),
+            image=qr_img,
+            controlnet_conditioning_scale=float(qr_weight),  # ~1.0–1.2 works well
+            control_guidance_start=0.0,                      # "Balanced" feel
+            control_guidance_end=1.0,
+            num_inference_steps=int(steps),
             guidance_scale=float(cfg),
             width=s, height=s,
             generator=gen,
-        ).images[0]
-
-    # control image (QR)
-    qr_img = make_qr(url=url, size=s, border=int(border),
-                     back_color=back_color, blur_radius=float(blur))
-
-    # Stage B: img2img with ControlNet QR
-    pipe = get_qrmon_img2img_pipe()
-    if torch.cuda.is_available(): torch.cuda.empty_cache()
-    gc.collect()
-    with torch.autocast(device_type="cuda", dtype=DTYPE):
-        try:
-            out = pipe(
-                prompt=str(style_prompt),
-                negative_prompt=str(negative or ""),
-                image=base,                       # init image
-                image_guidance_scale=None,
-                control_image=qr_img,             # QR conditioning
-                strength=float(denoise),
-                controlnet_conditioning_scale=float(qr_weight),
-                control_guidance_start=0.05,
-                control_guidance_end=0.95,
-                num_inference_steps=int(steps),
-                guidance_scale=float(cfg),
-                width=s, height=s,
-                generator=gen,
-            )
-        except TypeError:
-            out = pipe(
+        )
+    lowres = out.images[0]
+
+    # --- Optional Stage B: Hi-Res Fix (img2img with same QR)
+    final = lowres
+    if use_hires:
+        up = max(1.0, min(2.0, float(hires_upscale)))
+        W = snap8(int(s * up)); H = W
+        pipe2 = get_qrmon_img2img_pipe()
+        if torch.cuda.is_available(): torch.cuda.empty_cache()
+        gc.collect()
+        with torch.autocast(device_type="cuda", dtype=DTYPE):
+            out2 = pipe2(
                 prompt=str(style_prompt),
                 negative_prompt=str(negative or ""),
-                image=base,
-                control_image=qr_img,
-                strength=float(denoise),
+                image=lowres,                    # init image
+                control_image=qr_img,            # same QR
+                strength=float(hires_strength),  # ~0.7 like "Hires Fix"
                 controlnet_conditioning_scale=float(qr_weight),
-                controlnet_start=0.05,
-                controlnet_end=0.95,
+                control_guidance_start=0.0,
+                control_guidance_end=1.0,
                 num_inference_steps=int(steps),
                 guidance_scale=float(cfg),
-                width=s, height=s,
+                width=W, height=H,
                 generator=gen,
             )
+        final = out2.images[0]
 
-    img = out.images[0]
-    img = enforce_qr_contrast(img, qr_img, strength=float(repair_strength), feather=float(feather))
-    return img, qr_img, base
+    final = enforce_qr_contrast(final, qr_img, strength=float(repair_strength), feather=float(feather))
+    return final, lowres, qr_img
 
 # ---------- UI ----------
 with gr.Blocks() as demo:
-    gr.Markdown("# ZeroGPU Stable Diffusion + AI QR Codes")
+    gr.Markdown("# ZeroGPU SD1.5 + AI QR (Method 1)")
 
-    with gr.Tab("Text → Image"):
-        prompt = gr.Textbox(label="Prompt", value="a cozy reading nook, warm sunlight, cinematic lighting, highly detailed")
-        negative = gr.Textbox(label="Negative (optional)", value="lowres, blurry, watermark, text")
-        steps = gr.Slider(8, 40, value=28, step=1, label="Steps")
+    with gr.Tab("Plain Text → Image"):
+        prompt = gr.Textbox(label="Prompt", value="Japanese painting, mountains")
+        negative = gr.Textbox(label="Negative (optional)", value="ugly, disfigured, low quality, blurry, nsfw")
+        steps = gr.Slider(8, 40, value=20, step=1, label="Steps")
         cfg = gr.Slider(1.0, 12.0, value=7.0, step=0.5, label="CFG")
-        width = gr.Slider(256, 1024, value=640, step=16, label="Width")
-        height = gr.Slider(256, 1024, value=640, step=16, label="Height")
+        width = gr.Slider(256, 1024, value=512, step=16, label="Width")
+        height = gr.Slider(256, 1024, value=512, step=16, label="Height")
         seed = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
         out_img = gr.Image(label="Image", interactive=False)
         gr.Button("Generate").click(txt2img, [prompt, negative, steps, cfg, width, height, seed], out_img)
 
-    # ---- Method 1: TXT2IMG ControlNet ----
-    with gr.Tab("QR (Method 1 — TXT2IMG)"):
-        url_m1 = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
-        prompt_m1 = gr.Textbox(label="Style prompt (no 'QR code' needed)",
-                               value="epic phoenix in flames, dramatic lighting, detailed, 8k")
-        neg_m1 = gr.Textbox(label="Negative prompt",
-                            value="lowres, low contrast, blurry, jpeg artifacts, worst quality, bad anatomy, extra digits")
-        size_m1 = gr.Slider(384, 1024, value=768, step=64, label="Canvas (px)")
-        steps_m1 = gr.Slider(10, 60, value=28, step=1, label="Steps")
-        cfg_m1 = gr.Slider(1.0, 12.0, value=6.5, step=0.1, label="CFG")
-        border_m1 = gr.Slider(4, 20, value=12, step=1, label="QR border (quiet zone)")
-        back_m1 = gr.ColorPicker(value="#808080", label="QR background")
-        blur_m1 = gr.Slider(0.0, 3.0, value=1.2, step=0.1, label="Soften control (blur)")
-        weight_m1 = gr.Slider(0.6, 1.6, value=1.2, step=0.05, label="QR control weight")
-        start_m1 = gr.Slider(0.0, 1.0, value=0.05, step=0.01, label="Control start")
-        end_m1 = gr.Slider(0.0, 1.0, value=0.95, step=0.01, label="Control end")
-        seed_m1 = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
-        repair_m1 = gr.Slider(0.0, 1.0, value=0.6, step=0.05, label="Post repair strength")
-        feather_m1 = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)")
-
-        final_m1 = gr.Image(label="Final QR (TXT2IMG)")
-        ctrl_m1 = gr.Image(label="Control QR used")
+    with gr.Tab("Method 1: QR control (txt2img)"):
+        url = gr.Textbox(label="URL/Text", value="https://example.com")
+        s_prompt = gr.Textbox(label="Style prompt", value="Japanese painting, mountains, 1girl")
+        s_negative= gr.Textbox(label="Negative prompt", value="ugly, disfigured, low quality, blurry, nsfw")
+        size = gr.Slider(384, 1024, value=512, step=64, label="Canvas (px)")
+        steps2 = gr.Slider(10, 50, value=20, step=1, label="Steps")
+        cfg2 = gr.Slider(1.0, 12.0, value=7.0, step=0.1, label="CFG")
+        border = gr.Slider(2, 16, value=4, step=1, label="QR border (quiet zone)")
+        qr_w = gr.Slider(0.6, 1.6, value=1.1, step=0.05, label="QR control weight")
+        seed2 = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
 
-        gr.Button("Generate (Method 1)").click(
-            qr_txt2img,
-            [url_m1, prompt_m1, neg_m1, steps_m1, cfg_m1, size_m1, border_m1, back_m1, blur_m1,
-             weight_m1, start_m1, end_m1, seed_m1, repair_m1, feather_m1],
-            [final_m1, ctrl_m1]
-        )
+        use_hires = gr.Checkbox(value=True, label="Hi-Res Fix (img2img upscale)")
+        hires_up = gr.Slider(1.0, 2.0, value=2.0, step=0.25, label="Hi-Res upscale (×)")
+        hires_str = gr.Slider(0.3, 0.9, value=0.7, step=0.05, label="Hi-Res denoise strength")
 
-    # ---- Two-stage (Method-1 variant, IMG2IMG) ----
-    with gr.Tab("QR (Two-stage IMG2IMG)"):
-        url = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
-        s_prompt = gr.Textbox(label="Style prompt (no 'QR code' needed)",
-                              value="epic phoenix in flames, dramatic lighting, detailed, 8k")
-        s_negative= gr.Textbox(label="Negative prompt",
-                               value="lowres, low contrast, blurry, jpeg artifacts, worst quality, bad anatomy, extra digits")
-        size = gr.Slider(384, 1024, value=768, step=64, label="Canvas (px)")
-        steps2 = gr.Slider(10, 60, value=28, step=1, label="Total steps")
-        cfg2 = gr.Slider(1.0, 12.0, value=6.5, step=0.1, label="CFG")
-        border = gr.Slider(4, 20, value=12, step=1, label="QR border (quiet zone)")
-        back_col = gr.ColorPicker(value="#808080", label="QR background")
-        blur = gr.Slider(0.0, 3.0, value=1.2, step=0.1, label="Soften control (blur)")
-        qr_w = gr.Slider(0.6, 1.6, value=1.2, step=0.05, label="QR control weight")
-        denoise = gr.Slider(0.2, 0.8, value=0.45, step=0.01, label="Denoising strength (Stage B)")
-        repair = gr.Slider(0.0, 1.0, value=0.6, step=0.05, label="Post repair strength")
+        repair = gr.Slider(0.0, 1.0, value=0.0, step=0.05, label="Post repair strength (optional)")
         feather = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)")
-        seed2 = gr.Number(value=-1, precision=0, label="Seed (-1 random)")
-        final_img = gr.Image(label="Final stylized QR")
+
+        final_img = gr.Image(label="Final (or Hi-Res) image")
+        low_img = gr.Image(label="Low-res (Stage A) preview")
         ctrl_img = gr.Image(label="Control QR used")
-        base_img = gr.Image(label="Base art (Stage A)")
-        gr.Button("Stylize QR (Two-stage)").click(
-            qr_stylize,
-            [url, s_prompt, s_negative, steps2, cfg2, size, border, back_col, blur, qr_w, repair, feather, seed2, denoise],
-            [final_img, ctrl_img, base_img]
+
+        gr.Button("Generate QR Art").click(
+            qr_txt2img,
+            [url, s_prompt, s_negative, steps2, cfg2, size, border, qr_w, seed2, use_hires, hires_up, hires_str, repair, feather],
+            [final_img, low_img, ctrl_img]
         )
 
 if __name__ == "__main__":
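Reviewer note: since the point of the commit is keeping stylized output scannable, a decode smoke test is a natural companion. A minimal sketch using OpenCV's built-in detector (assumes `opencv-python` is installed; `decodes_to` is hypothetical and not part of app.py):

```python
# Illustrative only: check that a generated image still decodes to the URL.
import cv2
import numpy as np
from PIL import Image

def decodes_to(img: Image.Image, expected: str) -> bool:
    bgr = cv2.cvtColor(np.array(img.convert("RGB")), cv2.COLOR_RGB2BGR)
    data, _, _ = cv2.QRCodeDetector().detectAndDecode(bgr)
    return data == expected
```

If the Hi-Res Fix output fails such a check, raising the QR control weight or the post-repair strength would be the first knobs to try.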