Spaces: Running on Zero
Tanut committed · 19c2aa1 · Parent: 6ae079b

Fix bug

Files changed:
- README.md (+11, −6)
- app.py (+159, −247)
- requirements.txt (+2, −1)
README.md
CHANGED

```diff
@@ -1,12 +1,17 @@
 ---
-title:
-emoji:
+title: GenImages ControlNet
+emoji: 🐢
+colorFrom: green
+colorTo: green
 sdk: gradio
-sdk_version:
-
+sdk_version: 5.42.0
+app_file: app.py
+pinned: false
+license: openrail
+short_description: Testing generate Images from Controlnet
 preload_from_hub:
 - runwayml/stable-diffusion-v1-5
 - monster-labs/control_v1p_sd15_qrcode_monster
-- latentcat/control_v1p_sd15_brightness
-license: openrail++
 ---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
```

(The removed `title:`, `emoji:`, and `sdk_version:` values did not survive the diff view.)
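For context, `preload_from_hub` is meant to download the listed repos into the Space's model cache at build time, so the first `from_pretrained` call resolves locally. A minimal sketch of the call that benefits (mirroring app.py; not part of this commit):

```python
from diffusers import StableDiffusionPipeline

# Already cached at build time via preload_from_hub, so no network fetch here
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
```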
app.py
CHANGED

@@ -1,291 +1,203 @@ (effectively a full rewrite of the file)
Removed: the old two-stage QR-art pipeline. Several spans did not survive the diff view; gaps are marked `# …`, and anything recovered from surrounding context is flagged in a comment.

```python
import os, gc, random                      # first line truncated in the diff; gc/random are used below
import numpy as np
from contextlib import nullcontext
from typing import Tuple
from PIL import Image, ImageFilter         # recovered: both are used below

import gradio as gr
import qrcode
from qrcode.constants import ERROR_CORRECT_H

import torch
from diffusers import (
    StableDiffusionPipeline,
    StableDiffusionControlNetImg2ImgPipeline,   # recovered: instantiated below
    ControlNetModel,
    DPMSolverMultistepScheduler,
)

def strengthen_qr_prompts(prompt, negative):    # signature lost; name/arity recovered from the call site below
    # … (body lost)
    return pos2, neg2

def enforce_qr_contrast(stylized: Image.Image, qr_img: Image.Image, strength: float = 0.6, feather: float = 1.0) -> Image.Image:
    if strength <= 0: return stylized
    q = qr_img.convert("L")
    black_mask = q.point(lambda p: 255 if p < 128 else 0).filter(ImageFilter.GaussianBlur(radius=float(feather)))
    black = np.asarray(black_mask, dtype=np.float32) / 255.0
    white = 1.0 - black
    s = np.asarray(stylized.convert("RGB"), dtype=np.float32) / 255.0
    s = s * (1.0 - float(strength) * black[..., None])
    s = s + (1.0 - s) * (float(strength) * 0.85 * white[..., None])
    s = np.clip(s, 0.0, 1.0)
    return Image.fromarray((s * 255.0).astype(np.uint8), mode="RGB")

# =========================================================
# Models & loading (ZeroGPU-friendly lazy load)
# =========================================================
BASE_15 = "runwayml/stable-diffusion-v1-5"
QR_MONSTER_15 = "monster-labs/control_v1p_sd15_qrcode_monster"  # v2 subfolder is handled by authors; base path is fine
BRIGHTNESS_15 = "latentcat/control_v1p_sd15_brightness"         # optional helper

_sd = {"pipe": None}
_cn = {"pipe": None}

def _setup_scheduler(pipe):
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        pipe.scheduler.config,
        use_karras_sigmas=True,
        algorithm_type="dpmsolver++"
    )

def _enable_memory_savers(pipe):
    # Good defaults for Spaces/ZeroGPU
    pipe.enable_attention_slicing()
    pipe.enable_vae_slicing()
    pipe.enable_vae_tiling()
    pipe.enable_model_cpu_offload()

# … (txt2img loader; wrapper lines lost)
    pipe = StableDiffusionPipeline.from_pretrained(
        BASE_15,
        torch_dtype=...,     # value lost
        safety_checker=None,
        use_safetensors=True,
        low_cpu_mem_usage=True,
        **...,               # kwargs lost
    )
# …

# … (`_load_cn_img2img` loader; wrapper lines lost)
    pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
        BASE_15,
        controlnet=[...],    # list contents lost
        torch_dtype=...,     # value lost
        safety_checker=None,
        use_safetensors=True,
        low_cpu_mem_usage=True,
        **...,               # kwargs lost
    )
# …

# … (`sd_generate` for Stage A; body mostly lost, it ended with)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        gc.collect()
    # …
    return out.images[0]

def make_qr(url="http://www.mybirdfire.com", size=512, border=10, back_color="#808080", blur_radius=0.0):
    qr = qrcode.QRCode(version=None, error_correction=ERROR_CORRECT_H, box_size=10, border=int(border))
    qr.add_data(url.strip()); qr.make(fit=True)
    bg = normalize_color(back_color)   # normalize_color was defined in a span lost above
    img = qr.make_image(fill_color="black", back_color=bg).convert("RGB").resize((size, size), Image.NEAREST)
    if blur_radius and blur_radius > 0:
        img = img.filter(ImageFilter.GaussianBlur(radius=float(blur_radius)))
    return img

def qr_art_two_stage(                  # def line lost; name recovered from the go.click wiring below
    prompt, negative,
    base_steps, base_cfg, base_seed,
    stylize_steps, stylize_cfg, stylize_seed,
    size, url, border, back_color,
    denoise, qr_weight, bright_weight,
    qr_start, qr_end, bright_start, bright_end,
    control_blur, repair_strength, feather_px
):
    size = max(384, int(size) // 8 * 8)

    # Stage A: base art (txt2img)
    p_pos, p_neg = strengthen_qr_prompts(prompt, negative)
    base_img = sd_generate(p_pos, p_neg, base_steps, base_cfg, base_seed, size=size)

    # Stage B: img2img + ControlNet
    qr_img = make_qr(url=url, size=size, border=border, back_color=back_color, blur_radius=control_blur)
    pipe = _load_cn_img2img()

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        gc.collect()

    # … (generator construction lost)
    if int(stylize_seed) != 0:
        gen = gen.manual_seed(int(stylize_seed))
    else:
        gen = gen.manual_seed(random.randint(0, 2**31 - 1))

    kwargs = dict(
        prompt=p_pos,
        negative_prompt=p_neg or NEG_DEFAULT,   # NEG_DEFAULT was defined in a span lost above
        image=base_img,                         # init image for img2img
        control_image=[qr_img, qr_img],         # Monster + Brightness
        strength=float(denoise),                # how much we allow change
        num_inference_steps=int(stylize_steps),
        guidance_scale=float(stylize_cfg),
        generator=gen,
        controlnet_conditioning_scale=[float(qr_weight), float(bright_weight)],
        width=size, height=size,                # (diffusers uses init image size; harmless here)
    )

    try:
        out = pipe(
            **kwargs,
            control_guidance_start=[float(qr_start), float(bright_start)],
            control_guidance_end=[float(qr_end), float(bright_end)],
        )
    except TypeError:
        out = pipe(
            # … (fallback arguments lost)
        )

    img = out.images[0]

    # Optional post repair to push blacks/whites where modules demand
    img = enforce_qr_contrast(img, qr_img, strength=float(repair_strength), feather=float(feather_px))
    return img, base_img, qr_img

# …
# UI (…)
# …
with gr.Blocks() as demo:
    gr.Markdown("## …")        # heading text lost

    with gr.Tab("…"):          # tab label lost
        # … (prompt/negative, step/CFG/seed, size/url and ControlNet-weight controls lost)
        border = gr.Slider(4, 20, value=12, step=1, label="QR border (quiet zone)")
        back_color = gr.ColorPicker(value="#808080", label="QR background (mid-gray blends better)")
        control_blur = gr.Slider(0.0, 3.0, value=1.2, step=0.1, label="Soften control (Gaussian blur radius)")
        repair_strength = gr.Slider(0.0, 1.0, value=0.65, step=0.05, label="Post repair strength")
        feather_px = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)")

        go = gr.Button("Generate QR Art", variant="primary")

        with gr.Column():
            final_img = gr.Image(label="Final stylized QR")
            base_img = gr.Image(label="Base art (Stage A)")
            ctrl_img = gr.Image(label="Control image (QR used)")

    go.click(
        qr_art_two_stage,
        inputs=[prompt, negative,
                base_steps, base_cfg, base_seed,
                stylize_steps, stylize_cfg, stylize_seed,
                size, url, border, back_color,
                denoise, qr_weight, bright_weight,
                qr_start, qr_end, bright_start, bright_end,
                control_blur, repair_strength, feather_px],
        outputs=[final_img, base_img, ctrl_img]
    )

if __name__ == "__main__":
    demo.launch()   # original launch arguments lost
```
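The removed `enforce_qr_contrast` is worth a second look before it goes: it pushes pixels under black QR modules toward black and pixels under white modules toward white, which is what kept the codes scannable. A quick numeric spot-check of that blend (values picked for illustration):

```python
# enforce_qr_contrast blend at strength = 0.6, applied to a mid-gray pixel
strength = 0.6
s = 0.5                                          # stylized pixel intensity, 0..1
under_black = s * (1.0 - strength * 1.0)         # black mask = 1.0 -> 0.20 (darker)
under_white = s + (1.0 - s) * (strength * 0.85)  # white mask = 1.0 -> 0.755 (lighter)
print(under_black, under_white)                  # 0.2 0.755
```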
Added: the new app.py, shown in sections. First the imports, configuration, and the QR maker:

```python
import os, gc, random
import gradio as gr
import numpy as np
from PIL import Image
import qrcode
from qrcode.constants import ERROR_CORRECT_H

import torch
import spaces  # <- ZeroGPU decorator

from diffusers import (
    StableDiffusionPipeline,
    StableDiffusionControlNetPipeline,
    ControlNetModel,
)
from diffusers.schedulers.scheduling_euler_discrete import EulerDiscreteScheduler
from controlnet_aux import CannyDetector

# -----------------------------
# Versions / env
# -----------------------------
TORCH_DTYPE = torch.float16  # Spaces GPU slice supports fp16 well

# Optional (private models): set HF_TOKEN in Space secrets
HF_TOKEN = os.getenv("HF_TOKEN")
AUTH = {"token": HF_TOKEN} if HF_TOKEN else {}

# -----------------------------
# Global caches (lazy)
# -----------------------------
_sd_txt = {"pipe": None}
_sd_cn = {"pipe": None, "canny": None}

BASE_15 = "runwayml/stable-diffusion-v1-5"
CN_CANNY_15 = "lllyasviel/sd-controlnet-canny"
CN_TILE_15 = "lllyasviel/control_v11f1e_sd15_tile"

NEG_DEFAULT = "lowres, low contrast, blurry, jpeg artifacts, worst quality, bad anatomy, extra digits"


# -----------------------------
# QR maker (unchanged behavior)
# -----------------------------
def make_qr(url: str = "http://www.mybirdfire.com", size: int = 512, border: int = 4) -> Image.Image:
    qr = qrcode.QRCode(version=None, error_correction=ERROR_CORRECT_H, box_size=10, border=int(border))
    qr.add_data(url.strip())
    qr.make(fit=True)
    img = qr.make_image(fill_color="black", back_color="white").convert("RGB")
    return img.resize((int(size), int(size)), resample=Image.NEAREST)
```
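A quick local check of `make_qr`, assuming only `qrcode[pil]` and Pillow are installed (the URL is illustrative):

```python
img = make_qr("https://example.com", size=512, border=4)
print(img.size, img.mode)   # (512, 512) RGB
img.save("qr.png")          # black modules on a white background
```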
The lazy pipeline loaders:

```python
# -----------------------------
# Lazy loaders (Spaces-safe)
# -----------------------------
def _get_sd15_txt2img():
    if _sd_txt["pipe"] is None:
        pipe = StableDiffusionPipeline.from_pretrained(
            BASE_15,
            torch_dtype=TORCH_DTYPE,
            safety_checker=None,
            use_safetensors=True,
            low_cpu_mem_usage=True,
            **AUTH
        )
        # Memory savers — ok to call before GPU is attached
        pipe.enable_attention_slicing()
        pipe.enable_vae_slicing()
        pipe.enable_model_cpu_offload()
        _sd_txt["pipe"] = pipe
    return _sd_txt["pipe"]

def _get_sd15_canny_tile():
    if _sd_cn["pipe"] is None:
        canny = ControlNetModel.from_pretrained(CN_CANNY_15, torch_dtype=TORCH_DTYPE, use_safetensors=True, **AUTH)
        tile = ControlNetModel.from_pretrained(CN_TILE_15, torch_dtype=TORCH_DTYPE, use_safetensors=True, **AUTH)

        pipe = StableDiffusionControlNetPipeline.from_pretrained(
            BASE_15,
            controlnet=[canny, tile],
            torch_dtype=TORCH_DTYPE,
            safety_checker=None,
            use_safetensors=True,
            low_cpu_mem_usage=True,
            **AUTH
        )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.enable_attention_slicing()
        pipe.enable_vae_slicing()
        pipe.enable_model_cpu_offload()

        _sd_cn["pipe"] = pipe
        _sd_cn["canny"] = CannyDetector()
    return _sd_cn["pipe"], _sd_cn["canny"]
```
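Why the module-level dict caches: on ZeroGPU the Python process outlives any single GPU attachment, so loading once and reusing the pipeline avoids re-fetching weights on every request. A sketch of the invariant the loaders maintain:

```python
# Both calls hit the cache after the first load: same object, no re-download
p1 = _get_sd15_txt2img()
p2 = _get_sd15_txt2img()
assert p1 is p2

pipe, canny = _get_sd15_canny_tile()
assert _sd_cn["pipe"] is pipe and isinstance(canny, CannyDetector)
```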
Prompt-only generation, run inside the ZeroGPU window:

```python
# -----------------------------
# SD 1.5 (prompt-only)
# -----------------------------
@spaces.GPU(duration=120)
def sd_generate(prompt, negative, steps, guidance, seed):
    pipe = _get_sd15_txt2img()

    # Reproducible generator on CUDA (available during @GPU call)
    g = torch.Generator(device="cuda")
    g = g.manual_seed(int(seed)) if int(seed) != 0 else g.manual_seed(random.randint(0, 2**31 - 1))

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        gc.collect()

    with torch.autocast(device_type="cuda", dtype=TORCH_DTYPE):
        out = pipe(
            prompt=str(prompt),
            negative_prompt=(negative or ""),
            num_inference_steps=int(steps),
            guidance_scale=float(guidance),
            generator=g
        )
    return out.images[0]
```
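The seed convention (0 draws a fresh random seed, anything else is fixed) makes runs reproducible on demand. A usage sketch, assuming the decorator has a GPU attached when the call runs:

```python
a = sd_generate("a red fox, watercolor", "", steps=20, guidance=7.0, seed=42)
b = sd_generate("a red fox, watercolor", "", steps=20, guidance=7.0, seed=42)
# Identical seed and settings should yield identical images; seed=0 would differ per call.
```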
The QR stylizer (canny + tile ControlNets):

```python
# -----------------------------
# Stylizer (SD1.5 + ControlNet canny + tile)
# -----------------------------
@spaces.GPU(duration=180)
def stylize_qr_sd15(prompt: str, negative: str, steps: int, guidance: float, seed: int,
                    canny_low: int, canny_high: int, border: int):

    pipe, canny = _get_sd15_canny_tile()

    # Fresh QR → edges
    qr_img = make_qr("http://www.mybirdfire.com", size=512, border=int(border))
    edges = canny(qr_img, low_threshold=int(canny_low), high_threshold=int(canny_high))

    # Control weights (canny, tile). Tune to taste.
    cn_scales = [1.2, 0.6]

    g = torch.Generator(device="cuda")
    g = g.manual_seed(int(seed)) if int(seed) != 0 else g.manual_seed(random.randint(0, 2**31 - 1))

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        gc.collect()

    with torch.autocast(device_type="cuda", dtype=TORCH_DTYPE):
        out = pipe(
            prompt=str(prompt),
            negative_prompt=(negative or NEG_DEFAULT),
            image=[edges, qr_img],  # txt2img ControlNet: control images
            controlnet_conditioning_scale=cn_scales,
            num_inference_steps=int(steps),
            guidance_scale=float(guidance),
            generator=g
        )
    return out.images[0]
```
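The two control images are passed in the same order as the loaded ControlNets (`[canny, tile]`), with edge guidance weighted above tile (`[1.2, 0.6]`). To inspect the edge map on its own, a small sketch using the same `CannyDetector` from controlnet_aux:

```python
det = CannyDetector()
edges = det(make_qr("http://www.mybirdfire.com", size=512, border=6),
            low_threshold=80, high_threshold=160)
edges.save("qr_edges.png")  # white module outlines on black: the canny control signal
```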
The UI and launch block:

```python
# -----------------------------
# UI (same layout as yours)
# -----------------------------
with gr.Blocks() as demo:
    gr.Markdown("## Stable Diffusion + QR Code + ControlNet (SD1.5) — ZeroGPU")

    with gr.Tab("Stable Diffusion (prompt → image)"):
        prompt = gr.Textbox(label="Prompt", value="Sky, Moon, Bird, Blue, In the dark, Goddess, Sweet, Beautiful, Fantasy, Art, Anime")
        negative = gr.Textbox(label="Negative Prompt", value="lowres, bad anatomy, worst quality")
        steps = gr.Slider(10, 50, value=30, label="Steps", step=1)
        cfg = gr.Slider(1, 12, value=7.0, label="Guidance Scale", step=0.1)
        seed = gr.Number(value=0, label="Seed (0 = random)", precision=0)
        out_sd = gr.Image(label="Generated Image")
        gr.Button("Generate").click(sd_generate, [prompt, negative, steps, cfg, seed], out_sd)

    with gr.Tab("QR Maker (mybirdfire)"):
        url = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com")
        size = gr.Slider(256, 1024, value=512, step=64, label="Size (px)")
        quiet = gr.Slider(0, 8, value=4, step=1, label="Border (quiet zone)")
        out_qr = gr.Image(label="QR Code", type="pil")
        gr.Button("Generate QR").click(make_qr, [url, size, quiet], out_qr)

    with gr.Tab("QR Stylizer (SD1.5 canny + tile, Euler)"):
        s_prompt = gr.Textbox(label="Style Prompt", value="Sky, Moon, Bird, Blue, In the dark, Goddess, Sweet, Beautiful, Fantasy, Art, Anime")
        s_negative = gr.Textbox(label="Negative Prompt", value=NEG_DEFAULT)
        s_steps = gr.Slider(10, 50, value=28, label="Steps", step=1)
        s_cfg = gr.Slider(1, 12, value=7.0, label="CFG", step=0.1)
        s_seed = gr.Number(value=1470713301, label="Seed", precision=0)
        canny_l = gr.Slider(0, 255, value=80, step=1, label="Canny low")
        canny_h = gr.Slider(0, 255, value=160, step=1, label="Canny high")
        s_border = gr.Slider(2, 10, value=6, step=1, label="QR border")
        out_styl = gr.Image(label="Stylized QR")
        gr.Button("Stylize").click(
            stylize_qr_sd15,
            [s_prompt, s_negative, s_steps, s_cfg, s_seed, canny_l, canny_h, s_border],
            out_styl
        )

if __name__ == "__main__":
    demo.queue(max_size=12, concurrency_count=1).launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True,
        show_api=True,
        analytics_enabled=False
    )
```
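One compatibility caveat, offered as an assumption to verify against the build logs: `concurrency_count` is a Gradio 3.x queue parameter that the 4.x line pinned in requirements.txt replaced with `default_concurrency_limit`, and `share=True` is ignored on Spaces. Note also that README pins `sdk_version: 5.42.0` while requirements pins `gradio==4.44.1`; which one the Space actually installs is worth checking. If `launch()` raises a TypeError under Gradio 4.x, the adjusted call would look roughly like:

```python
# 4.x-style equivalent of the queue/launch above (sketch, not part of the commit)
demo.queue(max_size=12, default_concurrency_limit=1).launch(
    server_name="0.0.0.0",
    server_port=7860,
    show_api=True,
    analytics_enabled=False,
)
```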
requirements.txt
CHANGED

```diff
@@ -3,9 +3,10 @@ diffusers>=0.27.2
 transformers>=4.42.0
 accelerate>=0.31.0
 safetensors
-gradio
+gradio==4.44.1
 qrcode[pil]
 Pillow
 huggingface-hub
+controlnet-aux>=0.0.8
 spaces
 numpy
```