Spaces: Running on Zero

Update app_turbo.py

app_turbo.py CHANGED (+80 -7)
@@ -126,6 +126,7 @@ def process(
     input_image: Image.Image,
     user_prompt: str,
     use_KDS: bool,
+    bandwidth: float,
     num_particles: int,
     positive_prompt: str,
     negative_prompt: str,
@@ -177,8 +178,8 @@ def process(
         height=height, width=width,
         guidance_scale=cfg_scale, conditioning_scale=1,
         start_point='lr', start_steps=999,ram_encoder_hidden_states=ram_encoder_hidden_states,
-        latent_tiled_size=latent_tiled_size, latent_tiled_overlap=latent_tiled_overlap,
-        num_particles=num_particles
+        latent_tiled_size=latent_tiled_size, latent_tiled_overlap=latent_tiled_overlap,
+        use_KDS=use_KDS, bandwidth=bandwidth, num_particles=num_particles
     ).images[0]

     if True: # alpha<1.0:
@@ -210,8 +211,9 @@ with block:
     with gr.Row():
         with gr.Column():
             input_image = gr.Image(type="pil")
-            num_particles = gr.Slider(label="Num of Partickes", minimum=1, maximum=16, step=1, value=
-
+            num_particles = gr.Slider(label="Num of Partickes", minimum=1, maximum=16, step=1, value=10)
+            bandwidth = gr.Slider(label="Bandwidth", minimum=0.1, maximum=0.8, step=0.1, value=0.1)
+            use_KDS = gr.Checkbox(label="Use Kernel Density Steering")
             run_button = gr.Button("Run")
             with gr.Accordion("Options", open=True):
                 user_prompt = gr.Textbox(label="User Prompt", value="")
@@ -220,8 +222,8 @@ with block:
                     label="Negative Prompt",
                     value="dotted, noise, blur, lowres, oversmooth, longbody, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality"
                 )
-                cfg_scale = gr.Slider(label="Classifier Free Guidance Scale (Set to 1.0 in sd-turbo)", minimum=1, maximum=
-                num_inference_steps = gr.Slider(label="Inference Steps", minimum=2, maximum=
+                cfg_scale = gr.Slider(label="Classifier Free Guidance Scale (Set to 1.0 in sd-turbo)", minimum=1, maximum=10, value=7.5, step=0)
+                num_inference_steps = gr.Slider(label="Inference Steps", minimum=2, maximum=100, value=50, step=1)
                 seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=231)
                 sample_times = gr.Slider(label="Sample Times", minimum=1, maximum=10, step=1, value=1)
                 latent_tiled_size = gr.Slider(label="Diffusion Tile Size", minimum=128, maximum=480, value=320, step=1)
@@ -229,11 +231,82 @@ with block:
                 scale_factor = gr.Number(label="SR Scale", value=4)
         with gr.Column():
             result_gallery = gr.Gallery(label="Output", show_label=False, elem_id="gallery")
-
+            examples = gr.Examples(
+                examples=[
+                    [
+                        "preset/datasets/test_datasets/woman.png",
+                        "",
+                        False,
+                        0.1,
+                        4,
+                        "clean, high-resolution, 8k, best quality, masterpiece",
+                        "dotted, noise, blur, lowres, oversmooth, longbody, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
+                        50,
+                        4,
+                        7.5,
+                        123,
+                        320,
+                        4,
+                        1,
+                    ],
+                    [
+                        "preset/datasets/test_datasets/woman.png",
+                        "",
+                        True,
+                        0.1,
+                        4,
+                        "clean, high-resolution, 8k, best quality, masterpiece",
+                        "dotted, noise, blur, lowres, oversmooth, longbody, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
+                        50,
+                        4,
+                        7.5,
+                        123,
+                        320,
+                        4,
+                        1,
+                    ],
+                    [
+                        "preset/datasets/test_datasets/woman.png",
+                        "",
+                        True,
+                        0.1,
+                        16,
+                        "clean, high-resolution, 8k, best quality, masterpiece",
+                        "dotted, noise, blur, lowres, oversmooth, longbody, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
+                        50,
+                        4,
+                        7.5,
+                        123,
+                        320,
+                        4,
+                        1,
+                    ],
+                ],
+                inputs=[
+                    input_image,
+                    user_prompt,
+                    use_KDS,
+                    bandwidth,
+                    num_particles,
+                    positive_prompt,
+                    negative_prompt,
+                    num_inference_steps,
+                    scale_factor,
+                    cfg_scale,
+                    seed,
+                    latent_tiled_size,
+                    latent_tiled_overlap,
+                    sample_times,
+                ],
+                outputs=[result_gallery],
+                fn=process,
+                cache_examples=True,
+            )
     inputs = [
         input_image,
         user_prompt,
         use_KDS,
+        bandwidth,
         num_particles,
         positive_prompt,
         negative_prompt,