Spaces: Running on Zero
Update app.py
Browse files

app.py CHANGED
@@ -6,6 +6,10 @@ import spaces
 
 from PIL import Image
 from diffusers import QwenImageEditPipeline
+from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
+import torch
+import math
+
 
 import os
 
@@ -14,7 +18,25 @@ dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Load the model pipeline
-pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=dtype).to(device)
+# From https://github.com/ModelTC/Qwen-Image-Lightning/blob/342260e8f5468d2f24d084ce04f55e101007118b/generate_with_diffusers.py#L82C9-L97C10
+scheduler_config = {
+    "base_image_seq_len": 256,
+    "base_shift": math.log(3),  # We use shift=3 in distillation
+    "invert_sigmas": False,
+    "max_image_seq_len": 8192,
+    "max_shift": math.log(3),  # We use shift=3 in distillation
+    "num_train_timesteps": 1000,
+    "shift": 1.0,
+    "shift_terminal": None,  # set shift_terminal to None
+    "stochastic_sampling": False,
+    "time_shift_type": "exponential",
+    "use_beta_sigmas": False,
+    "use_dynamic_shifting": True,
+    "use_exponential_sigmas": False,
+    "use_karras_sigmas": False,
+}
+scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
+pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", scheduler=scheduler, torch_dtype=dtype).to(device)
 
 # --- UI Constants and Helpers ---
 MAX_SEED = np.iinfo(np.int32).max
@@ -28,7 +50,7 @@ def infer(
     randomize_seed=False,
     guidance_scale=4.0,
     true_guidance_scale=1.0,
-    num_inference_steps=
+    num_inference_steps=8,
     progress=gr.Progress(track_tqdm=True),
 ):
     """
@@ -108,7 +130,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=0.0,
                     maximum=10.0,
                     step=0.1,
-                    value=
+                    value=1.0,
                 )
 
                 true_guidance_scale = gr.Slider(
@@ -124,7 +146,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=1,
                     maximum=50,
                     step=1,
-                    value=
+                    value=8,
                 )
 
         # gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)
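For context, a minimal sketch of how the updated pipeline loading could be exercised end to end after this change. The scheduler block is copied from the diff above; the call kwargs (image, prompt, true_cfg_scale, num_inference_steps, generator), the example prompt, and the file names are assumptions for illustration, not part of this commit.

# Sketch only (not part of the commit): 8-step edit with the distillation scheduler.
import math
import torch
from PIL import Image
from diffusers import QwenImageEditPipeline, FlowMatchEulerDiscreteScheduler

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Scheduler config from the commit above (shift=3 distillation settings).
scheduler = FlowMatchEulerDiscreteScheduler.from_config({
    "base_image_seq_len": 256,
    "base_shift": math.log(3),
    "invert_sigmas": False,
    "max_image_seq_len": 8192,
    "max_shift": math.log(3),
    "num_train_timesteps": 1000,
    "shift": 1.0,
    "shift_terminal": None,
    "stochastic_sampling": False,
    "time_shift_type": "exponential",
    "use_beta_sigmas": False,
    "use_dynamic_shifting": True,
    "use_exponential_sigmas": False,
    "use_karras_sigmas": False,
})

pipe = QwenImageEditPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit", scheduler=scheduler, torch_dtype=dtype
).to(device)

image = Image.open("input.png").convert("RGB")  # hypothetical input image
result = pipe(
    image=image,
    prompt="replace the sky with a sunset",  # hypothetical prompt
    true_cfg_scale=1.0,                      # matches the new slider default (assumed kwarg name)
    num_inference_steps=8,                   # matches the new default step count
    generator=torch.Generator(device=device).manual_seed(0),
).images[0]
result.save("output.png")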