Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -18,6 +18,8 @@ dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Load the model pipeline
+
+# scheduler config needed for the LoRA
 # From https://github.com/ModelTC/Qwen-Image-Lightning/blob/342260e8f5468d2f24d084ce04f55e101007118b/generate_with_diffusers.py#L82C9-L97C10
 scheduler_config = {
     "base_image_seq_len": 256,
@@ -38,6 +40,14 @@ scheduler_config = {
 scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
 pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", scheduler=scheduler, torch_dtype=dtype).to(device)
 
+# lora loading
+pipe.load_lora_weights(
+    "lightx2v/Qwen-Image-Lightning", weight_name="Qwen-Image-Lightning-8steps-V1.0.safetensors", adapter_name="lightx2v"
+)
+pipe.set_adapters(["lightx2v"], adapter_weights=[1.])
+pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=1., components=["transformer"])
+pipe.unload_lora_weights()
+
 # --- UI Constants and Helpers ---
 MAX_SEED = np.iinfo(np.int32).max
 
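
For reference, a minimal self-contained sketch of the load-and-fuse sequence this commit adds. It mirrors the diff; the imports at the top and the abbreviation of scheduler_config are my own additions (the remaining config keys live in the Qwen-Image-Lightning script linked above), and it assumes a diffusers version that ships QwenImageEditPipeline.

import torch
from diffusers import FlowMatchEulerDiscreteScheduler, QwenImageEditPipeline

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Scheduler config needed by the Lightning LoRA; only one key is reproduced
# here, the rest come from the linked generate_with_diffusers.py script.
scheduler_config = {
    "base_image_seq_len": 256,
    # ... remaining keys from the linked script ...
}
scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)

# Build the Qwen-Image-Edit pipeline with the Lightning-compatible scheduler.
pipe = QwenImageEditPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit", scheduler=scheduler, torch_dtype=dtype
).to(device)

# Load the 8-step Lightning LoRA, fuse it into the transformer, then unload
# the standalone adapter so only the fused weights stay resident.
pipe.load_lora_weights(
    "lightx2v/Qwen-Image-Lightning",
    weight_name="Qwen-Image-Lightning-8steps-V1.0.safetensors",
    adapter_name="lightx2v",
)
pipe.set_adapters(["lightx2v"], adapter_weights=[1.0])
pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=1.0, components=["transformer"])
pipe.unload_lora_weights()

Fusing the adapter into the transformer and then unloading it keeps inference on the plain (LoRA-free) code path while baking in the Lightning weights, which is what lets the Space run with the short 8-step schedule this LoRA is trained for.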