cbensimon (HF Staff) committed
Commit 66f5ac6 · verified · 1 Parent(s): 05cb184

Update app.py

Files changed (1):
  1. app.py +3 -36
app.py CHANGED
@@ -1,7 +1,3 @@
- # PyTorch 2.8 (temporary hack)
- import os
- os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')
-
  import gradio as gr
  import numpy as np
  import random
@@ -20,39 +16,10 @@ from optimization import optimize_pipeline_
  # --- Model Loading ---
  dtype = torch.bfloat16
  device = "cuda" if torch.cuda.is_available() else "cpu"
+ pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=dtype).to(device)

- # Load the model pipeline
-
- # scheduler config needed for the LoRA
- # From https://github.com/ModelTC/Qwen-Image-Lightning/blob/342260e8f5468d2f24d084ce04f55e101007118b/generate_with_diffusers.py#L82C9-L97C10
- scheduler_config = {
-     "base_image_seq_len": 256,
-     "base_shift": math.log(3),  # We use shift=3 in distillation
-     "invert_sigmas": False,
-     "max_image_seq_len": 8192,
-     "max_shift": math.log(3),  # We use shift=3 in distillation
-     "num_train_timesteps": 1000,
-     "shift": 1.0,
-     "shift_terminal": None,  # set shift_terminal to None
-     "stochastic_sampling": False,
-     "time_shift_type": "exponential",
-     "use_beta_sigmas": False,
-     "use_dynamic_shifting": True,
-     "use_exponential_sigmas": False,
-     "use_karras_sigmas": False,
- }
- scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
- pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", scheduler=scheduler, torch_dtype=dtype).to(device)
-
- # lora loading
- pipe.load_lora_weights(
-     "lightx2v/Qwen-Image-Lightning", weight_name="Qwen-Image-Lightning-8steps-V1.0.safetensors", adapter_name="lightx2v"
- )
- pipe.set_adapters(["lightx2v"], adapter_weights=[1.])
- pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=1., components=["transformer"])
- pipe.unload_lora_weights()
-
- # optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt='prompt')
+ # --- Ahead-of-time compilation ---
+ optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt='prompt')

  # --- UI Constants and Helpers ---
  MAX_SEED = np.iinfo(np.int32).max
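
In short, this commit drops the temporary runtime install of a PyTorch nightly build and the Qwen-Image-Lightning scheduler/LoRA fusion, leaving a plain pipeline load followed by the repo-local ahead-of-time compilation step. Below is a minimal standalone sketch of the resulting model-loading section; the torch, PIL, and diffusers imports are assumptions (they are not visible in this hunk), and optimize_pipeline_ refers to the Space's own optimization module, whose internals are not shown in the diff.

# Minimal sketch of app.py's model-loading section after this commit.
# Assumptions: torch, Pillow, and diffusers are installed; these imports are
# not part of the diff context shown above.
import torch
from PIL import Image
from diffusers import QwenImageEditPipeline

from optimization import optimize_pipeline_  # repo-local helper; internals not shown in this diff

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the base Qwen-Image-Edit pipeline directly, without the removed
# Lightning scheduler override and LoRA fusion.
pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=dtype).to(device)

# Ahead-of-time compilation: the helper is called once with a dummy 1024x1024
# image and a placeholder prompt, presumably so the pipeline is compiled and
# warmed up before the first real request.
optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt="prompt")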