linoyts HF Staff committed on
Commit
883274d
·
verified ·
1 Parent(s): 285726a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -18
app.py CHANGED
@@ -11,6 +11,10 @@ from diffusers import QwenImageEditPipeline, FlowMatchEulerDiscreteScheduler
11
  from huggingface_hub import InferenceClient
12
  import math
13
 
 
 
 
 
14
  # --- Prompt Enhancement using Hugging Face InferenceClient ---
15
  def polish_prompt_hf(original_prompt, system_prompt):
16
  """
@@ -158,24 +162,11 @@ scheduler_config = {
158
  # Initialize scheduler with Lightning config
159
  scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
160
 
161
- # Load the edit pipeline with Lightning scheduler
162
- pipe = QwenImageEditPipeline.from_pretrained(
163
- "Qwen/Qwen-Image-Edit",
164
- scheduler=scheduler,
165
- torch_dtype=dtype
166
- ).to(device)
167
-
168
- # Load Lightning LoRA weights for acceleration
169
- try:
170
- pipe.load_lora_weights(
171
- "lightx2v/Qwen-Image-Lightning",
172
- weight_name="Qwen-Image-Lightning-8steps-V1.1.safetensors"
173
- )
174
- pipe.fuse_lora()
175
- print("Successfully loaded Lightning LoRA weights")
176
- except Exception as e:
177
- print(f"Warning: Could not load Lightning LoRA weights: {e}")
178
- print("Continuing with base model...")
179
 
180
  # --- UI Constants and Helpers ---
181
  MAX_SEED = np.iinfo(np.int32).max
 
11
  from huggingface_hub import InferenceClient
12
  import math
13
 
14
+ from optimization import optimize_pipeline_
15
+ from qwenimage.pipeline_qwen_image_edit import QwenImageEditPipeline
16
+ from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
17
+
18
  # --- Prompt Enhancement using Hugging Face InferenceClient ---
19
  def polish_prompt_hf(original_prompt, system_prompt):
20
  """
 
162
  # Initialize scheduler with Lightning config
163
  scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
164
 
165
+ pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", scheduler=scheduler,torch_dtype=dtype).to(device)
166
+ pipe.transformer.__class__ = QwenImageTransformer2DModel
167
+
168
+ # --- Ahead-of-time compilation ---
169
+ optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt="prompt")
 
 
 
 
 
 
 
 
 
 
 
 
 
170
 
171
  # --- UI Constants and Helpers ---
172
  MAX_SEED = np.iinfo(np.int32).max