rahul7star committed on
Commit
53c8156
·
verified ·
1 Parent(s): b29fa42

Create app_double_lora.py

Browse files
Files changed (1) hide show
  1. app_double_lora.py +546 -0
app_double_lora.py ADDED
@@ -0,0 +1,546 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler
3
+ from diffusers.utils import export_to_video
4
+ from transformers import CLIPVisionModel
5
+ import gradio as gr
6
+ import tempfile
7
+ import spaces
8
+ from huggingface_hub import hf_hub_download
9
+ import numpy as np
10
+ from PIL import Image
11
+ import random
12
+
13
+ # Base MODEL_ID (using original Wan model that's compatible with diffusers)
14
+ MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
15
+
16
+ # FusionX enhancement LoRAs (based on FusionX composition)
17
+ LORA_REPO_ID = "Kijai/WanVideo_comfy"
18
+ LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"
19
+
20
+ # Additional enhancement LoRAs for FusionX-like quality
21
+ ACCVIDEO_LORA_REPO = "alibaba-pai/Wan2.1-Fun-Reward-LoRAs"
22
+ MPS_LORA_FILENAME = "Wan2.1-MPS-Reward-LoRA.safetensors"
23
+
24
+ # Load enhanced model components
25
+ print("🚀 Loading FusionX Enhanced Wan2.1 I2V Model...")
26
+ image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32)
27
+ vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
28
+ pipe = WanImageToVideoPipeline.from_pretrained(
29
+ MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
30
+ )
31
+
32
+ # FusionX optimized scheduler settings
33
+ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
34
+ pipe.to("cuda")
35
+
36
+ # Load FusionX enhancement LoRAs
37
+ lora_adapters = []
38
+ lora_weights = []
39
+
40
+ try:
41
+ # Load CausVid LoRA (strength 1.0 as per FusionX)
42
+ causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
43
+ pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
44
+ lora_adapters.append("causvid_lora")
45
+ lora_weights.append(1.0) # FusionX uses 1.0 for CausVid
46
+ print("✅ CausVid LoRA loaded (strength: 1.0)")
47
+ except Exception as e:
48
+ print(f"⚠️ CausVid LoRA not loaded: {e}")
49
+
50
+ try:
51
+ # Load MPS Rewards LoRA (strength 0.7 as per FusionX)
52
+ mps_path = hf_hub_download(repo_id=ACCVIDEO_LORA_REPO, filename=MPS_LORA_FILENAME)
53
+ pipe.load_lora_weights(mps_path, adapter_name="mps_lora")
54
+ lora_adapters.append("mps_lora")
55
+ lora_weights.append(0.7) # FusionX uses 0.7 for MPS
56
+ print("✅ MPS Rewards LoRA loaded (strength: 0.7)")
57
+ except Exception as e:
58
+ print(f"⚠️ MPS LoRA not loaded: {e}")
59
+
60
+ # Apply LoRA adapters if any were loaded
61
+ if lora_adapters:
62
+ pipe.set_adapters(lora_adapters, adapter_weights=lora_weights)
63
+ pipe.fuse_lora()
64
+ print(f"🔥 FusionX Enhancement Applied: {len(lora_adapters)} LoRAs fused")
65
+ else:
66
+ print("📝 No LoRAs loaded - using base Wan model")
67
+
68
+ MOD_VALUE = 32
69
+ DEFAULT_H_SLIDER_VALUE = 576 # FusionX optimized default
70
+ DEFAULT_W_SLIDER_VALUE = 1024 # FusionX optimized default
71
+ NEW_FORMULA_MAX_AREA = 576.0 * 1024.0 # Updated for FusionX
72
+
73
+ SLIDER_MIN_H, SLIDER_MAX_H = 128, 1080
74
+ SLIDER_MIN_W, SLIDER_MAX_W = 128, 1920
75
+ MAX_SEED = np.iinfo(np.int32).max
76
+
77
+ FIXED_FPS = 24
78
+ MIN_FRAMES_MODEL = 8
79
+ MAX_FRAMES_MODEL = 121 # FusionX supports up to 121 frames
80
+
81
+ # Enhanced prompts for FusionX-style output
82
+ default_prompt_i2v = "Cinematic motion, smooth animation, detailed textures, dynamic lighting, professional cinematography"
83
+ default_negative_prompt = "Static image, no motion, blurred details, overexposed, underexposed, low quality, worst quality, JPEG artifacts, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, watermark, text, signature, three legs, many people in the background, walking backwards"
84
+
85
+ # Enhanced CSS for FusionX theme
86
+ custom_css = """
87
+ /* Enhanced FusionX theme with cinematic styling */
88
+ .gradio-container {
89
+ font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
90
+ background: linear-gradient(135deg, #1a1a2e 0%, #16213e 25%, #0f3460 50%, #533a7d 75%, #6a4c93 100%) !important;
91
+ background-size: 400% 400% !important;
92
+ animation: cinematicShift 20s ease infinite !important;
93
+ }
94
+
95
+ @keyframes cinematicShift {
96
+ 0% { background-position: 0% 50%; }
97
+ 25% { background-position: 100% 50%; }
98
+ 50% { background-position: 100% 100%; }
99
+ 75% { background-position: 0% 100%; }
100
+ 100% { background-position: 0% 50%; }
101
+ }
102
+
103
+ /* Main container with cinematic glass effect */
104
+ .main-container {
105
+ backdrop-filter: blur(15px);
106
+ background: rgba(255, 255, 255, 0.08) !important;
107
+ border-radius: 25px !important;
108
+ padding: 35px !important;
109
+ box-shadow: 0 12px 40px 0 rgba(31, 38, 135, 0.4) !important;
110
+ border: 1px solid rgba(255, 255, 255, 0.15) !important;
111
+ position: relative;
112
+ overflow: hidden;
113
+ }
114
+
115
+ .main-container::before {
116
+ content: '';
117
+ position: absolute;
118
+ top: 0;
119
+ left: 0;
120
+ right: 0;
121
+ bottom: 0;
122
+ background: linear-gradient(45deg, rgba(255,255,255,0.1) 0%, transparent 50%, rgba(255,255,255,0.05) 100%);
123
+ pointer-events: none;
124
+ }
125
+
126
+ /* Enhanced header with FusionX branding */
127
+ h1 {
128
+ background: linear-gradient(45deg, #ffffff, #f0f8ff, #e6e6fa) !important;
129
+ -webkit-background-clip: text !important;
130
+ -webkit-text-fill-color: transparent !important;
131
+ background-clip: text !important;
132
+ font-weight: 900 !important;
133
+ font-size: 2.8rem !important;
134
+ text-align: center !important;
135
+ margin-bottom: 2.5rem !important;
136
+ text-shadow: 2px 2px 8px rgba(0,0,0,0.3) !important;
137
+ position: relative;
138
+ }
139
+
140
+ h1::after {
141
+ content: '🎬 FusionX Enhanced';
142
+ display: block;
143
+ font-size: 1rem;
144
+ color: #6a4c93;
145
+ margin-top: 0.5rem;
146
+ font-weight: 500;
147
+ }
148
+
149
+ /* Enhanced component containers */
150
+ .input-container, .output-container {
151
+ background: rgba(255, 255, 255, 0.06) !important;
152
+ border-radius: 20px !important;
153
+ padding: 25px !important;
154
+ margin: 15px 0 !important;
155
+ backdrop-filter: blur(10px) !important;
156
+ border: 1px solid rgba(255, 255, 255, 0.12) !important;
157
+ box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1) !important;
158
+ }
159
+
160
+ /* Cinematic input styling */
161
+ input, textarea, .gr-box {
162
+ background: rgba(255, 255, 255, 0.95) !important;
163
+ border: 1px solid rgba(106, 76, 147, 0.3) !important;
164
+ border-radius: 12px !important;
165
+ color: #1a1a2e !important;
166
+ transition: all 0.4s ease !important;
167
+ box-shadow: 0 2px 8px rgba(106, 76, 147, 0.1) !important;
168
+ }
169
+
170
+ input:focus, textarea:focus {
171
+ background: rgba(255, 255, 255, 1) !important;
172
+ border-color: #6a4c93 !important;
173
+ box-shadow: 0 0 0 3px rgba(106, 76, 147, 0.15) !important;
174
+ transform: translateY(-1px) !important;
175
+ }
176
+
177
+ /* Enhanced FusionX button */
178
+ .generate-btn {
179
+ background: linear-gradient(135deg, #6a4c93 0%, #533a7d 50%, #0f3460 100%) !important;
180
+ color: white !important;
181
+ font-weight: 700 !important;
182
+ font-size: 1.2rem !important;
183
+ padding: 15px 40px !important;
184
+ border-radius: 60px !important;
185
+ border: none !important;
186
+ cursor: pointer !important;
187
+ transition: all 0.4s ease !important;
188
+ box-shadow: 0 6px 20px rgba(106, 76, 147, 0.4) !important;
189
+ position: relative;
190
+ overflow: hidden;
191
+ }
192
+
193
+ .generate-btn::before {
194
+ content: '';
195
+ position: absolute;
196
+ top: 0;
197
+ left: -100%;
198
+ width: 100%;
199
+ height: 100%;
200
+ background: linear-gradient(90deg, transparent, rgba(255,255,255,0.3), transparent);
201
+ transition: left 0.5s ease;
202
+ }
203
+
204
+ .generate-btn:hover::before {
205
+ left: 100%;
206
+ }
207
+
208
+ .generate-btn:hover {
209
+ transform: translateY(-3px) scale(1.02) !important;
210
+ box-shadow: 0 8px 25px rgba(106, 76, 147, 0.6) !important;
211
+ }
212
+
213
+ /* Enhanced slider styling */
214
+ input[type="range"] {
215
+ background: transparent !important;
216
+ }
217
+
218
+ input[type="range"]::-webkit-slider-track {
219
+ background: linear-gradient(90deg, rgba(106, 76, 147, 0.3), rgba(83, 58, 125, 0.5)) !important;
220
+ border-radius: 8px !important;
221
+ height: 8px !important;
222
+ }
223
+
224
+ input[type="range"]::-webkit-slider-thumb {
225
+ background: linear-gradient(135deg, #6a4c93, #533a7d) !important;
226
+ border: 3px solid white !important;
227
+ border-radius: 50% !important;
228
+ cursor: pointer !important;
229
+ width: 22px !important;
230
+ height: 22px !important;
231
+ -webkit-appearance: none !important;
232
+ box-shadow: 0 2px 8px rgba(106, 76, 147, 0.3) !important;
233
+ }
234
+
235
+ /* Enhanced accordion */
236
+ .gr-accordion {
237
+ background: rgba(255, 255, 255, 0.04) !important;
238
+ border-radius: 15px !important;
239
+ border: 1px solid rgba(255, 255, 255, 0.08) !important;
240
+ margin: 20px 0 !important;
241
+ backdrop-filter: blur(5px) !important;
242
+ }
243
+
244
+ /* Enhanced labels */
245
+ label {
246
+ color: #ffffff !important;
247
+ font-weight: 600 !important;
248
+ font-size: 1rem !important;
249
+ margin-bottom: 8px !important;
250
+ text-shadow: 1px 1px 2px rgba(0,0,0,0.5) !important;
251
+ }
252
+
253
+ /* Enhanced image upload */
254
+ .image-upload {
255
+ border: 3px dashed rgba(106, 76, 147, 0.4) !important;
256
+ border-radius: 20px !important;
257
+ background: rgba(255, 255, 255, 0.03) !important;
258
+ transition: all 0.4s ease !important;
259
+ position: relative;
260
+ }
261
+
262
+ .image-upload:hover {
263
+ border-color: rgba(106, 76, 147, 0.7) !important;
264
+ background: rgba(255, 255, 255, 0.08) !important;
265
+ transform: scale(1.01) !important;
266
+ }
267
+
268
+ /* Enhanced video output */
269
+ video {
270
+ border-radius: 20px !important;
271
+ box-shadow: 0 8px 30px rgba(0, 0, 0, 0.4) !important;
272
+ border: 2px solid rgba(106, 76, 147, 0.3) !important;
273
+ }
274
+
275
+ /* Enhanced examples section */
276
+ .gr-examples {
277
+ background: rgba(255, 255, 255, 0.04) !important;
278
+ border-radius: 20px !important;
279
+ padding: 25px !important;
280
+ margin-top: 25px !important;
281
+ border: 1px solid rgba(255, 255, 255, 0.1) !important;
282
+ }
283
+
284
+ /* Enhanced checkbox */
285
+ input[type="checkbox"] {
286
+ accent-color: #6a4c93 !important;
287
+ transform: scale(1.2) !important;
288
+ }
289
+
290
+ /* Responsive enhancements */
291
+ @media (max-width: 768px) {
292
+ h1 { font-size: 2.2rem !important; }
293
+ .main-container { padding: 25px !important; }
294
+ .generate-btn { padding: 12px 30px !important; font-size: 1.1rem !important; }
295
+ }
296
+
297
+ /* Badge container styling */
298
+ .badge-container {
299
+ display: flex;
300
+ justify-content: center;
301
+ gap: 15px;
302
+ margin: 20px 0;
303
+ flex-wrap: wrap;
304
+ }
305
+
306
+ .badge-container img {
307
+ border-radius: 8px;
308
+ transition: transform 0.3s ease;
309
+ }
310
+
311
+ .badge-container img:hover {
312
+ transform: scale(1.05);
313
+ }
314
+ """
315
+
316
+ def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
317
+ min_slider_h, max_slider_h,
318
+ min_slider_w, max_slider_w,
319
+ default_h, default_w):
320
+ orig_w, orig_h = pil_image.size
321
+ if orig_w <= 0 or orig_h <= 0:
322
+ return default_h, default_w
323
+
324
+ aspect_ratio = orig_h / orig_w
325
+
326
+ calc_h = round(np.sqrt(calculation_max_area * aspect_ratio))
327
+ calc_w = round(np.sqrt(calculation_max_area / aspect_ratio))
328
+
329
+ calc_h = max(mod_val, (calc_h // mod_val) * mod_val)
330
+ calc_w = max(mod_val, (calc_w // mod_val) * mod_val)
331
+
332
+ new_h = int(np.clip(calc_h, min_slider_h, (max_slider_h // mod_val) * mod_val))
333
+ new_w = int(np.clip(calc_w, min_slider_w, (max_slider_w // mod_val) * mod_val))
334
+
335
+ return new_h, new_w
336
+
337
+ def handle_image_upload_for_dims_wan(uploaded_pil_image, current_h_val, current_w_val):
338
+ if uploaded_pil_image is None:
339
+ return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
340
+ try:
341
+ new_h, new_w = _calculate_new_dimensions_wan(
342
+ uploaded_pil_image, MOD_VALUE, NEW_FORMULA_MAX_AREA,
343
+ SLIDER_MIN_H, SLIDER_MAX_H, SLIDER_MIN_W, SLIDER_MAX_W,
344
+ DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE
345
+ )
346
+ return gr.update(value=new_h), gr.update(value=new_w)
347
+ except Exception as e:
348
+ gr.Warning("Error attempting to calculate new dimensions")
349
+ return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
350
+
351
+ def get_duration(input_image, prompt, height, width,
352
+ negative_prompt, duration_seconds,
353
+ guidance_scale, steps,
354
+ seed, randomize_seed,
355
+ progress):
356
+ # FusionX optimized duration calculation
357
+ if steps > 8 and duration_seconds > 3:
358
+ return 100
359
+ elif steps > 8 or duration_seconds > 3:
360
+ return 80
361
+ else:
362
+ return 65
363
+
364
+ @spaces.GPU(duration=get_duration)
365
+ def generate_video(input_image, prompt, height, width,
366
+ negative_prompt=default_negative_prompt, duration_seconds=3,
367
+ guidance_scale=1, steps=8, # FusionX optimized default
368
+ seed=42, randomize_seed=False,
369
+ progress=gr.Progress(track_tqdm=True)):
370
+
371
+ if input_image is None:
372
+ raise gr.Error("Please upload an input image.")
373
+
374
+ target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
375
+ target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
376
+
377
+ num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
378
+
379
+ current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
380
+
381
+ resized_image = input_image.resize((target_w, target_h))
382
+
383
+ # Enhanced prompt for FusionX-style output
384
+ enhanced_prompt = f"{prompt}, cinematic quality, smooth motion, detailed animation, dynamic lighting"
385
+
386
+ with torch.inference_mode():
387
+ output_frames_list = pipe(
388
+ image=resized_image,
389
+ prompt=enhanced_prompt,
390
+ negative_prompt=negative_prompt,
391
+ height=target_h,
392
+ width=target_w,
393
+ num_frames=num_frames,
394
+ guidance_scale=float(guidance_scale),
395
+ num_inference_steps=int(steps),
396
+ generator=torch.Generator(device="cuda").manual_seed(current_seed)
397
+ ).frames[0]
398
+
399
+ with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
400
+ video_path = tmpfile.name
401
+ export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
402
+ return video_path, current_seed
403
+
404
+ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
405
+ with gr.Column(elem_classes=["main-container"]):
406
+ gr.Markdown("# ⚡ FusionX Enhanced Wan 2.1 I2V (14B)")
407
+
408
+ # Enhanced badges for FusionX
409
+ gr.HTML("""
410
+ <div class="badge-container">
411
+ <a href="https://huggingface.co/vrgamedevgirl84/Wan14BT2VFusioniX" target="_blank">
412
+ <img src="https://img.shields.io/static/v1?label=FusionX&message=ENHANCED%20MODEL&color=%236a4c93&labelColor=%23533a7d&logo=huggingface&logoColor=%23ffffff&style=for-the-badge" alt="FusionX Enhanced">
413
+ </a>
414
+ <a href="https://huggingface.co/spaces/Heartsync/WAN2-1-fast-T2V-FusioniX" target="_blank">
415
+ <img src="https://img.shields.io/static/v1?label=BASE&message=WAN%202.1%20T2V-FusioniX&color=%23008080&labelColor=%23533a7d&logo=huggingface&logoColor=%23ffffff&style=for-the-badge" alt="Base Model">
416
+ </a>
417
+ <a href="https://huggingface.co/spaces/Heartsync/WAN2-1-fast-T2V-FusioniX2" target="_blank">
418
+ <img src="https://img.shields.io/static/v1?label=BASE&message=WAN%202.1%20T2V-Fusioni2X&color=%23008080&labelColor=%23533a7d&logo=huggingface&logoColor=%23ffffff&style=for-the-badge" alt="Base Model">
419
+ </a>
420
+ <a href="https://huggingface.co/spaces/Heartsync/wan2-1-fast-security" target="_blank">
421
+ <img src="https://img.shields.io/static/v1?label=WAN%202.1&message=FAST%20%26%20Furios&color=%23008080&labelColor=%230000ff&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
422
+ </a>
423
+ </div>
424
+ """)
425
+
426
+
427
+ with gr.Row():
428
+ with gr.Column(elem_classes=["input-container"]):
429
+ input_image_component = gr.Image(
430
+ type="pil",
431
+ label="🖼️ Input Image (auto-resized to target H/W)",
432
+ elem_classes=["image-upload"]
433
+ )
434
+ prompt_input = gr.Textbox(
435
+ label="✏️ Enhanced Prompt (FusionX-style enhancements applied)",
436
+ value=default_prompt_i2v,
437
+ lines=3
438
+ )
439
+ duration_seconds_input = gr.Slider(
440
+ minimum=round(MIN_FRAMES_MODEL/FIXED_FPS,1),
441
+ maximum=round(MAX_FRAMES_MODEL/FIXED_FPS,1),
442
+ step=0.1,
443
+ value=2,
444
+ label="⏱️ Duration (seconds)",
445
+ info=f"FusionX Enhanced supports {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps. Recommended: 2-5 seconds"
446
+ )
447
+
448
+ with gr.Accordion("⚙️ Advanced FusionX Settings", open=False):
449
+ negative_prompt_input = gr.Textbox(
450
+ label="❌ Negative Prompt (FusionX Enhanced)",
451
+ value=default_negative_prompt,
452
+ lines=4
453
+ )
454
+ seed_input = gr.Slider(
455
+ label="🎲 Seed",
456
+ minimum=0,
457
+ maximum=MAX_SEED,
458
+ step=1,
459
+ value=42,
460
+ interactive=True
461
+ )
462
+ randomize_seed_checkbox = gr.Checkbox(
463
+ label="🔀 Randomize seed",
464
+ value=True,
465
+ interactive=True
466
+ )
467
+ with gr.Row():
468
+ height_input = gr.Slider(
469
+ minimum=SLIDER_MIN_H,
470
+ maximum=SLIDER_MAX_H,
471
+ step=MOD_VALUE,
472
+ value=DEFAULT_H_SLIDER_VALUE,
473
+ label=f"📏 Output Height (FusionX optimized: {MOD_VALUE} multiples)"
474
+ )
475
+ width_input = gr.Slider(
476
+ minimum=SLIDER_MIN_W,
477
+ maximum=SLIDER_MAX_W,
478
+ step=MOD_VALUE,
479
+ value=DEFAULT_W_SLIDER_VALUE,
480
+ label=f"📐 Output Width (FusionX optimized: {MOD_VALUE} multiples)"
481
+ )
482
+ steps_slider = gr.Slider(
483
+ minimum=1,
484
+ maximum=20,
485
+ step=1,
486
+ value=8, # FusionX optimized
487
+ label="🚀 Inference Steps (FusionX Enhanced: 8-10 recommended)",
488
+ info="FusionX Enhanced delivers excellent results in just 8-10 steps!"
489
+ )
490
+ guidance_scale_input = gr.Slider(
491
+ minimum=0.0,
492
+ maximum=20.0,
493
+ step=0.5,
494
+ value=1.0,
495
+ label="🎯 Guidance Scale (FusionX optimized)",
496
+ visible=False
497
+ )
498
+
499
+ generate_button = gr.Button(
500
+ "🎬 Generate FusionX Enhanced Video",
501
+ variant="primary",
502
+ elem_classes=["generate-btn"]
503
+ )
504
+
505
+ with gr.Column(elem_classes=["output-container"]):
506
+ video_output = gr.Video(
507
+ label="🎥 FusionX Enhanced Generated Video",
508
+ autoplay=True,
509
+ interactive=False
510
+ )
511
+
512
+ input_image_component.upload(
513
+ fn=handle_image_upload_for_dims_wan,
514
+ inputs=[input_image_component, height_input, width_input],
515
+ outputs=[height_input, width_input]
516
+ )
517
+
518
+ input_image_component.clear(
519
+ fn=handle_image_upload_for_dims_wan,
520
+ inputs=[input_image_component, height_input, width_input],
521
+ outputs=[height_input, width_input]
522
+ )
523
+
524
+ ui_inputs = [
525
+ input_image_component, prompt_input, height_input, width_input,
526
+ negative_prompt_input, duration_seconds_input,
527
+ guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox
528
+ ]
529
+ generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
530
+
531
+ with gr.Column():
532
+ gr.Examples(
533
+ examples=[
534
+ ["peng.png", "a penguin gracefully dancing in the pristine snow, cinematic motion with detailed feathers", 576, 576],
535
+ ["frog.jpg", "the frog jumps energetically with smooth, lifelike motion and detailed texture", 576, 576],
536
+ ],
537
+ inputs=[input_image_component, prompt_input, height_input, width_input],
538
+ outputs=[video_output, seed_input],
539
+ fn=generate_video,
540
+ cache_examples="lazy",
541
+ label="🌟 FusionX Enhanced Example Gallery"
542
+ )
543
+
544
+
545
+ if __name__ == "__main__":
546
+ demo.queue().launch()