rahul7star committed
Commit fabf8dc · verified · 1 Parent(s): 3f60b26

Update app.py

Files changed (1):
  1. app.py +439 -94
app.py CHANGED
@@ -9,69 +9,302 @@ from huggingface_hub import hf_hub_download
 import numpy as np
 from PIL import Image
 import random
-import random
-import logging
-import torchaudio
-import os
-import gc
-
-os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:512'
-os.environ['HF_HUB_CACHE'] = '/tmp/hub'  # Use temp directory to avoid filling persistent storage
-
-from diffusers import UniPCMultistepScheduler
-from transformers import CLIPTextModel, CLIPTokenizer
-
-from safetensors.torch import load_file
-from huggingface_hub import hf_hub_download
-import torch
-
-# --- Base model setup (Wan T2V) ---
-MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-Diffusers"
-LORA_FILENAME = "FusionX_LoRa/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"
-
-print("🚀 Loading Wan2.1 T2V base pipeline...")
 pipe = WanImageToVideoPipeline.from_pretrained(
-    MODEL_ID,
-    torch_dtype=torch.bfloat16,
 )
 
-# Optional: replace scheduler for more stable generation
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
 pipe.to("cuda")
 
-# --- Load FusionX-style LoRA ---
-print("🔧 Loading FusionX LoRA...")
 try:
-    lora_path = hf_hub_download(repo_id="FusionX_LoRa", filename=LORA_FILENAME)
-    pipe.load_lora_weights(lora_path, adapter_name="fusionx_lora")
-    pipe.set_adapters(["fusionx_lora"], adapter_weights=[1.0])
-    pipe.fuse_lora()
-    print("✅ FusionX LoRA applied (strength: 1.0)")
 except Exception as e:
-    print(f"⚠️ Failed to load FusionX LoRA: {e}")
-
-# --- Ready to generate ---
-print("✨ T2V model ready for text-to-video generation!")
 
 MOD_VALUE = 32
-DEFAULT_H_SLIDER_VALUE = 512
-DEFAULT_W_SLIDER_VALUE = 896
-NEW_FORMULA_MAX_AREA = 480.0 * 832.0
 
-SLIDER_MIN_H, SLIDER_MAX_H = 128, 896
-SLIDER_MIN_W, SLIDER_MAX_W = 128, 896
 MAX_SEED = np.iinfo(np.int32).max
 
 FIXED_FPS = 24
 MIN_FRAMES_MODEL = 8
-MAX_FRAMES_MODEL = 81
 
-default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
-default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
 
 def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
                                   min_slider_h, max_slider_h,
@@ -108,11 +341,24 @@ def handle_image_upload_for_dims_wan(uploaded_pil_image, current_h_val, current_
         gr.Warning("Error attempting to calculate new dimensions")
         return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
 
-@spaces.GPU
 def generate_video(input_image, prompt, height, width,
-                   negative_prompt=default_negative_prompt, duration_seconds = 10,
-                   guidance_scale = 1, steps = 4,
-                   seed = 42, randomize_seed = False,
                    progress=gr.Progress(track_tqdm=True)):
 
     if input_image is None:
@@ -127,11 +373,19 @@ def generate_video(input_image, prompt, height, width,
     resized_image = input_image.resize((target_w, target_h))
 
     with torch.inference_mode():
         output_frames_list = pipe(
-            image=resized_image, prompt=prompt, negative_prompt=negative_prompt,
-            height=target_h, width=target_w, num_frames=num_frames,
-            guidance_scale=float(guidance_scale), num_inference_steps=int(steps),
             generator=torch.Generator(device="cuda").manual_seed(current_seed)
         ).frames[0]
 
@@ -140,55 +394,146 @@ def generate_video(input_image, prompt, height, width,
     export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
     return video_path, current_seed
 
-with gr.Blocks() as demo:
-    gr.Markdown("# Fast 4 steps Wan 2.1 I2V (14B) with CausVid LoRA")
-    gr.Markdown("[CausVid](https://github.com/tianweiy/CausVid) is a distilled version of Wan 2.1 to run faster in just 4-8 steps, [extracted as LoRA by Kijai](https://huggingface.co/Kijai/WanVideo_comfy/blob/main/Wan21_CausVid_14B_T2V_lora_rank32.safetensors) and is compatible with 🧨 diffusers")
-    with gr.Row():
-        with gr.Column():
-            input_image_component = gr.Image(type="pil", label="Input Image (auto-resized to target H/W)")
-            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
-            duration_seconds_input = gr.Slider(minimum=round(MIN_FRAMES_MODEL/FIXED_FPS,1), maximum=round(MAX_FRAMES_MODEL/FIXED_FPS,1), step=0.1, value=2, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
-
-            with gr.Accordion("Advanced Settings", open=False):
-                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
-                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
-                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
-                with gr.Row():
-                    height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label=f"Output Height (multiple of {MOD_VALUE})")
-                    width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label=f"Output Width (multiple of {MOD_VALUE})")
-                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
-                guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
-
-            generate_button = gr.Button("Generate Video", variant="primary")
-        with gr.Column():
-            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
 
-    input_image_component.upload(
-        fn=handle_image_upload_for_dims_wan,
-        inputs=[input_image_component, height_input, width_input],
-        outputs=[height_input, width_input]
-    )
-
-    input_image_component.clear(
-        fn=handle_image_upload_for_dims_wan,
-        inputs=[input_image_component, height_input, width_input],
-        outputs=[height_input, width_input]
-    )
-
-    ui_inputs = [
-        input_image_component, prompt_input, height_input, width_input,
-        negative_prompt_input, duration_seconds_input,
-        guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox
-    ]
-    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
-
-    gr.Examples(
-        examples=[
-            ["peng.png", "a penguin playfully dancing in the snow, Antarctica", 896, 512],
-            ["forg.jpg", "the frog jumps around", 448, 832],
-        ],
-        inputs=[input_image_component, prompt_input, height_input, width_input], outputs=[video_output, seed_input], fn=generate_video, cache_examples="lazy"
-    )
 
 if __name__ == "__main__":
     demo.queue().launch()
 
@@ -9,69 +9,302 @@ from huggingface_hub import hf_hub_download
 import numpy as np
 from PIL import Image
 import random
 
+# Base MODEL_ID (using original Wan model that's compatible with diffusers)
+MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
+
+# FusionX enhancement LoRAs (based on FusionX composition)
+LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
+LORA_FILENAME = "FusionX_LoRa/Wan2.1_T2V_14B_FusionX_LoRA.safetensors"
+
+# Additional enhancement LoRAs for FusionX-like quality
+# ACCVIDEO_LORA_REPO = "alibaba-pai/Wan2.1-Fun-Reward-LoRAs"
+# MPS_LORA_FILENAME = "Wan2.1-MPS-Reward-LoRA.safetensors"
+
+# Load enhanced model components
+print("🚀 Loading FusionX Enhanced Wan2.1 I2V Model...")
+image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32)
+vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
 pipe = WanImageToVideoPipeline.from_pretrained(
+    MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
 )
 
+# FusionX optimized scheduler settings
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
 pipe.to("cuda")
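# Note: the VAE and CLIP image encoder above are deliberately loaded in
# float32 while the transformer runs in bfloat16; the diffusers Wan examples
# recommend a float32 VAE because decoding in reduced precision tends to
# introduce visual artifacts. This trades some memory for output quality.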
 
+# Load FusionX enhancement LoRAs
+lora_adapters = []
+lora_weights = []
+
 try:
+    # Load CausVid LoRA (strength 1.0 as per FusionX)
+    causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
+    pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
+    lora_adapters.append("causvid_lora")
+    lora_weights.append(1.0)  # FusionX uses 1.0 for CausVid
+    print("✅ CausVid LoRA loaded (strength: 1.0)")
 except Exception as e:
+    print(f"⚠️ CausVid LoRA not loaded: {e}")
+
+# Apply LoRA adapters if any were loaded
+if lora_adapters:
+    pipe.set_adapters(lora_adapters, adapter_weights=lora_weights)
+    pipe.fuse_lora()
+    print(f"🔥 FusionX Enhancement Applied: {len(lora_adapters)} LoRAs fused")
+else:
+    print("📝 No LoRAs loaded - using base Wan model")
 
 MOD_VALUE = 32
+DEFAULT_H_SLIDER_VALUE = 576   # FusionX optimized default
+DEFAULT_W_SLIDER_VALUE = 1024  # FusionX optimized default
+NEW_FORMULA_MAX_AREA = 576.0 * 1024.0  # Updated for FusionX
 
+SLIDER_MIN_H, SLIDER_MAX_H = 128, 1080
+SLIDER_MIN_W, SLIDER_MAX_W = 128, 1920
 MAX_SEED = np.iinfo(np.int32).max
 
 FIXED_FPS = 24
 MIN_FRAMES_MODEL = 8
+MAX_FRAMES_MODEL = 121  # FusionX supports up to 121 frames
+
+# Enhanced prompts for FusionX-style output
+default_prompt_i2v = "Cinematic motion, smooth animation, detailed textures, dynamic lighting, professional cinematography"
+default_negative_prompt = "Static image, no motion, blurred details, overexposed, underexposed, low quality, worst quality, JPEG artifacts, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, watermark, text, signature, three legs, many people in the background, walking backwards"
+
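# The duration slider is converted to a frame count in unchanged helper code
# that this diff does not show; a minimal sketch of the presumable clamp,
# using only the constants above:
#
#     num_frames = int(round(duration_seconds * FIXED_FPS))
#     num_frames = max(MIN_FRAMES_MODEL, min(MAX_FRAMES_MODEL, num_frames))
#     # e.g. 2.0 s * 24 fps = 48 frames; a 10 s request would clamp to 121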
+# Enhanced CSS for FusionX theme
+custom_css = """
+/* Enhanced FusionX theme with cinematic styling */
+.gradio-container {
+    font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
+    background: linear-gradient(135deg, #1a1a2e 0%, #16213e 25%, #0f3460 50%, #533a7d 75%, #6a4c93 100%) !important;
+    background-size: 400% 400% !important;
+    animation: cinematicShift 20s ease infinite !important;
+}
+
+@keyframes cinematicShift {
+    0% { background-position: 0% 50%; }
+    25% { background-position: 100% 50%; }
+    50% { background-position: 100% 100%; }
+    75% { background-position: 0% 100%; }
+    100% { background-position: 0% 50%; }
+}
+
+/* Main container with cinematic glass effect */
+.main-container {
+    backdrop-filter: blur(15px);
+    background: rgba(255, 255, 255, 0.08) !important;
+    border-radius: 25px !important;
+    padding: 35px !important;
+    box-shadow: 0 12px 40px 0 rgba(31, 38, 135, 0.4) !important;
+    border: 1px solid rgba(255, 255, 255, 0.15) !important;
+    position: relative;
+    overflow: hidden;
+}
+
+.main-container::before {
+    content: '';
+    position: absolute;
+    top: 0;
+    left: 0;
+    right: 0;
+    bottom: 0;
+    background: linear-gradient(45deg, rgba(255,255,255,0.1) 0%, transparent 50%, rgba(255,255,255,0.05) 100%);
+    pointer-events: none;
+}
+
+/* Enhanced header with FusionX branding */
+h1 {
+    background: linear-gradient(45deg, #ffffff, #f0f8ff, #e6e6fa) !important;
+    -webkit-background-clip: text !important;
+    -webkit-text-fill-color: transparent !important;
+    background-clip: text !important;
+    font-weight: 900 !important;
+    font-size: 2.8rem !important;
+    text-align: center !important;
+    margin-bottom: 2.5rem !important;
+    text-shadow: 2px 2px 8px rgba(0,0,0,0.3) !important;
+    position: relative;
+}
+
+h1::after {
+    content: '🎬 FusionX Enhanced';
+    display: block;
+    font-size: 1rem;
+    color: #6a4c93;
+    margin-top: 0.5rem;
+    font-weight: 500;
+}
+
+/* Enhanced component containers */
+.input-container, .output-container {
+    background: rgba(255, 255, 255, 0.06) !important;
+    border-radius: 20px !important;
+    padding: 25px !important;
+    margin: 15px 0 !important;
+    backdrop-filter: blur(10px) !important;
+    border: 1px solid rgba(255, 255, 255, 0.12) !important;
+    box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1) !important;
+}
+
+/* Cinematic input styling */
+input, textarea, .gr-box {
+    background: rgba(255, 255, 255, 0.95) !important;
+    border: 1px solid rgba(106, 76, 147, 0.3) !important;
+    border-radius: 12px !important;
+    color: #1a1a2e !important;
+    transition: all 0.4s ease !important;
+    box-shadow: 0 2px 8px rgba(106, 76, 147, 0.1) !important;
+}
+
+input:focus, textarea:focus {
+    background: rgba(255, 255, 255, 1) !important;
+    border-color: #6a4c93 !important;
+    box-shadow: 0 0 0 3px rgba(106, 76, 147, 0.15) !important;
+    transform: translateY(-1px) !important;
+}
+
+/* Enhanced FusionX button */
+.generate-btn {
+    background: linear-gradient(135deg, #6a4c93 0%, #533a7d 50%, #0f3460 100%) !important;
+    color: white !important;
+    font-weight: 700 !important;
+    font-size: 1.2rem !important;
+    padding: 15px 40px !important;
+    border-radius: 60px !important;
+    border: none !important;
+    cursor: pointer !important;
+    transition: all 0.4s ease !important;
+    box-shadow: 0 6px 20px rgba(106, 76, 147, 0.4) !important;
+    position: relative;
+    overflow: hidden;
+}
+
+.generate-btn::before {
+    content: '';
+    position: absolute;
+    top: 0;
+    left: -100%;
+    width: 100%;
+    height: 100%;
+    background: linear-gradient(90deg, transparent, rgba(255,255,255,0.3), transparent);
+    transition: left 0.5s ease;
+}
+
+.generate-btn:hover::before {
+    left: 100%;
+}
+
+.generate-btn:hover {
+    transform: translateY(-3px) scale(1.02) !important;
+    box-shadow: 0 8px 25px rgba(106, 76, 147, 0.6) !important;
+}
+
+/* Enhanced slider styling */
+input[type="range"] {
+    background: transparent !important;
+}
+
+input[type="range"]::-webkit-slider-track {
+    background: linear-gradient(90deg, rgba(106, 76, 147, 0.3), rgba(83, 58, 125, 0.5)) !important;
+    border-radius: 8px !important;
+    height: 8px !important;
+}
+
+input[type="range"]::-webkit-slider-thumb {
+    background: linear-gradient(135deg, #6a4c93, #533a7d) !important;
+    border: 3px solid white !important;
+    border-radius: 50% !important;
+    cursor: pointer !important;
+    width: 22px !important;
+    height: 22px !important;
+    -webkit-appearance: none !important;
+    box-shadow: 0 2px 8px rgba(106, 76, 147, 0.3) !important;
+}
+
+/* Enhanced accordion */
+.gr-accordion {
+    background: rgba(255, 255, 255, 0.04) !important;
+    border-radius: 15px !important;
+    border: 1px solid rgba(255, 255, 255, 0.08) !important;
+    margin: 20px 0 !important;
+    backdrop-filter: blur(5px) !important;
+}
+
+/* Enhanced labels */
+label {
+    color: #ffffff !important;
+    font-weight: 600 !important;
+    font-size: 1rem !important;
+    margin-bottom: 8px !important;
+    text-shadow: 1px 1px 2px rgba(0,0,0,0.5) !important;
+}
+
+/* Enhanced image upload */
+.image-upload {
+    border: 3px dashed rgba(106, 76, 147, 0.4) !important;
+    border-radius: 20px !important;
+    background: rgba(255, 255, 255, 0.03) !important;
+    transition: all 0.4s ease !important;
+    position: relative;
+}
+
+.image-upload:hover {
+    border-color: rgba(106, 76, 147, 0.7) !important;
+    background: rgba(255, 255, 255, 0.08) !important;
+    transform: scale(1.01) !important;
+}
+
+/* Enhanced video output */
+video {
+    border-radius: 20px !important;
+    box-shadow: 0 8px 30px rgba(0, 0, 0, 0.4) !important;
+    border: 2px solid rgba(106, 76, 147, 0.3) !important;
+}
+
+/* Enhanced examples section */
+.gr-examples {
+    background: rgba(255, 255, 255, 0.04) !important;
+    border-radius: 20px !important;
+    padding: 25px !important;
+    margin-top: 25px !important;
+    border: 1px solid rgba(255, 255, 255, 0.1) !important;
+}
+
+/* Enhanced checkbox */
+input[type="checkbox"] {
+    accent-color: #6a4c93 !important;
+    transform: scale(1.2) !important;
+}
+
+/* Responsive enhancements */
+@media (max-width: 768px) {
+    h1 { font-size: 2.2rem !important; }
+    .main-container { padding: 25px !important; }
+    .generate-btn { padding: 12px 30px !important; font-size: 1.1rem !important; }
+}
+
+/* Badge container styling */
+.badge-container {
+    display: flex;
+    justify-content: center;
+    gap: 15px;
+    margin: 20px 0;
+    flex-wrap: wrap;
+}
+
+.badge-container img {
+    border-radius: 8px;
+    transition: transform 0.3s ease;
+}
+
+.badge-container img:hover {
+    transform: scale(1.05);
+}
+"""
 
 def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
                                   min_slider_h, max_slider_h,
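# The body of _calculate_new_dimensions_wan lives in the unchanged region this
# diff skips (new lines 311-340). A hedged sketch of the usual Wan-style logic
# such helpers implement -- fit the image's aspect ratio into
# calculation_max_area, snap to multiples of mod_val, clamp to slider bounds;
# the variable names here are illustrative:
#
#     aspect = pil_image.height / pil_image.width
#     h = round(np.sqrt(calculation_max_area * aspect) / mod_val) * mod_val
#     w = round(np.sqrt(calculation_max_area / aspect) / mod_val) * mod_val
#     h = max(min_slider_h, min(max_slider_h, h))
#     # width is clamped the same way against its own slider bounds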
 
@@ -108,11 +341,24 @@ def handle_image_upload_for_dims_wan(uploaded_pil_image, current_h_val, current_
         gr.Warning("Error attempting to calculate new dimensions")
         return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
 
+def get_duration(input_image, prompt, height, width,
+                 negative_prompt, duration_seconds,
+                 guidance_scale, steps,
+                 seed, randomize_seed,
+                 progress):
+    # FusionX optimized duration calculation
+    if steps > 8 and duration_seconds > 3:
+        return 100
+    elif steps > 8 or duration_seconds > 3:
+        return 80
+    else:
+        return 65
+
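# Note: spaces.GPU accepts a callable for `duration` (used below); ZeroGPU
# calls it with the same arguments as the decorated function and reserves the
# returned number of seconds, so heavier settings (more steps, longer clips)
# request a longer GPU slot up front.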
+@spaces.GPU(duration=get_duration)
 def generate_video(input_image, prompt, height, width,
+                   negative_prompt=default_negative_prompt, duration_seconds=3,
+                   guidance_scale=1, steps=8,  # FusionX optimized default
+                   seed=42, randomize_seed=False,
                    progress=gr.Progress(track_tqdm=True)):
 
     if input_image is None:
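# The validation branch and the seed / frame-count preamble of generate_video
# sit in the unchanged region the diff skips; a plausible sketch of the seed
# handling, assuming the usual randomize toggle:
#
#     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)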
 
@@ -127,11 +373,19 @@ def generate_video(input_image, prompt, height, width,
     resized_image = input_image.resize((target_w, target_h))
 
+    # Enhanced prompt for FusionX-style output
+    enhanced_prompt = f"{prompt}, cinematic quality, smooth motion, detailed animation, dynamic lighting"
+
     with torch.inference_mode():
         output_frames_list = pipe(
+            image=resized_image,
+            prompt=enhanced_prompt,
+            negative_prompt=negative_prompt,
+            height=target_h,
+            width=target_w,
+            num_frames=num_frames,
+            guidance_scale=float(guidance_scale),
+            num_inference_steps=int(steps),
             generator=torch.Generator(device="cuda").manual_seed(current_seed)
         ).frames[0]
 
@@ -140,55 +394,146 @@
     export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
     return video_path, current_seed
 
+with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
+    with gr.Column(elem_classes=["main-container"]):
+        gr.Markdown("# FusionX Enhanced Wan 2.1 I2V (14B)")
+
+        # Enhanced badges for FusionX
+        gr.HTML("""
+        <div class="badge-container">
+            <a href="https://huggingface.co/vrgamedevgirl84/Wan14BT2VFusioniX" target="_blank">
+                <img src="https://img.shields.io/static/v1?label=FusionX&message=ENHANCED%20MODEL&color=%236a4c93&labelColor=%23533a7d&logo=huggingface&logoColor=%23ffffff&style=for-the-badge" alt="FusionX Enhanced">
+            </a>
+            <a href="https://huggingface.co/spaces/Heartsync/WAN2-1-fast-T2V-FusioniX" target="_blank">
+                <img src="https://img.shields.io/static/v1?label=BASE&message=WAN%202.1%20T2V-FusioniX&color=%23008080&labelColor=%23533a7d&logo=huggingface&logoColor=%23ffffff&style=for-the-badge" alt="Base Model">
+            </a>
+            <a href="https://huggingface.co/spaces/Heartsync/WAN2-1-fast-T2V-FusioniX2" target="_blank">
+                <img src="https://img.shields.io/static/v1?label=BASE&message=WAN%202.1%20T2V-Fusioni2X&color=%23008080&labelColor=%23533a7d&logo=huggingface&logoColor=%23ffffff&style=for-the-badge" alt="Base Model">
+            </a>
+            <a href="https://huggingface.co/spaces/Heartsync/wan2-1-fast-security" target="_blank">
+                <img src="https://img.shields.io/static/v1?label=WAN%202.1&message=FAST%20%26%20Furios&color=%23008080&labelColor=%230000ff&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
+            </a>
+        </div>
+        """)
+
+        with gr.Row():
+            with gr.Column(elem_classes=["input-container"]):
+                input_image_component = gr.Image(
+                    type="pil",
+                    label="🖼️ Input Image (auto-resized to target H/W)",
+                    elem_classes=["image-upload"]
+                )
+                prompt_input = gr.Textbox(
+                    label="✏️ Enhanced Prompt (FusionX-style enhancements applied)",
+                    value=default_prompt_i2v,
+                    lines=3
+                )
+                duration_seconds_input = gr.Slider(
+                    minimum=round(MIN_FRAMES_MODEL/FIXED_FPS, 1),
+                    maximum=round(MAX_FRAMES_MODEL/FIXED_FPS, 1),
+                    step=0.1,
+                    value=2,
+                    label="⏱️ Duration (seconds)",
+                    info=f"FusionX Enhanced supports {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps. Recommended: 2-5 seconds"
+                )
+
+                with gr.Accordion("⚙️ Advanced FusionX Settings", open=False):
+                    negative_prompt_input = gr.Textbox(
+                        label="❌ Negative Prompt (FusionX Enhanced)",
+                        value=default_negative_prompt,
+                        lines=4
+                    )
+                    seed_input = gr.Slider(
+                        label="🎲 Seed",
+                        minimum=0,
+                        maximum=MAX_SEED,
+                        step=1,
+                        value=42,
+                        interactive=True
+                    )
+                    randomize_seed_checkbox = gr.Checkbox(
+                        label="🔀 Randomize seed",
+                        value=True,
+                        interactive=True
+                    )
+                    with gr.Row():
+                        height_input = gr.Slider(
+                            minimum=SLIDER_MIN_H,
+                            maximum=SLIDER_MAX_H,
+                            step=MOD_VALUE,
+                            value=DEFAULT_H_SLIDER_VALUE,
+                            label=f"📏 Output Height (FusionX optimized: {MOD_VALUE} multiples)"
+                        )
+                        width_input = gr.Slider(
+                            minimum=SLIDER_MIN_W,
+                            maximum=SLIDER_MAX_W,
+                            step=MOD_VALUE,
+                            value=DEFAULT_W_SLIDER_VALUE,
+                            label=f"📐 Output Width (FusionX optimized: {MOD_VALUE} multiples)"
+                        )
+                    steps_slider = gr.Slider(
+                        minimum=1,
+                        maximum=20,
+                        step=1,
+                        value=8,  # FusionX optimized
+                        label="🚀 Inference Steps (FusionX Enhanced: 8-10 recommended)",
+                        info="FusionX Enhanced delivers excellent results in just 8-10 steps!"
+                    )
+                    guidance_scale_input = gr.Slider(
+                        minimum=0.0,
+                        maximum=20.0,
+                        step=0.5,
+                        value=1.0,
+                        label="🎯 Guidance Scale (FusionX optimized)",
+                        visible=False
+                    )
+
+                generate_button = gr.Button(
+                    "🎬 Generate FusionX Enhanced Video",
+                    variant="primary",
+                    elem_classes=["generate-btn"]
+                )
+
+            with gr.Column(elem_classes=["output-container"]):
+                video_output = gr.Video(
+                    label="🎥 FusionX Enhanced Generated Video",
+                    autoplay=True,
+                    interactive=False
+                )
+
+        input_image_component.upload(
+            fn=handle_image_upload_for_dims_wan,
+            inputs=[input_image_component, height_input, width_input],
+            outputs=[height_input, width_input]
+        )
+
+        input_image_component.clear(
+            fn=handle_image_upload_for_dims_wan,
+            inputs=[input_image_component, height_input, width_input],
+            outputs=[height_input, width_input]
+        )
+
+        ui_inputs = [
+            input_image_component, prompt_input, height_input, width_input,
+            negative_prompt_input, duration_seconds_input,
+            guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox
+        ]
+        generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
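# generate_video returns (video_path, current_seed), so listing seed_input as
# the second output writes the seed actually used back into the slider after
# each run.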
+
+        with gr.Column():
+            gr.Examples(
+                examples=[
+                    ["peng.png", "a penguin gracefully dancing in the pristine snow, cinematic motion with detailed feathers", 576, 576],
+                    ["frog.jpg", "the frog jumps energetically with smooth, lifelike motion and detailed texture", 576, 576],
+                ],
+                inputs=[input_image_component, prompt_input, height_input, width_input],
+                outputs=[video_output, seed_input],
+                fn=generate_video,
+                cache_examples="lazy",
+                label="🌟 FusionX Enhanced Example Gallery"
+            )
+
 
 if __name__ == "__main__":
     demo.queue().launch()