Spaces: Runtime error
Update app_lora.py
app_lora.py (+48 -137)
CHANGED
@@ -16,17 +16,13 @@ import warnings
16 |   warnings.filterwarnings("ignore", message=".*Attempting to use legacy OpenCV backend.*")
17 |   warnings.filterwarnings("ignore", message=".*num_frames - 1.*")
18 |
19 | - # This decorator is specific to HuggingFace Spaces and will cause an error in other environments.
20 | - # import spaces
21 | -
22 |   MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
23 | -
24 |   LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
25 |   LORA_FILENAME = "FusionX_LoRa/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"
26 |
27 | - #
28 | - # This section requires a GPU and CUDA to be available
29 |   pipe = None
30 |   if torch.cuda.is_available():
31 |       image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float16)
32 |       vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float16)
@@ -34,65 +30,47 @@ if torch.cuda.is_available():
34 |           MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.float16
35 |       )
36 |       pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
37 | -
38 | -     # Enable memory efficient attention and CPU offloading for large videos
39 |       pipe.enable_model_cpu_offload()
40 |
41 |       try:
42 |           causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
43 |           print("✅ LoRA downloaded to:", causvid_path)
44 | -
45 |           pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
46 |           pipe.set_adapters(["causvid_lora"], adapter_weights=[0.75])
47 |           pipe.fuse_lora()
48 | -
49 |       except Exception as e:
50 |           import traceback
51 |           print("❌ Error during LoRA loading:")
52 |           traceback.print_exc()
53 |   else:
54 | -     print("CUDA is not available. This script requires a GPU
55 | -
56 |
57 |   MOD_VALUE = 32
58 | - DEFAULT_H_SLIDER_VALUE = 640
59 | - DEFAULT_W_SLIDER_VALUE = 1024
60 |   NEW_FORMULA_MAX_AREA = 640.0 * 1024.0
61 | -
62 |   SLIDER_MIN_H, SLIDER_MAX_H = 128, 1024
63 |   SLIDER_MIN_W, SLIDER_MAX_W = 128, 1024
64 |   MAX_SEED = np.iinfo(np.int32).max
65 | -
66 | - FIXED_FPS = 24
67 | - MIN_FRAMES_MODEL = 8    # Minimum 8 frames (~0.33s)
68 | - MAX_FRAMES_MODEL = 240  # Maximum 240 frames (10 seconds at 24fps)
69 | -
70 |   default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
71 |   default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
72 |
73 | -
74 |   def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
75 | -                                    min_slider_h, max_slider_h,
76 | -                                    min_slider_w, max_slider_w,
77 |                                      default_h, default_w):
78 |       orig_w, orig_h = pil_image.size
79 | -     if orig_w <= 0 or orig_h <= 0:
80 | -         return default_h, default_w
81 | -
82 |       aspect_ratio = orig_h / orig_w
83 | -
84 |       calc_h = round(np.sqrt(calculation_max_area * aspect_ratio))
85 |       calc_w = round(np.sqrt(calculation_max_area / aspect_ratio))
86 | -
87 |       calc_h = max(mod_val, (calc_h // mod_val) * mod_val)
88 |       calc_w = max(mod_val, (calc_w // mod_val) * mod_val)
89 | -
90 |       new_h = int(np.clip(calc_h, min_slider_h, (max_slider_h // mod_val) * mod_val))
91 |       new_w = int(np.clip(calc_w, min_slider_w, (max_slider_w // mod_val) * mod_val))
92 | -
93 |       return new_h, new_w
94 |
95 | - def handle_image_upload_for_dims_wan(uploaded_pil_image
96 |       if uploaded_pil_image is None:
97 |           return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
98 |       try:
@@ -103,12 +81,10 @@ def handle_image_upload_for_dims_wan(uploaded_pil_image, current_h_val, current_
103 |           )
104 |           return gr.update(value=new_h), gr.update(value=new_w)
105 |       except Exception as e:
106 | -         gr.Warning("Error
107 |           return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
108 |
109 | -
110 |   def export_video_with_ffmpeg(frames, output_path, fps=24):
111 | -     """Export video using imageio if available, otherwise fall back to OpenCV"""
112 |       try:
113 |           import imageio
114 |           writer = imageio.get_writer(output_path, fps=fps, codec='libx264',
@@ -116,152 +92,87 @@ def export_video_with_ffmpeg(frames, output_path, fps=24):
116 |           for frame in frames:
117 |               writer.append_data(np.array(frame))
118 |           writer.close()
119 | -         return True
120 |       except ImportError:
121 |           export_to_video(frames, output_path, fps=fps)
122 | -         return False
123 |
124 |   def generate_video(input_image, prompt, height, width,
125 | -                      negative_prompt
126 | -                      guidance_scale
127 | -                      seed=42, randomize_seed=False,
128 |                        progress=gr.Progress(track_tqdm=True)):
129 | -
130 | -
131 | -         raise gr.Error("Pipeline not initialized or CUDA not available. Please check the console for errors.")
132 | -
133 |       if input_image is None:
134 |           raise gr.Error("Please upload an input image.")
135 |
136 |       target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
137 |       target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
138 | -
139 |       raw_frames = int(round(duration_seconds * FIXED_FPS))
140 |       num_frames = ((raw_frames - 1) // 4) * 4 + 1
141 |       num_frames = np.clip(num_frames, MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
142 |
143 | -     if num_frames > 120:
144 | -
145 | -
146 | -
147 | -
148 | -         target_w = max(MOD_VALUE, (int(target_w * scale_factor) // MOD_VALUE) * MOD_VALUE)
149 | -         gr.Info(f"Reduced resolution to {target_w}x{target_h} for long video generation")
150 | -
151 | -     print(f"Generating {num_frames} frames (requested {raw_frames}) at {target_w}x{target_h}")
152 |
153 |       current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
154 |       resized_image = input_image.resize((target_w, target_h), Image.Resampling.LANCZOS)
155 | -
156 |       torch.cuda.empty_cache()
157 |
158 |       try:
159 | -         with torch.inference_mode():
160 | -
161 | -
162 | -
163 | -
164 | -
165 | -
166 | -             return_dict=True
167 | -         ).frames[0]
168 |       except torch.cuda.OutOfMemoryError:
169 |           torch.cuda.empty_cache()
170 | -         raise gr.Error("Out of GPU memory. Try reducing the duration or resolution.")
171 | -     except Exception as e:
172 | -         torch.cuda.empty_cache()
173 | -         raise gr.Error(f"Generation failed: {str(e)}")
174 | -
175 | -     torch.cuda.empty_cache()
176 |
177 |       with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
178 |           video_path = tmpfile.name
179 |       export_video_with_ffmpeg(output_frames_list, video_path, fps=FIXED_FPS)
180 | -
181 | -
182 | -     try:
183 | -         subprocess.run(['ffmpeg', '-version'], capture_output=True, check=True)
184 | -         optimized_path = video_path + "_opt.mp4"
185 | -         cmd = [
186 | -             'ffmpeg', '-y', '-i', video_path, '-c:v', 'libx264', '-pix_fmt', 'yuv420p',
187 | -             '-profile:v', 'main', '-level', '4.0', '-movflags', '+faststart', '-crf', '23',
188 | -             '-preset', 'medium', '-maxrate', '10M', '-bufsize', '20M', optimized_path
189 | -         ]
190 | -         result = subprocess.run(cmd, capture_output=True, text=True)
191 | -         if result.returncode == 0 and os.path.exists(optimized_path) and os.path.getsize(optimized_path) > 0:
192 | -             os.unlink(video_path)
193 | -             video_path = optimized_path
194 | -         else:
195 | -             print(f"FFmpeg optimization failed: {result.stderr}")
196 | -     except (subprocess.CalledProcessError, FileNotFoundError):
197 | -         print("FFmpeg not available or optimization failed, using original export")
198 | -
199 |       return video_path, current_seed
200 |
201 | - # Gradio
202 |   with gr.Blocks() as demo:
203 | -     gr.Markdown("#
204 | -     gr.Markdown("
205 | -
206 |       with gr.Row():
207 |           with gr.Column():
208 | -             input_image_component = gr.Image(type="pil", label="Input Image
209 |               prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
210 | -             duration_seconds_input = gr.Slider(
211 | -                 minimum=round(MIN_FRAMES_MODEL/FIXED_FPS, 1),
212 | -                 maximum=round(MAX_FRAMES_MODEL/FIXED_FPS, 1),
213 | -                 step=0.1, value=2, label="Duration (seconds)",
214 | -                 info=f"Video length: {MIN_FRAMES_MODEL/FIXED_FPS:.1f}-{MAX_FRAMES_MODEL/FIXED_FPS:.1f}s."
215 | -             )
216 |               with gr.Accordion("Advanced Settings", open=False):
217 |                   negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
218 | -                 seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42
219 | -                 randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True
220 |                   with gr.Row():
221 | -                     height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label=
222 | -                     width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label=
223 |                   steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
224 |                   guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
225 | -
226 | -             generate_button = gr.Button("Generate Video", variant="primary")
227 |           with gr.Column():
228 |               video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
229 | -             gr.Markdown("### Tips
230 | -
231 | -     input_image_component.upload(
232 | -         fn=handle_image_upload_for_dims_wan,
233 | -         inputs=[input_image_component, height_input, width_input],
234 | -         outputs=[height_input, width_input]
235 | -     )
236 | -     input_image_component.clear(
237 | -         fn=handle_image_upload_for_dims_wan,
238 | -         inputs=[input_image_component, height_input, width_input],
239 | -         outputs=[height_input, width_input]
240 | -     )
241 |
242 | -
243 | -         input_image_component, prompt_input, height_input, width_input,
244 | -         negative_prompt_input, duration_seconds_input,
245 | -         guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox
246 | -     ]
247 | -     generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
248 |
249 | -
250 | -
251 | -     # gr.Examples(
252 | -     #     examples=[
253 | -     #         ["path/to/your/peng.png", "a penguin playfully dancing in the snow, Antarctica", 896, 512],
254 | -     #         ["path/to/your/forg.jpg", "the frog jumps around", 448, 832],
255 | -     #     ],
256 | -     #     inputs=[input_image_component, prompt_input, height_input, width_input],
257 | -     #     outputs=[video_output, seed_input],
258 | -     #     fn=generate_video,
259 | -     #     cache_examples="lazy"
260 | -     # )
261 |
262 |   if __name__ == "__main__":
263 |       if pipe is not None:
264 |           demo.queue(max_size=3).launch()
265 |       else:
266 | -
267 | -         gr.
app_lora.py (after the change; lines added in this commit are marked with +):

16 |   warnings.filterwarnings("ignore", message=".*Attempting to use legacy OpenCV backend.*")
17 |   warnings.filterwarnings("ignore", message=".*num_frames - 1.*")
18 |
19 |   MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
20 |   LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
21 |   LORA_FILENAME = "FusionX_LoRa/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"
22 |
23 | + # --- Model Initialization ---
24 |   pipe = None
25 | + # This check correctly identifies if the Hugging Face Space has a GPU.
26 |   if torch.cuda.is_available():
27 |       image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float16)
28 |       vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float16)
30 |           MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.float16
31 |       )
32 |       pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
33 |       pipe.enable_model_cpu_offload()
34 |
35 |       try:
36 |           causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
37 |           print("✅ LoRA downloaded to:", causvid_path)
38 |           pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
39 |           pipe.set_adapters(["causvid_lora"], adapter_weights=[0.75])
40 |           pipe.fuse_lora()
41 |       except Exception as e:
42 |           import traceback
43 |           print("❌ Error during LoRA loading:")
44 |           traceback.print_exc()
45 |   else:
46 | +     print("CUDA is not available. This script requires a GPU. Please upgrade your Space hardware.")
47 |
48 | + # --- Constants and Helper Functions ---
49 |   MOD_VALUE = 32
50 | + DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE = 640, 1024
51 |   NEW_FORMULA_MAX_AREA = 640.0 * 1024.0
52 |   SLIDER_MIN_H, SLIDER_MAX_H = 128, 1024
53 |   SLIDER_MIN_W, SLIDER_MAX_W = 128, 1024
54 |   MAX_SEED = np.iinfo(np.int32).max
55 | + FIXED_FPS, MIN_FRAMES_MODEL, MAX_FRAMES_MODEL = 24, 8, 240
56 |   default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
57 |   default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
58 |
59 |   def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
60 | +                                     min_slider_h, max_slider_h, min_slider_w, max_slider_w,
61 |                                       default_h, default_w):
62 |       orig_w, orig_h = pil_image.size
63 | +     if orig_w <= 0 or orig_h <= 0: return default_h, default_w
64 |       aspect_ratio = orig_h / orig_w
65 |       calc_h = round(np.sqrt(calculation_max_area * aspect_ratio))
66 |       calc_w = round(np.sqrt(calculation_max_area / aspect_ratio))
67 |       calc_h = max(mod_val, (calc_h // mod_val) * mod_val)
68 |       calc_w = max(mod_val, (calc_w // mod_val) * mod_val)
69 |       new_h = int(np.clip(calc_h, min_slider_h, (max_slider_h // mod_val) * mod_val))
70 |       new_w = int(np.clip(calc_w, min_slider_w, (max_slider_w // mod_val) * mod_val))
71 |       return new_h, new_w
72 |
73 | + def handle_image_upload_for_dims_wan(uploaded_pil_image):
74 |       if uploaded_pil_image is None:
75 |           return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
76 |       try:
81 |           )
82 |           return gr.update(value=new_h), gr.update(value=new_w)
83 |       except Exception as e:
84 | +         gr.Warning("Error calculating new dimensions.")
85 |           return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
86 |
87 |   def export_video_with_ffmpeg(frames, output_path, fps=24):
88 |       try:
89 |           import imageio
90 |           writer = imageio.get_writer(output_path, fps=fps, codec='libx264',
92 |           for frame in frames:
93 |               writer.append_data(np.array(frame))
94 |           writer.close()
95 |       except ImportError:
96 |           export_to_video(frames, output_path, fps=fps)
97 |
98 |   def generate_video(input_image, prompt, height, width,
99 | +                      negative_prompt, duration_seconds,
100 | +                     guidance_scale, steps, seed, randomize_seed,
101 |                       progress=gr.Progress(track_tqdm=True)):
102 | +     if pipe is None:
103 | +         raise gr.Error("Pipeline not initialized. Check logs for GPU availability.")
104 |       if input_image is None:
105 |           raise gr.Error("Please upload an input image.")
106 |
107 |       target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
108 |       target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
109 |       raw_frames = int(round(duration_seconds * FIXED_FPS))
110 |       num_frames = ((raw_frames - 1) // 4) * 4 + 1
111 |       num_frames = np.clip(num_frames, MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
112 |
113 | +     if num_frames > 120 and max(target_h, target_w) > 768:
114 | +         scale_factor = 768 / max(target_h, target_w)
115 | +         target_h = max(MOD_VALUE, int(target_h * scale_factor) // MOD_VALUE * MOD_VALUE)
116 | +         target_w = max(MOD_VALUE, int(target_w * scale_factor) // MOD_VALUE * MOD_VALUE)
117 | +         gr.Info(f"Reduced resolution to {target_w}x{target_h} for long video.")
118 |
119 |       current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
120 |       resized_image = input_image.resize((target_w, target_h), Image.Resampling.LANCZOS)
121 |       torch.cuda.empty_cache()
122 |
123 |       try:
124 | +         with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float16):
125 | +             output_frames_list = pipe(
126 | +                 image=resized_image, prompt=prompt, negative_prompt=negative_prompt,
127 | +                 height=target_h, width=target_w, num_frames=num_frames,
128 | +                 guidance_scale=float(guidance_scale), num_inference_steps=int(steps),
129 | +                 generator=torch.Generator(device="cuda").manual_seed(current_seed)
130 | +             ).frames[0]
131 |       except torch.cuda.OutOfMemoryError:
132 | +         raise gr.Error("Out of GPU memory. Try reducing duration or resolution.")
133 | +     finally:
134 |           torch.cuda.empty_cache()
135 |
136 |       with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
137 |           video_path = tmpfile.name
138 |       export_video_with_ffmpeg(output_frames_list, video_path, fps=FIXED_FPS)
139 | +     # Optional: FFmpeg optimization
140 | +     # ...
141 |       return video_path, current_seed
142 |
143 | + # --- Gradio UI ---
144 |   with gr.Blocks() as demo:
145 | +     gr.Markdown("# Wan 2.1 I2V FusionX-LoRA")
146 | +     gr.Markdown("GPU is required. If this doesn't load, check your Space hardware settings.")
147 | +
148 |       with gr.Row():
149 |           with gr.Column():
150 | +             input_image_component = gr.Image(type="pil", label="Input Image")
151 |               prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
152 | +             duration_seconds_input = gr.Slider(minimum=round(MIN_FRAMES_MODEL/FIXED_FPS, 1), maximum=round(MAX_FRAMES_MODEL/FIXED_FPS, 1), step=0.1, value=2, label="Duration (seconds)")
153 |               with gr.Accordion("Advanced Settings", open=False):
154 |                   negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
155 | +                 seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
156 | +                 randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
157 |                   with gr.Row():
158 | +                     height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label="Height")
159 | +                     width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label="Width")
160 |                   steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
161 |                   guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
162 | +             generate_button = gr.Button("Generate Video", variant="primary", interactive=(pipe is not None))
163 |           with gr.Column():
164 |               video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
165 | +             gr.Markdown("### Tips:\n- Longer videos need more memory.\n- 4-8 steps is optimal.")
166 |
167 | +     input_image_component.upload(fn=handle_image_upload_for_dims_wan, inputs=input_image_component, outputs=[height_input, width_input])
168 |
169 | +     ui_inputs = [input_image_component, prompt_input, height_input, width_input, negative_prompt_input, duration_seconds_input, guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox]
170 | +     generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
171 |
172 |   if __name__ == "__main__":
173 |       if pipe is not None:
174 |           demo.queue(max_size=3).launch()
175 |       else:
176 | +         # This provides a clean message in the UI if the app can't start.
177 | +         gr.Markdown("# Application Start Failed").launch()
178 | +         gr.Info("A GPU is required to run this application. Please ensure your Hugging Face Space is configured with GPU hardware.")
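
For reference, both the removed code and the updated file map the duration slider to a frame count with raw_frames = int(round(duration_seconds * FIXED_FPS)), snap it to the form 4k + 1 (the ".*num_frames - 1.*" warning filtered at the top of the file refers to this constraint), and clip it to the model's limits. A minimal sketch of that arithmetic, using the constants from this file; the helper name frames_for is illustrative only and not part of the commit:

# Illustrative sketch (not from app_lora.py): how the duration slider maps to num_frames.
import numpy as np

FIXED_FPS = 24
MIN_FRAMES_MODEL, MAX_FRAMES_MODEL = 8, 240

def frames_for(duration_seconds: float) -> int:
    raw_frames = int(round(duration_seconds * FIXED_FPS))
    num_frames = ((raw_frames - 1) // 4) * 4 + 1   # snap to the form 4k + 1
    return int(np.clip(num_frames, MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))

print(frames_for(2.0))   # 48 raw frames  -> 45
print(frames_for(10.0))  # 240 raw frames -> 237, still under the 240-frame cap

On the final fallback branch: gr.Markdown is a component and does not expose a launch() method (launching belongs to gr.Blocks and gr.Interface), so the no-GPU path would likely fail at startup, which may be related to the Space's "Runtime error" status. A minimal sketch of a fallback that launches a small Blocks app instead; this is an illustration under those assumptions, not part of the commit, and error_demo is an invented name:

# Sketch (not part of the commit): show the no-GPU message via a minimal Blocks app.
import gradio as gr

with gr.Blocks() as error_demo:
    gr.Markdown(
        "# Application Start Failed\n"
        "A GPU is required to run this application. "
        "Please ensure your Hugging Face Space is configured with GPU hardware."
    )

error_demo.launch()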