import torch
from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video
from transformers import CLIPVisionModel
import gradio as gr
import tempfile
import os
import subprocess
from huggingface_hub import hf_hub_download
import numpy as np
from PIL import Image
import random
import warnings
warnings.filterwarnings("ignore", message=".*Attempting to use legacy OpenCV backend.*")
warnings.filterwarnings("ignore", message=".*num_frames - 1.*")
MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
LORA_FILENAME = "FusionX_LoRa/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"
# --- Model Initialization ---
pipe = None
# This check correctly identifies if the Hugging Face Space has a GPU.
if torch.cuda.is_available():
    image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float16)
    vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float16)
    pipe = WanImageToVideoPipeline.from_pretrained(
        MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.float16
    )
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
    pipe.enable_model_cpu_offload()
    try:
        causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
        print("✅ LoRA downloaded to:", causvid_path)
        pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
        pipe.set_adapters(["causvid_lora"], adapter_weights=[0.75])
        pipe.fuse_lora()
    except Exception:
        import traceback
        print("❌ Error during LoRA loading:")
        traceback.print_exc()
else:
    print("CUDA is not available. This script requires a GPU. Please upgrade your Space hardware.")
# --- Constants and Helper Functions ---
MOD_VALUE = 32
DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE = 640, 1024
NEW_FORMULA_MAX_AREA = 640.0 * 1024.0
SLIDER_MIN_H, SLIDER_MAX_H = 128, 1024
SLIDER_MIN_W, SLIDER_MAX_W = 128, 1024
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS, MIN_FRAMES_MODEL, MAX_FRAMES_MODEL = 24, 8, 240
default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
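# At FIXED_FPS = 24, MIN_FRAMES_MODEL = 8 and MAX_FRAMES_MODEL = 240 correspond
# to clip lengths of roughly 0.3 s and exactly 10 s; the duration slider in the
# UI below derives its range from these same constants.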
def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
                                  min_slider_h, max_slider_h, min_slider_w, max_slider_w,
                                  default_h, default_w):
    orig_w, orig_h = pil_image.size
    if orig_w <= 0 or orig_h <= 0:
        return default_h, default_w
    # Pick the largest h x w that preserves the aspect ratio within the max area,
    # then snap both sides down to multiples of mod_val and clamp to the slider range.
    aspect_ratio = orig_h / orig_w
    calc_h = round(np.sqrt(calculation_max_area * aspect_ratio))
    calc_w = round(np.sqrt(calculation_max_area / aspect_ratio))
    calc_h = max(mod_val, (calc_h // mod_val) * mod_val)
    calc_w = max(mod_val, (calc_w // mod_val) * mod_val)
    new_h = int(np.clip(calc_h, min_slider_h, (max_slider_h // mod_val) * mod_val))
    new_w = int(np.clip(calc_w, min_slider_w, (max_slider_w // mod_val) * mod_val))
    return new_h, new_w
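# Worked example: a 1920x1080 upload has aspect_ratio 0.5625, so with
# calculation_max_area = 640 * 1024 = 655360 we get
# calc_h = round(sqrt(655360 * 0.5625)) = 607, snapped down to 576, and
# calc_w = round(sqrt(655360 / 0.5625)) = 1079, snapped down to 1056 and then
# clipped to the 1024 slider maximum. Result: 576 x 1024.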
def handle_image_upload_for_dims_wan(uploaded_pil_image):
    if uploaded_pil_image is None:
        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
    try:
        new_h, new_w = _calculate_new_dimensions_wan(
            uploaded_pil_image, MOD_VALUE, NEW_FORMULA_MAX_AREA,
            SLIDER_MIN_H, SLIDER_MAX_H, SLIDER_MIN_W, SLIDER_MAX_W,
            DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE
        )
        return gr.update(value=new_h), gr.update(value=new_w)
    except Exception:
        gr.Warning("Error calculating new dimensions.")
        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
def export_video_with_ffmpeg(frames, output_path, fps=24):
    try:
        import imageio
        writer = imageio.get_writer(output_path, fps=fps, codec='libx264',
                                    pixelformat='yuv420p', quality=8)
        for frame in frames:
            writer.append_data(np.array(frame))
        writer.close()
    except ImportError:
        export_to_video(frames, output_path, fps=fps)
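# Note: the libx264/yuv420p path above assumes the imageio-ffmpeg backend is
# installed; without it, imageio's ffmpeg plugin should raise ImportError, which
# lands in the same fallback branch as a missing imageio. yuv420p is what keeps
# the resulting MP4 playable in most browsers.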
def generate_video(input_image, prompt, height, width,
                   negative_prompt, duration_seconds,
                   guidance_scale, steps, seed, randomize_seed,
                   progress=gr.Progress(track_tqdm=True)):
    if pipe is None:
        raise gr.Error("Pipeline not initialized. Check logs for GPU availability.")
    if input_image is None:
        raise gr.Error("Please upload an input image.")
    target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
    target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
    # Wan's VAE compresses time in steps of 4, so the frame count must be 4k + 1
    # (this is the "num_frames - 1" warning filtered at the top of the file).
    raw_frames = int(round(duration_seconds * FIXED_FPS))
    num_frames = ((raw_frames - 1) // 4) * 4 + 1
    num_frames = int(np.clip(num_frames, MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))
    if num_frames > 120 and max(target_h, target_w) > 768:
        scale_factor = 768 / max(target_h, target_w)
        target_h = max(MOD_VALUE, int(target_h * scale_factor) // MOD_VALUE * MOD_VALUE)
        target_w = max(MOD_VALUE, int(target_w * scale_factor) // MOD_VALUE * MOD_VALUE)
        gr.Info(f"Reduced resolution to {target_w}x{target_h} for long video.")
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    resized_image = input_image.resize((target_w, target_h), Image.Resampling.LANCZOS)
    torch.cuda.empty_cache()
    try:
        with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float16):
            output_frames_list = pipe(
                image=resized_image, prompt=prompt, negative_prompt=negative_prompt,
                height=target_h, width=target_w, num_frames=num_frames,
                guidance_scale=float(guidance_scale), num_inference_steps=int(steps),
                generator=torch.Generator(device="cuda").manual_seed(current_seed)
            ).frames[0]
    except torch.cuda.OutOfMemoryError:
        raise gr.Error("Out of GPU memory. Try reducing duration or resolution.")
    finally:
        torch.cuda.empty_cache()
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name
    export_video_with_ffmpeg(output_frames_list, video_path, fps=FIXED_FPS)
    # Optional: FFmpeg optimization (see the sketch after this function)
    # ...
    return video_path, current_seed
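# One possible shape for the optional FFmpeg step hinted at above (a hypothetical
# sketch, not part of the original flow): remux the finished MP4 with +faststart
# so it begins streaming in the browser before it fully downloads. This would
# also give the otherwise-unused `subprocess` import at the top a purpose.
def _faststart_remux(src_path):
    dst_path = src_path.replace(".mp4", "_web.mp4")
    subprocess.run(
        ["ffmpeg", "-y", "-i", src_path, "-c", "copy", "-movflags", "+faststart", dst_path],
        check=True, capture_output=True,
    )
    return dst_path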
# --- Gradio UI ---
with gr.Blocks() as demo:
    gr.Markdown("# Wan 2.1 I2V FusionX-LoRA")
    gr.Markdown("GPU is required. If this doesn't load, check your Space hardware settings.")
    with gr.Row():
        with gr.Column():
            input_image_component = gr.Image(type="pil", label="Input Image")
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
            duration_seconds_input = gr.Slider(minimum=round(MIN_FRAMES_MODEL/FIXED_FPS, 1), maximum=round(MAX_FRAMES_MODEL/FIXED_FPS, 1), step=0.1, value=2, label="Duration (seconds)")
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
                with gr.Row():
                    height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label="Height")
                    width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label="Width")
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
            generate_button = gr.Button("Generate Video", variant="primary", interactive=(pipe is not None))
        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
            gr.Markdown("### Tips:\n- Longer videos need more memory.\n- 4-8 steps is optimal.")
    input_image_component.upload(fn=handle_image_upload_for_dims_wan, inputs=input_image_component, outputs=[height_input, width_input])
    ui_inputs = [input_image_component, prompt_input, height_input, width_input, negative_prompt_input, duration_seconds_input, guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox]
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
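    # Note: seed_input is wired as both an input and an output so that when
    # "Randomize seed" is checked, the slider updates to show the seed that was
    # actually used (generate_video returns current_seed alongside the video path).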
if __name__ == "__main__":
    if pipe is not None:
        demo.queue(max_size=3).launch()
    else:
        # Show a clean message in the UI if the app can't start.
        with gr.Blocks() as error_demo:
            gr.Markdown("# Application Start Failed\nA GPU is required to run this application. Please ensure your Hugging Face Space is configured with GPU hardware.")
        error_demo.launch()