wan-fusionx-lora / app_lora.py
import torch
from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video
from transformers import CLIPVisionModel
import gradio as gr
import tempfile
import os
import subprocess
# The spaces library IS required for ZeroGPU.
import spaces
from huggingface_hub import hf_hub_download
import numpy as np
from PIL import Image
import random
import warnings
warnings.filterwarnings("ignore")
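# Base model: Wan 2.1 image-to-video (14B, 480p) in Diffusers format, with the FusionX LoRA
# loaded on top. The UI defaults below (4 steps, guidance fixed at 1.0) reflect that this
# LoRA is intended for few-step generation.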
MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
LORA_FILENAME = "FusionX_LoRa/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"
# --- Global variable for the pipeline ---
# We use a global variable to cache the model between calls.
pipe = None
# --- Constants and Helper Functions ---
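# Output height/width are snapped to multiples of MOD_VALUE; for uploaded images, default
# dimensions are chosen so that height * width stays near NEW_FORMULA_MAX_AREA.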
MOD_VALUE = 32
DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE = 640, 1024
NEW_FORMULA_MAX_AREA = 640.0 * 1024.0
SLIDER_MIN_H, SLIDER_MAX_H = 128, 1024
SLIDER_MIN_W, SLIDER_MAX_W = 128, 1024
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS, MIN_FRAMES_MODEL, MAX_FRAMES_MODEL = 24, 8, 240
default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
def get_duration(input_image, prompt, height, width,
                 negative_prompt, duration_seconds,
                 guidance_scale, steps, seed, randomize_seed,
                 progress=None):
    """
    Compute the ZeroGPU timeout from the requested video length.
    Passed as `duration=` to @spaces.GPU, so it receives the same arguments as
    generate_video and returns the allotted GPU time in seconds.
    """
    if duration_seconds > 7: return 180
    if duration_seconds > 5: return 120
    if duration_seconds > 3: return 90
    return 60
# --- The Main GPU Function ---
# The @spaces.GPU decorator is ESSENTIAL for ZeroGPU.
# It tells the platform that this function needs a GPU.
@spaces.GPU(duration=get_duration)  # GPU time is allotted per request via get_duration above.
def generate_video(input_image, prompt, height, width,
negative_prompt, duration_seconds,
guidance_scale, steps, seed, randomize_seed,
progress=gr.Progress(track_tqdm=True)):
global pipe
# --- LAZY LOADING of the model ---
# This block will only run on the very first generation request.
if pipe is None:
progress(0, desc="Cold start: Initializing model...")
print("Cold start: Initializing model pipeline...")
image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float16)
vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float16)
pipe = WanImageToVideoPipeline.from_pretrained(
MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.float16
)
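        # UniPC sampler; flow_shift biases the flow-matching noise schedule toward higher
        # noise levels, a setting commonly paired with few-step LoRA setups like this one.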
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
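        # Keep VRAM usage down: each submodule is moved to the GPU only while it is running.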
pipe.enable_model_cpu_offload()
try:
causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
pipe.set_adapters(["causvid_lora"], adapter_weights=[0.75])
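            # Merge the LoRA weights into the base model so inference runs without
            # per-step adapter overhead.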
pipe.fuse_lora()
print("βœ… LoRA loaded successfully.")
except Exception as e:
raise gr.Error(f"Error loading LoRA: {e}")
print("βœ… Pipeline initialized successfully.")
    # The GPU time for this request was already set by the `duration=get_duration`
    # callable on the @spaces.GPU decorator above.
if input_image is None:
raise gr.Error("Please upload an input image.")
target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
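    # The pipeline expects frame counts of the form 4k + 1, so snap the requested length
    # to the nearest valid count and clamp it to the supported range.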
raw_frames = int(round(duration_seconds * FIXED_FPS))
num_frames = ((raw_frames - 1) // 4) * 4 + 1
num_frames = np.clip(num_frames, MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
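    # Guard against running out of GPU memory on long clips: cap the longer side at 768 px
    # whenever more than 120 frames are requested.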
if num_frames > 120 and max(target_h, target_w) > 768:
scale_factor = 768 / max(target_h, target_w)
target_h = max(MOD_VALUE, int(target_h * scale_factor) // MOD_VALUE * MOD_VALUE)
target_w = max(MOD_VALUE, int(target_w * scale_factor) // MOD_VALUE * MOD_VALUE)
gr.Info(f"Reduced resolution to {target_w}x{target_h} for long video.")
current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
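    # The conditioning image must match the output resolution, so resize it with a
    # high-quality filter.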
resized_image = input_image.resize((target_w, target_h), Image.Resampling.LANCZOS)
    def _report_progress(pipeline, step, timestep, callback_kwargs):
        # diffusers invokes callback_on_step_end as (pipe, step_index, timestep, callback_kwargs)
        # and expects the callback_kwargs dict to be returned.
        progress((step + 1) / int(steps))
        return callback_kwargs
    try:
        # Start from a clean CUDA cache so leftovers from a previous request do not
        # push this one over the memory limit.
        torch.cuda.empty_cache()
        with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float16):
            output_frames_list = pipe(
                image=resized_image, prompt=prompt, negative_prompt=negative_prompt,
                height=target_h, width=target_w, num_frames=num_frames,
                guidance_scale=float(guidance_scale), num_inference_steps=int(steps),
                generator=torch.Generator(device="cuda").manual_seed(current_seed),
                callback_on_step_end=_report_progress
            ).frames[0]
except torch.cuda.OutOfMemoryError:
raise gr.Error("Out of GPU memory. Try reducing duration or resolution.")
finally:
torch.cuda.empty_cache()
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
video_path = tmpfile.name
    # Write the frames to an H.264 MP4; yuv420p keeps the file playable in browsers.
import imageio
writer = imageio.get_writer(video_path, fps=FIXED_FPS, codec='libx264', pixelformat='yuv420p', quality=8)
for frame in output_frames_list:
writer.append_data(np.array(frame))
writer.close()
return video_path, current_seed
# --- Gradio UI ---
# Helper: suggest output dimensions that preserve the uploaded image's aspect ratio.
def handle_image_upload_for_dims_wan(uploaded_pil_image):
if uploaded_pil_image is None: return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
try:
orig_w, orig_h = uploaded_pil_image.size
aspect_ratio = orig_h / orig_w
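        # Pick h and w so that h * w ≈ NEW_FORMULA_MAX_AREA while preserving the aspect
        # ratio: h = sqrt(area * ratio), w = sqrt(area / ratio), since ratio = h / w.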
calc_h = round(np.sqrt(NEW_FORMULA_MAX_AREA * aspect_ratio))
calc_w = round(np.sqrt(NEW_FORMULA_MAX_AREA / aspect_ratio))
calc_h = max(MOD_VALUE, (calc_h // MOD_VALUE) * MOD_VALUE)
calc_w = max(MOD_VALUE, (calc_w // MOD_VALUE) * MOD_VALUE)
new_h = int(np.clip(calc_h, SLIDER_MIN_H, SLIDER_MAX_H))
new_w = int(np.clip(calc_w, SLIDER_MIN_W, SLIDER_MAX_W))
return gr.update(value=new_h), gr.update(value=new_w)
    except Exception: return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
with gr.Blocks() as demo:
gr.Markdown("# Wan 2.1 I2V FusionX-LoRA (ZeroGPU Ready)")
gr.Markdown("The first generation will be slow due to a 'cold start'. Subsequent generations will be much faster.")
with gr.Row():
with gr.Column():
input_image_component = gr.Image(type="pil", label="Input Image")
prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
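            # Slider bounds come directly from the model's frame limits at 24 fps (~0.3 s to 10 s).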
duration_seconds_input = gr.Slider(minimum=round(MIN_FRAMES_MODEL/FIXED_FPS, 1), maximum=round(MAX_FRAMES_MODEL/FIXED_FPS, 1), step=0.1, value=2, label="Duration (seconds)")
with gr.Accordion("Advanced Settings", open=False):
negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label="Height")
width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label="Width")
steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
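                # Guidance is hidden and fixed at 1.0: this LoRA setup is intended to run without
                # classifier-free guidance (which in diffusers typically also skips the extra
                # negative-prompt pass).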
guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
generate_button = gr.Button("Generate Video", variant="primary")
with gr.Column():
video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
gr.Markdown("### Tips:\n- Longer videos need more memory.\n- 4-8 steps is optimal.")
input_image_component.upload(fn=handle_image_upload_for_dims_wan, inputs=input_image_component, outputs=[height_input, width_input])
ui_inputs = [input_image_component, prompt_input, height_input, width_input, negative_prompt_input, duration_seconds_input, guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox]
generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
if __name__ == "__main__":
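    # A request queue is needed for gr.Progress updates and keeps concurrent GPU requests orderly.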
demo.queue().launch()