import spaces
import subprocess
import os
# Install flash_attn at startup; FLASH_ATTENTION_SKIP_CUDA_BUILD skips compiling the CUDA
# extension at install time. os.environ is merged in so pip keeps its normal environment (PATH, etc.).
subprocess.run('pip install flash-attn==2.7.4.post1 --no-build-isolation', env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
import torch
from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video
from transformers import CLIPVisionModel
import gradio as gr
import tempfile
from huggingface_hub import hf_hub_download
import numpy as np
from PIL import Image
import random
import time
# --- MODEL LOADING ---
MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
LORA_REPO_ID = "Kijai/WanVideo_comfy"
LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"
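# The CausVid LoRA distills Wan 2.1 toward few-step sampling, which is what lets this
# demo default to only 4 inference steps with guidance_scale=1 below.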
image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32)
vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
pipe = WanImageToVideoPipeline.from_pretrained(
    MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
pipe.to("cuda")
causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
pipe.set_adapters(["causvid_lora"], adapter_weights=[0.95])
pipe.fuse_lora()
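# Fusing merges the LoRA deltas into the base weights, so generation pays no
# per-step adapter overhead.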
# --- CONSTANTS ---
MOD_VALUE = 32
DEFAULT_H_SLIDER_VALUE = 512
DEFAULT_W_SLIDER_VALUE = 896
NEW_FORMULA_MAX_AREA = 480.0 * 832.0
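# Target pixel budget for auto-sizing uploads: 480 * 832, roughly the 480p area this checkpoint targets.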
SLIDER_MIN_H, SLIDER_MAX_H = 128, 896
SLIDER_MIN_W, SLIDER_MAX_W = 128, 896
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS = 24
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81
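# At 24 fps, 8-81 frames corresponds to clips of roughly 0.3-3.4 seconds.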
default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards, watermark, text, signature"
class CalculateTime:
    """Context manager to measure execution time of code blocks."""
    def __init__(self, description="Operation"):
        self.description = description
        self.elapsed = 0

    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end = time.perf_counter()
        self.elapsed = self.end - self.start
        print(f"{self.description} took {self.elapsed:.4f} seconds")
# --- HELPER FUNCTIONS (unchanged from the base demo) ---
def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
                                  min_slider_h, max_slider_h,
                                  min_slider_w, max_slider_w,
                                  default_h, default_w):
    orig_w, orig_h = pil_image.size
    if orig_w <= 0 or orig_h <= 0:
        return default_h, default_w
    aspect_ratio = orig_h / orig_w
    calc_h = round(np.sqrt(calculation_max_area * aspect_ratio))
    calc_w = round(np.sqrt(calculation_max_area / aspect_ratio))
    calc_h = max(mod_val, (calc_h // mod_val) * mod_val)
    calc_w = max(mod_val, (calc_w // mod_val) * mod_val)
    new_h = int(np.clip(calc_h, min_slider_h, (max_slider_h // mod_val) * mod_val))
    new_w = int(np.clip(calc_w, min_slider_w, (max_slider_w // mod_val) * mod_val))
    return new_h, new_w
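# Illustrative example (assuming a 1920x1080 upload): aspect ratio 0.5625 gives
# calc_h≈474 and calc_w≈843, which snap down to multiples of 32 and clip to the
# slider range, so the sliders are set to 448x832.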
def handle_image_upload_for_dims_wan(uploaded_pil_image, current_h_val, current_w_val):
    if uploaded_pil_image is None:
        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
    try:
        new_h, new_w = _calculate_new_dimensions_wan(
            uploaded_pil_image, MOD_VALUE, NEW_FORMULA_MAX_AREA,
            SLIDER_MIN_H, SLIDER_MAX_H, SLIDER_MIN_W, SLIDER_MAX_W,
            DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE
        )
        return gr.update(value=new_h), gr.update(value=new_w)
    except Exception as e:
        gr.Warning(f"Error calculating new dimensions: {e}")
        return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
@spaces.GPU(duration=90)
def generate_video(input_image, prompt, height, width,
                   negative_prompt=default_negative_prompt, duration_seconds=2,
                   guidance_scale=1, steps=4,
                   seed=42, randomize_seed=False,
                   # --- NEW RADIAL ATTENTION PARAMS ---
                   pattern="dense", dense_layers=1, dense_timesteps=12, decay_factor=0.2,
                   progress=gr.Progress(track_tqdm=True)):
    from radial_attn.models.wan.inference import replace_wan_attention
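    # NOTE: the import above is done lazily, inside the @spaces.GPU call, on the
    # assumption that radial_attn expects a CUDA device to be available when it loads.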
    if input_image is None:
        raise gr.Error("Please upload an input image.")

    target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
    target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
    num_frames = int(np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))
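    # e.g. duration_seconds=2 -> round(2 * 24) = 48 frames, clipped to the model's 8-81 frame range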
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)

    # --- APPLY RADIAL ATTENTION PATCH ---
    # This function is called on every generation to ensure the correct settings
    # for either 'dense' or 'radial' attention are applied.
    with CalculateTime("Replace attention"):
        replace_wan_attention(
            pipe=pipe,
            height=target_h,
            width=target_w,
            num_frames=num_frames,
            dense_layers=dense_layers,
            dense_timesteps=dense_timesteps,
            decay_factor=decay_factor,
            sparsity_type=pattern,
        )
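    # With pattern="dense" attention stays fully dense; "radial" enables the sparse
    # attention pattern, keeping the first `dense_layers` layers and `dense_timesteps`
    # denoising steps dense (see the UI descriptions below).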
    resized_image = input_image.resize((target_w, target_h))

    with CalculateTime("Run Inference"):
        output_frames_list = pipe(
            image=resized_image, prompt=prompt, negative_prompt=negative_prompt,
            height=target_h, width=target_w, num_frames=num_frames,
            guidance_scale=float(guidance_scale), num_inference_steps=int(steps),
            generator=torch.Generator(device="cuda").manual_seed(current_seed)
        ).frames[0]
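    # delete=False keeps the temporary .mp4 on disk after the with-block so Gradio can serve it.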
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name
    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
    return video_path, current_seed
# --- GRADIO UI ---
with gr.Blocks() as demo:
    gr.Markdown("# Fast 4-step Wan 2.1 I2V (14B) with CausVid LoRA and Radial Attention")
    gr.Markdown("This demo combines the distilled Wan 2.1 I2V model with [Radial Attention](https://github.com/mit-han-lab/radial-attention) for efficient long video generation.")
    with gr.Row():
        with gr.Column():
            input_image_component = gr.Image(type="pil", label="Input Image (auto-resized to target H/W)")
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
            duration_seconds_input = gr.Slider(minimum=round(MIN_FRAMES_MODEL/FIXED_FPS, 1), maximum=round(MAX_FRAMES_MODEL/FIXED_FPS, 1), step=0.1, value=2, label="Duration (seconds)", info=f"Clamped to the model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
                with gr.Row():
                    height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label=f"Output Height (multiple of {MOD_VALUE})")
                    width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label=f"Output Width (multiple of {MOD_VALUE})")
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale", visible=False)
            # --- NEW RADIAL ATTENTION UI ---
            with gr.Group():
                gr.Markdown("### Radial Attention Settings")
                pattern_input = gr.Radio(["dense", "radial"], value="dense", label="Attention Pattern", info="Select 'radial' to enable sparse attention.")
                dense_layers_input = gr.Slider(minimum=0, maximum=40, step=1, value=1, label="Dense Layers", info="Number of initial layers kept dense. Default: 1 for a ~3s video.")
                dense_timesteps_input = gr.Slider(minimum=0, maximum=50, step=1, value=12, label="Dense Timesteps", info="Number of initial denoising steps kept dense. Default: 12 for a ~3s video.")
                decay_factor_input = gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=0.2, label="Decay Factor", info="Controls attention window decay. Smaller is sparser. Default: 0.2.")
            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)

    # Event handlers (unchanged)
    input_image_component.upload(
        fn=handle_image_upload_for_dims_wan,
        inputs=[input_image_component, height_input, width_input],
        outputs=[height_input, width_input]
    )
    input_image_component.clear(
        fn=handle_image_upload_for_dims_wan,
        inputs=[input_image_component, height_input, width_input],
        outputs=[height_input, width_input]
    )

    # --- UPDATE UI INPUTS LIST ---
    ui_inputs = [
        input_image_component, prompt_input, height_input, width_input,
        negative_prompt_input, duration_seconds_input,
        guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox,
        # Add the new radial attention inputs
        pattern_input, dense_layers_input, dense_timesteps_input, decay_factor_input
    ]
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])

    gr.Examples(
        examples=[
            ["peng.png", "a penguin playfully dancing in the snow, Antarctica", 896, 512],
            ["forg.jpg", "the frog jumps around", 448, 832],
        ],
        inputs=[input_image_component, prompt_input, height_input, width_input],
        outputs=[video_output, seed_input],
        fn=generate_video,
        cache_examples="lazy",
    )
if __name__ == "__main__":
    import multiprocessing as mp
    try:
        # 'fork' is the default on Linux, but it's not safe with CUDA.
        # 'spawn' creates a new process from scratch, avoiding CUDA re-initialization issues.
        mp.set_start_method("spawn", force=True)
    except RuntimeError:
        # This can happen if the context is already set.
        pass
    demo.queue().launch()