import torch
from diffusers import AutoencoderKLWan, WanPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video
import gradio as gr
import tempfile
import os
import spaces
from huggingface_hub import hf_hub_download
import logging # For better logging
# --- Global Model Loading & LoRA Handling ---
MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
LORA_REPO_ID = "Kijai/WanVideo_comfy"
LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# --- Model Loading ---
logger.info(f"Loading VAE for {MODEL_ID}...")
vae = AutoencoderKLWan.from_pretrained(
MODEL_ID,
subfolder="vae",
torch_dtype=torch.float32 # float32 for VAE stability
)
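# Keeping the VAE in float32 follows the diffusers Wan examples; only the rest of the
# pipeline (loaded below) runs in bfloat16.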
logger.info(f"Loading Pipeline {MODEL_ID}...")
pipe = WanPipeline.from_pretrained(
MODEL_ID,
vae=vae,
torch_dtype=torch.bfloat16 # bfloat16 for pipeline
)
flow_shift = 8.0
pipe.scheduler = UniPCMultistepScheduler.from_config(
pipe.scheduler.config, flow_shift=flow_shift
)
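# Assumption, not from the original app: the diffusers Wan examples typically use a lower
# flow_shift (around 3.0 for 480p and 5.0 for 720p outputs); 8.0 here appears tuned for the
# few-step CausVid sampling and may be worth adjusting per resolution.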
logger.info("Moving pipeline to CUDA...")
pipe.to("cuda")
# --- LoRA Loading ---
logger.info(f"Downloading LoRA {LORA_FILENAME} from {LORA_REPO_ID}...")
causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
logger.info("Loading LoRA weights with custom converter...")
pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
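# Optional sketch: the LoRA strength could be scaled with the standard diffusers adapter API
# (this app keeps the default weight of 1.0); the adapter name matches the one registered above.
# pipe.set_adapters(["causvid_lora"], adapter_weights=[0.95])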
# --- Gradio Interface Function ---
@spaces.GPU
def generate_video(prompt, negative_prompt, height, width, num_frames, guidance_scale, steps, fps, progress=gr.Progress(track_tqdm=True)):
logger.info("Starting video generation...")
logger.info(f" Prompt: {prompt}")
logger.info(f" Negative Prompt: {negative_prompt if negative_prompt else 'None'}")
logger.info(f" Height: {height}, Width: {width}")
logger.info(f" Num Frames: {num_frames}, FPS: {fps}")
logger.info(f" Guidance Scale: {guidance_scale}")
height = (int(height) // 8) * 8
width = (int(width) // 8) * 8
num_frames = int(num_frames)
fps = int(fps)
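    # Sketch (an assumption, not part of the original logic): Wan's VAE compresses time 4x,
    # so frame counts of the form 4*k + 1 (e.g. 81) are generally recommended. Uncomment to
    # snap the requested frame count onto that grid:
    # num_frames = max(5, ((num_frames - 1) // 4) * 4 + 1)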
with torch.inference_mode():
output_frames_list = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
height=height,
width=width,
num_frames=num_frames,
guidance_scale=float(guidance_scale),
            num_inference_steps=int(steps)
).frames[0]
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
video_path = tmpfile.name
export_to_video(output_frames_list, video_path, fps=fps)
logger.info(f"Video successfully generated and saved to {video_path}")
return video_path
# --- Gradio UI Definition ---
default_prompt = "A cat walks on the grass, realistic"
default_negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"
with gr.Blocks() as demo:
gr.Markdown(f"""
# Text-to-Video with Wan 2.1 (14B) + CausVid LoRA
Powered by `diffusers` and `{MODEL_ID}`.
The model is loaded into memory when the app starts, which can take a few minutes.
Ensure the Space has a GPU with sufficient VRAM (roughly 24 GB or more for the default settings).
""")
with gr.Row():
with gr.Column(scale=2):
prompt_input = gr.Textbox(label="Prompt", value=default_prompt, lines=3)
negative_prompt_input = gr.Textbox(
label="Negative Prompt (Optional)",
value=default_negative_prompt,
lines=3
)
with gr.Row():
height_input = gr.Slider(minimum=256, maximum=768, step=64, value=480, label="Height (multiple of 8)")
width_input = gr.Slider(minimum=256, maximum=1024, step=64, value=832, label="Width (multiple of 8)")
with gr.Row():
num_frames_input = gr.Slider(minimum=16, maximum=100, step=1, value=25, label="Number of Frames")
fps_input = gr.Slider(minimum=5, maximum=30, step=1, value=15, label="Output FPS")
                steps = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
guidance_scale_input = gr.Slider(minimum=1.0, maximum=20.0, step=0.5, value=1.0, label="Guidance Scale")
generate_button = gr.Button("Generate Video", variant="primary")
with gr.Column(scale=3):
video_output = gr.Video(label="Generated Video")
generate_button.click(
fn=generate_video,
inputs=[
prompt_input,
negative_prompt_input,
height_input,
width_input,
num_frames_input,
guidance_scale_input,
steps,
fps_input
],
outputs=video_output
)
gr.Examples(
examples=[
["A panda eating bamboo in a lush forest, cinematic lighting", default_negative_prompt, 480, 832, 25, 5.0, 4, 15],
["A majestic eagle soaring over snowy mountains", default_negative_prompt, 512, 768, 30, 7.0, 4, 12],
["Timelapse of a flower blooming, vibrant colors", "static, ugly", 384, 640, 40, 6.0, 4, 20],
["Astronaut walking on the moon, Earth in the background, highly detailed", default_negative_prompt, 480, 832, 20, 5.5, 4, 10],
],
inputs=[prompt_input, negative_prompt_input, height_input, width_input, num_frames_input, guidance_scale_input, steps, fps_input],
outputs=video_output,
fn=generate_video,
cache_examples=False
)
if __name__ == "__main__":
demo.queue().launch(share=True, debug=True) |