import torch
from diffusers import WanPipeline, AutoencoderKLWan, UniPCMultistepScheduler
from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
from huggingface_hub import hf_hub_download
from PIL import Image
import numpy as np
import gradio as gr
import spaces
import gc

# --- INITIAL SETUP ---
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

model_id = "Wan-AI/Wan2.1-T2V-14B-Diffusers"

print("Loading VAE...") | |
vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)

print("Loading WanPipeline in bfloat16...") | |
# This will use ZeroGPU/accelerate with meta devices | |
pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16) | |
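# flow_shift reshapes the flow-matching timestep schedule; the Wan reference
# settings are roughly 3.0 for 480P and 5.0 for 720P, so 1.0 here sits well
# below the recommended values.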
flow_shift = 1.0
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift)

# Move the base pipeline to the GPU. ZeroGPU will manage this.
print("Moving pipeline to device (ZeroGPU will handle offloading)...")
pipe.to(device)

# --- LORA SETUP ---
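# The CausVid LoRA is a distillation adapter for Wan2.1 T2V; it is what makes
# few-step, guidance-free generation (guidance_scale=1.0 below) viable.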
CAUSVID_LORA_REPO = "Kijai/WanVideo_comfy"
CAUSVID_LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"
BASE_LORA_NAME = "causvid_lora"
CUSTOM_LORA_NAME = "custom_lora"

print("Downloading base LoRA...") | |
try: | |
causvid_path = hf_hub_download(repo_id=CAUSVID_LORA_REPO, filename=CAUSVID_LORA_FILENAME) | |
print("✅ Base LoRA downloaded.") | |
except Exception as e: | |
causvid_path = None | |
print(f"⚠️ Could not download base LoRA: {e}") | |
print("Initialization complete. Gradio is starting...") | |
def generate(prompt, negative_prompt, width=1024, height=1024, num_inference_steps=30, lora_id=None, progress=gr.Progress(track_tqdm=True)):
    # --- DYNAMIC LORA MANAGEMENT FOR EACH RUN ---
    active_adapters = []
    adapter_weights = []

    # 1. Load the base LoRA directly onto the correct device
    if causvid_path:
        try:
            print(f"Loading base LoRA '{BASE_LORA_NAME}'...")
            pipe.load_lora_weights(causvid_path, adapter_name=BASE_LORA_NAME, device_map={"": device})
            active_adapters.append(BASE_LORA_NAME)
            adapter_weights.append(1.0)
            print("✅ Base LoRA loaded to device.")
        except Exception as e:
            print(f"⚠️ Failed to load base LoRA: {e}")

    # 2. Load the custom LoRA if provided, also directly to the device
    clean_lora_id = lora_id.strip() if lora_id else ""
    if clean_lora_id:
        try:
            print(f"Loading custom LoRA '{CUSTOM_LORA_NAME}' from '{clean_lora_id}'...")
            pipe.load_lora_weights(clean_lora_id, adapter_name=CUSTOM_LORA_NAME, device_map={"": device})
            active_adapters.append(CUSTOM_LORA_NAME)
            adapter_weights.append(1.0)
            print("✅ Custom LoRA loaded to device.")
        except Exception as e:
            print(f"⚠️ Failed to load custom LoRA '{clean_lora_id}': {e}")
            if CUSTOM_LORA_NAME in getattr(pipe.transformer, "peft_config", {}):
                del pipe.transformer.peft_config[CUSTOM_LORA_NAME]

    # 3. Activate the successfully loaded adapters
    if active_adapters:
        print(f"Activating adapters: {active_adapters} with weights: {adapter_weights}")
        pipe.set_adapters(active_adapters, adapter_weights)
        pipe.transformer.to(device)  # Explicitly move the transformer to the GPU after setting adapters
    else:
        # Ensure LoRA is disabled if no adapters were loaded
        pipe.disable_lora()

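    # First Block Cache (ParaAttention): on steps where the first transformer
    # block's output barely changes, the remaining blocks are skipped and the
    # cached residual is reused, trading a little fidelity for speed.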
    apply_cache_on_pipe(pipe)

    try:
        output = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_frames=1,
            num_inference_steps=num_inference_steps,
            guidance_scale=1.0,
        )
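        # With num_frames=1 the video pipeline behaves as a text-to-image model;
        # assuming the default "np" output type, frames[0] is a
        # (num_frames, H, W, 3) float array in [0, 1], scaled to uint8 for PIL.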
        image = output.frames[0][0]
        image = (image * 255).astype(np.uint8)
        return Image.fromarray(image)
    finally:
        # --- PROPER CLEANUP ---
        print("Unloading all LoRAs to ensure a clean state...")
        pipe.unload_lora_weights()
        gc.collect()  # Force garbage collection
        torch.cuda.empty_cache()  # Clear the CUDA allocator cache
        print("✅ LoRAs unloaded and memory cleaned.")

iface = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Input prompt"),
        gr.Textbox(label="Negative prompt", value="Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"),
        gr.Slider(label="Width", minimum=480, maximum=1280, step=16, value=1024),
        gr.Slider(label="Height", minimum=480, maximum=1280, step=16, value=1024),
        gr.Slider(label="Inference Steps", minimum=1, maximum=80, step=1, value=10),
        gr.Textbox(label="LoRA ID (optional, loaded dynamically per run)"),
    ],
    outputs=gr.Image(label="Output image"),
)

iface.launch()