Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -19,33 +19,42 @@ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:512'
 os.environ['HF_HUB_CACHE'] = '/tmp/hub' # Use temp directory to avoid filling persistent storage
 
 
-
-
-
-
-
-
-
+from diffusers import UniPCMultistepScheduler
+from transformers import CLIPTextModel, CLIPTokenizer
+from wan_diffusers import WanTextToVideoPipeline # Use correct import for Wan's T2V pipeline
+from safetensors.torch import load_file
+from huggingface_hub import hf_hub_download
+import torch
 
-
+# --- Base model setup (Wan T2V) ---
+MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
 LORA_FILENAME = "FusionX_LoRa/Wan2.1_T2V_14B_FusionX_LoRA.safetensors"
 
+print("🚀 Loading Wan2.1 T2V base pipeline...")
 
-
-
-
-image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32)
-vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
-pipe = WanImageToVideoPipeline.from_pretrained(
-    MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
+pipe = WanTextToVideoPipeline.from_pretrained(
+    MODEL_ID,
+    torch_dtype=torch.bfloat16,
 )
+
+# Optional: replace scheduler for more stable generation
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
 pipe.to("cuda")
 
-
-
-
-
+# --- Load FusionX-style LoRA ---
+print("🔧 Loading FusionX LoRA...")
+try:
+    lora_path = hf_hub_download(repo_id="FusionX_LoRa", filename=LORA_FILENAME)
+    pipe.load_lora_weights(lora_path, adapter_name="fusionx_lora")
+    pipe.set_adapters(["fusionx_lora"], adapter_weights=[1.0])
+    pipe.fuse_lora()
+    print("✅ FusionX LoRA applied (strength: 1.0)")
+except Exception as e:
+    print(f"⚠️ Failed to load FusionX LoRA: {e}")
+
+# --- Ready to generate ---
+print("✨ T2V model ready for text-to-video generation!")
+
 
 MOD_VALUE = 32
 DEFAULT_H_SLIDER_VALUE = 512