rahul7star commited on
Commit
72bb863
·
verified ·
1 Parent(s): ac31bd9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -19
app.py CHANGED
# --- Wan2.1 image-to-video pipeline + FusionX LoRA (pre-commit setup) ---
os.environ['HF_HUB_CACHE'] = '/tmp/hub' # Use temp directory to avoid filling persistent storage

# Base I2V model and the LoRA to merge into it.
MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-720P-Diffusers"
LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
LORA_FILENAME = "FusionX_LoRa/Wan2.1_T2V_14B_FusionX_LoRA.safetensors"

# Image encoder and VAE are loaded in float32 (see torch_dtype below); the rest
# of the pipeline runs in bfloat16.
image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32)
vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
pipe = WanImageToVideoPipeline.from_pretrained(
    MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
pipe.to("cuda")

# Download the LoRA weights and fuse them permanently into the pipeline
# at 0.95 strength.
causvid_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
pipe.load_lora_weights(causvid_path, adapter_name="causvid_lora")
pipe.set_adapters(["causvid_lora"], adapter_weights=[0.95])
pipe.fuse_lora()

# UI sizing constants: generated dimensions must be multiples of MOD_VALUE.
MOD_VALUE = 32
DEFAULT_H_SLIDER_VALUE = 512
os.environ['HF_HUB_CACHE'] = '/tmp/hub' # Use temp directory to avoid filling persistent storage

# NOTE(review): there is no "wan_diffusers" package; Wan2.1's text-to-video
# pipeline ships in diffusers as `WanPipeline`.
from diffusers import UniPCMultistepScheduler, WanPipeline
from transformers import CLIPTextModel, CLIPTokenizer
from safetensors.torch import load_file
from huggingface_hub import hf_hub_download
import torch

# --- Base model setup (Wan T2V) ---
MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
# Full "<user>/<repo>" Hugging Face repo id, plus the LoRA file's path inside it.
LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
LORA_FILENAME = "FusionX_LoRa/Wan2.1_T2V_14B_FusionX_LoRA.safetensors"

print("🚀 Loading Wan2.1 T2V base pipeline...")

pipe = WanPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,
)

# Optional: replace scheduler for more stable generation
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
pipe.to("cuda")

# --- Load FusionX-style LoRA ---
print("🔧 Loading FusionX LoRA...")
try:
    # BUG FIX: repo_id must be the full "<user>/<repo>" id. The previous value
    # "FusionX_LoRa" is only the folder inside the repo (already part of
    # LORA_FILENAME), so hf_hub_download could never resolve it.
    lora_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
    pipe.load_lora_weights(lora_path, adapter_name="fusionx_lora")
    pipe.set_adapters(["fusionx_lora"], adapter_weights=[1.0])
    pipe.fuse_lora()
    print("✅ FusionX LoRA applied (strength: 1.0)")
except Exception as e:
    # Best-effort: the app can still run with the base model if the LoRA fails.
    print(f"⚠️ Failed to load FusionX LoRA: {e}")

# --- Ready to generate ---
print("✨ T2V model ready for text-to-video generation!")

# UI sizing constants: generated dimensions must be multiples of MOD_VALUE.
MOD_VALUE = 32
DEFAULT_H_SLIDER_VALUE = 512