rahul7star commited on
Commit
4ca75ef
·
verified ·
1 Parent(s): 0127bea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -1
app.py CHANGED
@@ -35,14 +35,41 @@ pipe = WanImageToVideoPipeline.from_pretrained(
35
  pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
36
  pipe.to("cuda")
37
 
 
 
 
 
38
  # Load FusionX enhancement LoRAs
39
  lora_adapters = []
40
  lora_weights = []
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  # Download LoRA file
43
  lora_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
44
 
45
- # Load and fuse it — use any adapter name (e.g., "default", "main", etc.)
 
 
 
 
 
 
 
 
 
46
  try:
47
  pipe.load_lora_weights(lora_path, adapter_name="main")
48
  pipe.set_adapters(["main"], adapter_weights=[1.0])
 
35
  pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=8.0)
36
  pipe.to("cuda")
37
 
38
+ # Load FusionX enhancement LoRAs
39
+ from huggingface_hub import hf_hub_download
40
+ import torch
41
+
42
  # Load FusionX enhancement LoRAs
43
  lora_adapters = []
44
  lora_weights = []
45
 
46
# Helper to print model keys
def print_model_keys(model, show_values=False):
    """Print every key in *model*'s state dict to stdout.

    Args:
        model: Any object exposing ``state_dict()`` (e.g. a ``torch.nn.Module``).
        show_values: When True, append each tensor's ``.shape`` after its key;
            when False, print the bare key names only.
    """
    print("\n🔑 Model State Dict Keys:")
    state = model.state_dict()
    if show_values:
        # One "key: shape" line per parameter/buffer.
        for key, tensor in state.items():
            print(f"{key}: {tensor.shape}")
    else:
        # Keys only — no need to touch the tensor values.
        for key in state:
            print(key)
54
+
55
+ # Print keys in UNet, Text Encoder, VAE
56
+ print_model_keys(pipe.unet, show_values=True)
57
+ print_model_keys(pipe.text_encoder, show_values=True)
58
+ print_model_keys(pipe.vae, show_values=True)
59
+
60
  # Download LoRA file
61
  lora_path = hf_hub_download(repo_id=LORA_REPO_ID, filename=LORA_FILENAME)
62
 
63
+ # Print keys from LoRA checkpoint
64
+ print("\n📦 LoRA Checkpoint Keys:")
65
+ try:
66
+ lora_checkpoint = torch.load(lora_path, map_location="cpu")
67
+ for k, v in lora_checkpoint.items():
68
+ print(f"{k}: {v.shape}")
69
+ except Exception as e:
70
+ print(f"❌ Failed to load LoRA file for key inspection: {e}")
71
+
72
+ # Load and fuse LoRA
73
  try:
74
  pipe.load_lora_weights(lora_path, adapter_name="main")
75
  pipe.set_adapters(["main"], adapter_weights=[1.0])