Spaces:
Runtime error
Runtime error
Update app_lora.py
Browse files — app_lora.py (+2 −2)
app_lora.py
CHANGED
@@ -16,8 +16,8 @@ MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
|
|
16 |
LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
|
17 |
LORA_FILENAME = "FusionX_LoRa/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"
|
18 |
|
19 |
-
image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.
|
20 |
-
vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.
|
21 |
pipe = WanImageToVideoPipeline.from_pretrained(
|
22 |
MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
|
23 |
)
|
|
|
16 |
LORA_REPO_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
|
17 |
LORA_FILENAME = "FusionX_LoRa/Wan2.1_I2V_14B_FusionX_LoRA.safetensors"
|
18 |
|
19 |
+
image_encoder = CLIPVisionModel.from_pretrained(MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32)
|
20 |
+
vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
|
21 |
pipe = WanImageToVideoPipeline.from_pretrained(
|
22 |
MODEL_ID, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
|
23 |
)
|