Spaces: Running on Zero
Update app.py
app.py CHANGED

@@ -1,15 +1,17 @@
-import torch
-from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler
-from diffusers.utils import export_to_video
-from transformers import CLIPVisionModel
+import spaces  # MUST be imported FIRST before any torch/CUDA imports
 import gradio as gr
 import tempfile
-import spaces
 from huggingface_hub import hf_hub_download
 import numpy as np
 from PIL import Image
 import random
 
+# Now import torch and related modules AFTER spaces
+import torch
+from diffusers import AutoencoderKLWan, WanImageToVideoPipeline, UniPCMultistepScheduler
+from diffusers.utils import export_to_video
+from transformers import CLIPVisionModel
+
 MODEL_ID = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
 LORA_REPO_ID = "Kijai/WanVideo_comfy"
 LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"