Update app.py
app.py CHANGED
@@ -9,6 +9,8 @@ import gradio as gr
 import torch
 # Determine device: use GPU if available, otherwise CPU
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+# Choose dtype based on device
+dtype = torch.float16 if device.type == "cuda" else torch.float32
 import tempfile
 from diffusers import StableVideoDiffusionPipeline
 from diffusers.utils import export_to_video
@@ -16,9 +18,9 @@ from diffusers.utils import export_to_video
 # Use the official SVD-XT img2vid-xt model
 MODEL = "stabilityai/stable-video-diffusion-img2vid-xt"
 
-# Load pipeline in
+# Load pipeline in appropriate precision on GPU or CPU
 pipe = StableVideoDiffusionPipeline.from_pretrained(
-    MODEL, torch_dtype=
+    MODEL, torch_dtype=dtype
 ).to(device)
 
 def infer(first_image, last_image, prompt, guidance=7.5, frames=25):
@@ -50,4 +52,4 @@ demo = gr.Interface(
 )
 
 # Enable the REST API
-demo.queue(
+demo.queue(default_concurrency_limit=1).launch(show_api=True)
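
The diff shows only the signature of infer, not its body. Below is a minimal sketch of how such a handler could drive StableVideoDiffusionPipeline, relying on the pipe, tempfile, and export_to_video names defined earlier in app.py. The resize target, decode_chunk_size, fps, and the mapping of the guidance slider onto max_guidance_scale are illustrative assumptions; and because SVD conditions only on a single input image, last_image and prompt are accepted here but not consumed.

def infer(first_image, last_image, prompt, guidance=7.5, frames=25):
    # Sketch only: SVD is image-conditioned, so last_image and prompt are
    # accepted for interface compatibility but not passed to the pipeline.
    image = first_image.resize((1024, 576))   # SVD-XT's native resolution
    result = pipe(
        image=image,
        num_frames=int(frames),
        decode_chunk_size=8,              # smaller chunks reduce VRAM use
        max_guidance_scale=guidance,      # hypothetical mapping of the slider
    )
    video_frames = result.frames[0]       # list of PIL images
    out_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    export_to_video(video_frames, out_path, fps=7)
    return out_path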
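With show_api=True on the queued demo, the endpoint can also be called programmatically. A minimal client sketch using gradio_client follows; the Space id owner/space-name is a placeholder, and it assumes the gr.Interface exposes infer's five inputs in order under the default /predict endpoint.

from gradio_client import Client, handle_file

# Placeholder Space id; replace with the actual <owner>/<space> of this app.
client = Client("owner/space-name")
result = client.predict(
    handle_file("first_frame.png"),   # first_image
    handle_file("last_frame.png"),    # last_image
    "slow dolly zoom",                # prompt
    7.5,                              # guidance
    25,                               # frames
    api_name="/predict",
)
print(result)  # result payload, e.g. a local path to the generated video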