Update app.py
Browse files
app.py
CHANGED
@@ -1,59 +1,49 @@
|
|
1 |
import gradio as gr
|
2 |
-
import subprocess
|
3 |
-
import os
|
4 |
import random
|
5 |
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
def generate_video(prompt, negative_prompt, image, frames, fps):
    """Generate an animated video from an uploaded image via the WAN I2V pipeline.

    Args:
        prompt: Text prompt describing the desired animation.
        negative_prompt: Text describing what the model should avoid.
        image: Uploaded image object (PIL-compatible; must support ``.save()``).
        frames: Number of frames to render (converted to str for the CLI).
        fps: Frames per second of the output video.

    Returns:
        Filesystem path to the generated ``.mp4``.

    Raises:
        subprocess.CalledProcessError: if the WAN pipeline exits non-zero.
        RuntimeError: if the pipeline exits cleanly but wrote no output file.
    """
    # Persist the uploaded image where the pipeline script can read it.
    image_path = "/tmp/uploaded_image.png"
    image.save(image_path)

    # Randomized suffix keeps concurrent/consecutive runs from clobbering
    # each other's output.
    output_path = f"/tmp/generated_{random.randint(1000,9999)}.mp4"

    # NOTE(review): WAN_SCRIPT is not defined anywhere in this view —
    # presumably a module-level constant pointing at the WAN I2V entry
    # script; confirm it is set before this function is called.
    command = [
        "python3", WAN_SCRIPT,
        "--prompt", prompt,
        "--negative_prompt", negative_prompt,
        "--image", image_path,
        "--frames", str(frames),
        "--fps", str(fps),
        "--output", output_path,
    ]

    print("Running command:", " ".join(command))
    # check=True: fail loudly on a non-zero exit instead of silently
    # returning a path to a file that was never written (the original
    # ignored the subprocess exit status entirely).
    subprocess.run(command, check=True)

    # Belt-and-braces: some pipelines exit 0 without producing output.
    if not os.path.exists(output_path):
        raise RuntimeError(f"WAN pipeline exited cleanly but {output_path} was not created")

    print("Generated:", output_path)
    return output_path
|
34 |
-
|
35 |
-
# Gradio UI
|
36 |
-
with gr.Blocks() as demo:
|
37 |
-
gr.Markdown("## 🎬 WAN 2.1 I2V Video Generator\nUpload an image, enter a prompt — generate animated video!")
|
38 |
|
|
|
39 |
with gr.Row():
|
40 |
with gr.Column():
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
with gr.Column():
|
49 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
50 |
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
55 |
)
|
56 |
|
57 |
-
|
58 |
-
if __name__ == "__main__":
|
59 |
-
demo.launch()
|
|
|
1 |
import gradio as gr
|
|
|
|
|
2 |
import random
|
3 |
|
4 |
+
with gr.Blocks(fill_height=True) as demo:
    # Sidebar: provider blurb plus the HF OAuth sign-in button that
    # gr.load() below consumes as its token source.
    with gr.Sidebar():
        gr.Markdown("# Inference Provider")
        gr.Markdown("This Space showcases the Wan-AI/Wan2.1-T2V-1.3B model, served by the fal-ai API. Sign in with your Hugging Face account to use this API.")
        login_btn = gr.LoginButton("Sign in")

    gr.Markdown("## 🎬 Generate Video from Text")

    with gr.Row():
        # Left column: every generation control, top to bottom.
        with gr.Column():
            prompt_box = gr.Textbox(label="Prompt", placeholder="Enter your prompt to generate video...")
            fps_ctrl = gr.Slider(1, 24, value=8, label="FPS")
            frames_ctrl = gr.Slider(8, 48, value=24, label="Number of Frames")
            ratio_ctrl = gr.Dropdown(choices=["square", "landscape", "portrait"], value="square", label="Aspect Ratio")
            guidance_ctrl = gr.Slider(1, 20, value=15, label="Guidance Scale")
            # Seed default is drawn once at app start-up, not per page load.
            seed_ctrl = gr.Number(value=random.randint(1, 99999), label="Seed")
            run_btn = gr.Button("Generate Video")
        # Right column: the rendered result.
        with gr.Column():
            result_video = gr.Video(label="Generated Video")

    # Remote model served through the fal-ai inference provider; the login
    # button supplies each caller's Hugging Face token.
    model = gr.load(
        "models/Wan-AI/Wan2.1-T2V-1.3B",
        inputs=["prompt", "num_frames", "fps", "aspect_ratio", "guidance_scale", "negative_prompt", "seed"],
        outputs=result_video,
        accept_token=login_btn,
        provider="fal-ai",
    )

    def generate_video_fn(prompt, fps, num_frames, aspect_ratio, guidance_scale, seed):
        """Forward the UI control values to the loaded model; return its video."""
        return model(
            prompt=prompt,
            num_frames=num_frames,
            fps=fps,
            aspect_ratio=aspect_ratio,
            guidance_scale=guidance_scale,
            negative_prompt="",  # the UI exposes no negative-prompt control
            seed=int(seed),
        )

    run_btn.click(
        fn=generate_video_fn,
        inputs=[prompt_box, fps_ctrl, frames_ctrl, ratio_ctrl, guidance_ctrl, seed_ctrl],
        outputs=result_video,
    )

demo.launch()
|
|
|
|