import os
import tempfile

import gradio as gr
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

# Hold the pipeline globally and load it lazily on first use, so the app
# starts without paying the model-download cost up front.
pipe = None


def generate_video(image, prompt, seed=42):
    global pipe
    # Initialize the pipeline on the first call only.
    if pipe is None:
        pipe = DiffusionPipeline.from_pretrained(
            "Wan-AI/Wan2.1-VACE-14B",
            torch_dtype=torch.float16,
        )
        pipe.to("cuda")
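        # Assumption: a 14B-parameter model in float16 needs tens of GB of
        # VRAM; on smaller GPUs, diffusers' pipe.enable_model_cpu_offload()
        # could replace pipe.to("cuda") to trade speed for memory.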
    # Seed a dedicated generator so each request is reproducible without
    # mutating global RNG state.
    generator = torch.Generator(device="cuda").manual_seed(int(seed))
    # Generate the video; .frames[0] selects the first video in the batch.
    output = pipe(image=image, prompt=prompt, generator=generator).frames[0]
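    # Assumption: like other diffusers video pipelines, this call likely also
    # accepts num_frames, num_inference_steps, and guidance_scale, which could
    # be exposed as additional Gradio inputs.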
    # Save the frames to a temporary MP4 file and return its path.
    temp_dir = tempfile.mkdtemp()
    output_path = os.path.join(temp_dir, "output.mp4")
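    # Assumption: export_to_video also takes an fps argument; passing fps=16
    # here would match Wan2.1's commonly reported native frame rate.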
    export_to_video(output, output_path)
    return output_path

# Build the Gradio interface.
with gr.Blocks(title="Wan2.1 Video Generation") as demo:
    gr.Markdown("# Wan2.1-VACE-14B Video Generation")
    gr.Markdown("Generate videos from images and prompts using the Wan2.1 model.")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(label="Input Image", type="pil")
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe the video you want to generate...",
            )
            seed = gr.Number(label="Seed", value=42, precision=0)
            generate_btn = gr.Button("Generate Video")
        with gr.Column():
            output_video = gr.Video(label="Generated Video")
    # Example inputs
    gr.Examples(
        examples=[
            [
                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png",
                "A man with short gray hair plays a red electric guitar.",
                42,
            ],
            [
                "https://upload.wikimedia.org/wikipedia/commons/thumb/4/41/Sunflower_from_Silesia2.jpg/1200px-Sunflower_from_Silesia2.jpg",
                "A sunflower slowly blooming in the sunlight",
                123,
            ],
        ],
        inputs=[input_image, prompt, seed],
        outputs=output_video,
        fn=generate_video,
        cache_examples=True,
    )
    generate_btn.click(
        fn=generate_video,
        inputs=[input_image, prompt, seed],
        outputs=output_video,
    )
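
# Note (assumption): for a heavy GPU workload like this, enabling Gradio's
# request queue, e.g. demo.queue().launch(), helps concurrent users avoid
# timeouts.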
demo.launch()