import gradio as gr
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video
import tempfile
import os

pipe = None


def generate_video(image, prompt, seed=42):
    global pipe
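
    # Load the pipeline lazily on the first request so the app starts
    # without blocking on the large model download.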
    if pipe is None:
        pipe = DiffusionPipeline.from_pretrained(
            "Wan-AI/Wan2.1-VACE-14B",
            torch_dtype=torch.float16,
        )
        pipe.to("cuda")
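
    # Seed the global RNG so results are reproducible for a given seed.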
    torch.manual_seed(seed)
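
    # `.frames` holds one sequence of frames per prompt; take the first.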
    output = pipe(image=image, prompt=prompt).frames[0]
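
    # Write the frames to a temporary MP4; Gradio serves the returned path.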
    temp_dir = tempfile.mkdtemp()
    output_path = os.path.join(temp_dir, "output.mp4")
    export_to_video(output, output_path)

    return output_path


with gr.Blocks(title="Wan2.1 Video Generation") as demo:
    gr.Markdown("# Wan2.1-VACE-14B Video Generation")
    gr.Markdown("Generate videos from images and prompts using the Wan2.1 model.")

    with gr.Row():
        with gr.Column():
            input_image = gr.Image(label="Input Image", type="pil")
            prompt = gr.Textbox(label="Prompt", placeholder="Describe the video you want to generate...")
            seed = gr.Number(label="Seed", value=42, precision=0)
            generate_btn = gr.Button("Generate Video")

        with gr.Column():
            output_video = gr.Video(label="Generated Video")
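
    # Cached examples are generated once at startup, so the model runs on
    # each example before the UI is served.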
    gr.Examples(
        examples=[
            [
                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png",
                "A man with short gray hair plays a red electric guitar.",
                42,
            ],
            [
                "https://upload.wikimedia.org/wikipedia/commons/thumb/4/41/Sunflower_from_Silesia2.jpg/1200px-Sunflower_from_Silesia2.jpg",
                "A sunflower slowly blooming in the sunlight",
                123,
            ],
        ],
        inputs=[input_image, prompt, seed],
        outputs=output_video,
        fn=generate_video,
        cache_examples=True,
    )
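
    # Wire the button to the generation function.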
    generate_btn.click(
        fn=generate_video,
        inputs=[input_image, prompt, seed],
        outputs=output_video,
    )

demo.launch()