Spaces:
Build error
```python
import torch
import gradio as gr
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video
import spaces

# Check if a GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the pipeline
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipeline.to(device)


def generate_video(image_path, seed):
    # Load and preprocess the image
    image = load_image(image_path)
    image = image.resize((1024, 576))

    # Set the generator seed
    generator = torch.Generator(device=device).manual_seed(int(seed))  # gr.Number passes a float, so cast to int

    # Generate the video frames
    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]

    # Export the frames to a video file
    output_video_path = "generated.mp4"
    export_to_video(frames, output_video_path, fps=25)
    return output_video_path


# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Stable Video Diffusion")
    gr.Markdown("Generate a video from an uploaded image using Stable Video Diffusion.")
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="filepath", label="Upload Image")
            seed_input = gr.Number(label="Seed", value=666666)
            generate_button = gr.Button("Generate Video")
        with gr.Column():
            video_output = gr.Video(label="Generated Video")
    with gr.Row():
        example_image = gr.Image("example.jpeg", label="Example Image")
        example_video = gr.Video("generated.mp4", label="Example Video")
    generate_button.click(
        fn=generate_video,
        inputs=[image_input, seed_input],
        outputs=video_output,
    )

# Launch the interface
if __name__ == "__main__":
    demo.launch()
```
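One thing that stands out: `spaces` is imported but never used. If this Space runs on ZeroGPU hardware, the function that actually needs the GPU is normally wrapped with the `@spaces.GPU` decorator; the following is a minimal sketch under that assumption, not a confirmed fix for the build error:

```python
import spaces  # preinstalled on Hugging Face Spaces; only meaningful on ZeroGPU hardware


@spaces.GPU  # assumption: ZeroGPU Space; requests a GPU for the duration of each call
def generate_video(image_path, seed):
    # body unchanged from the app above
    ...
```

Build errors on Spaces are more often caused by the dependency list than by the app code itself; for this app, `requirements.txt` would likely need at least `torch`, `diffusers`, `transformers`, and `accelerate` (the exact set and versions depend on the Space's SDK and hardware).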