import gradio as gr
import torch
import os
import uuid
import random
from glob import glob
from pathlib import Path
from typing import Optional
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video
from PIL import Image
from huggingface_hub import hf_hub_download

# ------------------------------------------------------------------------
# FIX: Adapt to the available hardware (GPU or CPU)
# ------------------------------------------------------------------------

# Automatically detect the device and select the appropriate data type.
# This makes the code runnable on machines with or without a dedicated NVIDIA GPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32

# Load the pipeline onto the detected device.
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch_dtype, variant="fp16"
)
pipe.to(device)
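# Note (optional): on GPUs with limited VRAM, diffusers' pipe.enable_model_cpu_offload()
# (requires the `accelerate` package) can be used instead of pipe.to(device),
# trading some speed for a much lower memory footprint.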

# Apply torch.compile for optimization only if on a GPU, as it's most effective there.
if device == "cuda":
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

# ------------------------------------------------------------------------

max_64_bit_int = 2**63 - 1

# Function to sample video from the input image
def sample(
    image: Image.Image,
    seed: Optional[int] = 42,
    randomize_seed: bool = True,
    motion_bucket_id: int = 127,
    fps_id: int = 6,
    version: str = "svd_xt",
    cond_aug: float = 0.02,
    decoding_t: int = 3,  # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
    output_folder: str = "outputs",
):
    if image.mode == "RGBA":
        image = image.convert("RGB")
    if randomize_seed:
        seed = random.randint(0, max_64_bit_int)
        
    generator = torch.manual_seed(seed)

    os.makedirs(output_folder, exist_ok=True)
    base_count = len(glob(os.path.join(output_folder, "*.mp4")))
    video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
    
    frames = pipe(
        image, 
        decode_chunk_size=decoding_t, 
        generator=generator, 
        motion_bucket_id=motion_bucket_id, 
        noise_aug_strength=0.1, 
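        # The SVD-XT checkpoint is trained to generate 25-frame clips, so num_frames is fixed here.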
        num_frames=25
    ).frames[0]
    
    export_to_video(frames, video_path, fps=fps_id)
    torch.manual_seed(seed)
    return video_path, seed

# Function to resize the uploaded image to the model's optimal input size
def resize_image(image, output_size=(1024, 576)):
    # Resizes and crops the image to a 16:9 aspect ratio.
    target_aspect = output_size[0] / output_size[1]
    image_aspect = image.width / image.height

    if image_aspect > target_aspect:
        new_height = output_size[1]
        new_width = int(new_height * image_aspect)
        resized_image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
        left = (new_width - output_size[0]) / 2
        top = 0
        right = (new_width + output_size[0]) / 2
        bottom = output_size[1]
    else:
        new_width = output_size[0]
        new_height = int(new_width / image_aspect)
        resized_image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
        left = 0
        top = (new_height - output_size[1]) / 2
        right = output_size[0]
        bottom = (new_height + output_size[1]) / 2

    cropped_image = resized_image.crop((left, top, right, bottom))
    return cropped_image
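# Standalone usage sketch (outside the Gradio UI). "input.png" is a placeholder
# filename, not a file shipped with this repo:
#   img = resize_image(Image.open("input.png"))
#   video_path, used_seed = sample(img, seed=42, randomize_seed=False)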

# Dynamically load image files from the 'images' directory
def get_example_images():
    image_dir = "images/"
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    image_files = glob(os.path.join(image_dir, "*.png")) + glob(os.path.join(image_dir, "*.jpg"))
    return image_files

# Gradio interface setup
with gr.Blocks() as demo:
    gr.Markdown('''# Stable Video Diffusion
    #### Generate short videos from a single image.''')

    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Upload Your Image", type="pil")
            generate_btn = gr.Button("Generate Video", variant="primary")
        video = gr.Video(label="Generated Video")

    with gr.Accordion("Advanced Options", open=False):
        seed = gr.Slider(label="Seed", value=42, minimum=0, maximum=max_64_bit_int, step=1)
        randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
        motion_bucket_id = gr.Slider(label="Motion Bucket ID", info="Controls the amount of motion in the video.", value=127, minimum=1, maximum=255)
        fps_id = gr.Slider(label="Frames Per Second (FPS)", info="Adjusts the playback speed of the video.", value=7, minimum=5, maximum=30)

    # When a new image is uploaded, process it immediately
    image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
    
    # When the generate button is clicked, run the sampling function
    generate_btn.click(
        fn=sample, 
        inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], 
        outputs=[video, seed], 
        api_name="video"
    )

    # Dynamically load examples from the filesystem
    example_images = get_example_images()
    if example_images:
        gr.Examples(
            examples=example_images,
            inputs=image,
            outputs=[video, seed],
            fn=lambda img: sample(resize_image(img)),  # gr.Image(type="pil") already yields a PIL image, so only resize before sampling
            cache_examples=True,
        )
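        # Note: cache_examples=True runs the pipeline once per example at startup
        # to pre-generate the cached videos, which can take a while (especially on CPU).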

if __name__ == "__main__":
    demo.queue(max_size=20)
    demo.launch(share=True)