import os

import cv2
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

# Load the Stable Diffusion 2.1 pipeline once at startup. Per-frame generation on the
# CPU is extremely slow, so move the pipeline to the GPU when one is available.
pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

# Where the intermediate frames and the final video are written.
FRAME_FOLDER = "frames"
VIDEO_OUTPUT = "generated_video.mp4"
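

# NOTE: each frame below is an independent text-to-image sample conditioned on the
# same prompt, so the finished clip plays as a slideshow of related stills rather
# than a temporally coherent animation.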
def generate_frames(prompt, num_frames=10):
    """Generate num_frames images for the prompt and save them as zero-padded PNGs."""
    os.makedirs(FRAME_FOLDER, exist_ok=True)
    for i in range(num_frames):
        image = pipe(prompt).images[0]
        image.save(f"{FRAME_FOLDER}/frame_{i:03d}.png")
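

# OpenCV's VideoWriter with the "mp4v" FourCC stitches the saved PNGs into an MP4;
# at the default fps=2, a 10-frame clip runs for five seconds.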
def create_video_from_frames(frame_folder, output_path, fps=2):
    """Assemble the PNG frames in frame_folder into an MP4 at the given frame rate."""
    images = sorted(img for img in os.listdir(frame_folder) if img.endswith(".png"))
    if not images:
        raise ValueError("No frames generated.")

    # Size the video from the first frame; all frames share the same dimensions.
    frame = cv2.imread(os.path.join(frame_folder, images[0]))
    height, width, _ = frame.shape

    video = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
    for img in images:
        video.write(cv2.imread(os.path.join(frame_folder, img)))
    video.release()


def generate_video(prompt):
    """End-to-end Gradio handler: render the frames, then encode and return the MP4."""
    generate_frames(prompt, num_frames=10)
    create_video_from_frames(FRAME_FOLDER, VIDEO_OUTPUT, fps=2)
    return VIDEO_OUTPUT
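

# Minimal Gradio UI: a prompt textbox in, the finished MP4 back out.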
iface = gr.Interface(
    fn=generate_video,
    inputs=gr.Textbox(lines=3, placeholder="Describe your scene here..."),
    outputs=gr.Video(),
    title="AI Text-to-Video Generator (No manual assets needed)",
)

if __name__ == "__main__":
    iface.launch()