# t2a/app.py
import os

import cv2
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

# --- Initialize Stable Diffusion pipeline ---
# Move the pipeline to GPU when one is available; CPU inference works but is
# very slow for a 10-frame generation.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
pipe = pipe.to(device)
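# Optional: on memory-constrained GPUs, attention slicing (a standard diffusers
# method) lowers peak VRAM at a small speed cost. Uncomment if generation hits
# out-of-memory errors:
#
#   pipe.enable_attention_slicing()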
FRAME_FOLDER = "frames"
VIDEO_OUTPUT = "generated_video.mp4"
# --- Function to generate N frames using AI ---
def generate_frames(prompt, num_frames=10):
    os.makedirs(FRAME_FOLDER, exist_ok=True)
    for i in range(num_frames):
        # Each call runs a full text-to-image generation; zero-padded filenames
        # keep the frames in order when sorted later.
        image = pipe(prompt).images[0]
        image.save(f"{FRAME_FOLDER}/frame_{i:03d}.png")
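# Note: each pipe() call samples fresh random noise, so the frames are
# independent images rather than a coherent animation. A hedged sketch for at
# least making runs reproducible is to seed each frame deterministically (the
# `generator` argument is standard diffusers API; the per-frame seed scheme
# here is an illustrative assumption):
#
#   generator = torch.Generator(device=device).manual_seed(1000 + i)
#   image = pipe(prompt, generator=generator).images[0]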
# --- Function to create video from frames ---
def create_video_from_frames(frame_folder, output_path, fps=2):
    images = sorted([img for img in os.listdir(frame_folder) if img.endswith(".png")])
    if not images:
        raise ValueError("No frames generated.")
    # Use the first frame to fix the video dimensions.
    frame = cv2.imread(os.path.join(frame_folder, images[0]))
    height, width, _ = frame.shape
    video = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
    for img in images:
        video.write(cv2.imread(os.path.join(frame_folder, img)))
    video.release()
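# Note: OpenCV's 'mp4v' files are easy to write, but some browsers refuse to
# play them inline, which can leave the Gradio video component blank. A hedged
# fallback is to remux to H.264 with ffmpeg (assumes ffmpeg is on PATH; the
# output filename is illustrative):
#
#   import subprocess
#   subprocess.run(["ffmpeg", "-y", "-i", output_path, "-vcodec", "libx264",
#                   "-pix_fmt", "yuv420p", "playable.mp4"], check=True)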
# --- Main function called by Gradio ---
def generate_video(prompt):
    generate_frames(prompt, num_frames=10)  # Generate 10 frames
    create_video_from_frames(FRAME_FOLDER, VIDEO_OUTPUT, fps=2)  # 2 fps -> 5 s clip
    return VIDEO_OUTPUT
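# Example of exercising the pipeline headlessly (hypothetical prompt), e.g. as
# a smoke test without launching the UI:
#
#   path = generate_video("a watercolor fox trotting through snow")
#   print(path)  # -> generated_video.mp4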
# --- Gradio UI ---
iface = gr.Interface(
    fn=generate_video,
    inputs=gr.Textbox(lines=3, placeholder="Describe your scene here..."),
    outputs=gr.Video(),
    title="AI Text-to-Video Generator (No manual assets needed)",
)
if __name__ == "__main__":
    iface.launch()