import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
import moviepy.editor as mp  # moviepy 1.x API (the moviepy.editor module was removed in 2.x)
import os
import time
from pathlib import Path


def generate_image(prompt):
    try:
        print("Generating image...")
        # Note: the pipeline is reloaded on every call, which keeps memory free
        # between requests but makes each generation slow on CPU.
        model_id = "runwayml/stable-diffusion-v1-5"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
        pipe.to("cpu")

        # Generate image
        image = pipe(prompt).images[0]

        # Create output directory if it doesn't exist
        output_dir = Path("outputs")
        output_dir.mkdir(exist_ok=True)

        # Save image with a timestamp to avoid filename conflicts
        image_path = output_dir / f"generated_image_{int(time.time())}.png"
        image.save(image_path)

        print(f"Image saved at {image_path}")
        return str(image_path)
    except Exception as e:
        print(f"Error generating image: {str(e)}")
        return None


def create_video(image_path, duration=5):
    try:
        if not image_path or not os.path.exists(image_path):
            return None

        print("Creating video...")

        # Create video path in outputs directory
        output_dir = Path("outputs")
        video_path = output_dir / "output_video.mp4"

        # Create a still-image clip and render it at 24 fps
        clip = mp.ImageClip(image_path, duration=duration)
        clip = clip.set_fps(24)
        clip.write_videofile(str(video_path), codec="libx264")
        clip.close()

        print(f"Video saved at {video_path}")
        return str(video_path)
    except Exception as e:
        print(f"Error creating video: {str(e)}")
        return None


def text_to_video(prompt):
    # Generate image
    image_path = generate_image(prompt)
    if not image_path:
        return None, "❌ Error generating image. Please try again with a different prompt."

    # Create video
    video_path = create_video(image_path)

    # Clean up the intermediate image file
    try:
        if image_path and os.path.exists(image_path):
            os.remove(image_path)
    except Exception as e:
        print(f"Warning: Could not delete image file: {e}")

    if not video_path:
        return None, "❌ Error creating video. Please try again."

    # Return both the video path and a status message, matching the two
    # output components wired up in the click handler below.
    return video_path, "✅ Video generated successfully!"


# Create outputs directory if it doesn't exist
Path("outputs").mkdir(exist_ok=True)

# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("""
    # Text to AI Video Generator
    Create a video from text using Stable Diffusion and MoviePy
    """)

    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(
                label="Enter your prompt",
                placeholder="Describe the image you want to generate...",
                lines=3
            )
            generate_btn = gr.Button("Generate Video", variant="primary")

        with gr.Column():
            output_video = gr.Video(
                label="Generated Video",
                format="mp4",
                interactive=False
            )
            status = gr.Textbox(label="Status", interactive=False)

    # Examples
    gr.Examples(
        examples=[
            "A beautiful sunset over mountains",
            "An astronaut riding a horse on Mars",
            "A futuristic city with flying cars"
        ],
        inputs=prompt_input
    )

    # Button click handler
    generate_btn.click(
        fn=text_to_video,
        inputs=prompt_input,
        outputs=[output_video, status],
        api_name="generate"
    )

# Launch the app
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True,
        debug=True
    )
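

# ---------------------------------------------------------------------------
# Optional usage note (a minimal sketch, not part of the app itself): because
# the click handler above registers api_name="generate", the pipeline can also
# be driven from another process via gradio_client. This assumes the app is
# already running locally on port 7860 and that the gradio_client package is
# installed.
#
#   from gradio_client import Client
#
#   client = Client("http://localhost:7860")
#   video_path, status = client.predict(
#       "A beautiful sunset over mountains",  # prompt text
#       api_name="/generate",
#   )
#   print(status, video_path)
# ---------------------------------------------------------------------------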