#!/usr/bin/env python3
"""
AI Video Generator with Gradio
Single file application - app.py
"""
import os
import gradio as gr
import replicate
import base64
from PIL import Image
import io
import requests
from datetime import datetime
import tempfile
import time
# API token setup
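# The replicate SDK reads its key from REPLICATE_API_TOKEN, so the value of
# RAPI_TOKEN (if set) is mirrored into that variable below.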
api_token = os.getenv("RAPI_TOKEN")
if api_token:
os.environ["REPLICATE_API_TOKEN"] = api_token
# Aspect ratio options
ASPECT_RATIOS = {
"16:9": "16:9 (YouTube, Standard Video)",
"4:3": "4:3 (Traditional TV Format)",
"1:1": "1:1 (Instagram Feed)",
"3:4": "3:4 (Instagram Portrait)",
"9:16": "9:16 (Instagram Reels, TikTok)",
"21:9": "21:9 (Cinematic Wide)",
"9:21": "9:21 (Ultra Vertical)"
}
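# The dict keys are passed directly as the model's aspect_ratio input;
# the values are display-only descriptions shown in the UI.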
# Default prompts
DEFAULT_TEXT_PROMPT = ""
DEFAULT_IMAGE_PROMPT = "Generate a video with smooth and natural movement. Objects should have visible motion while maintaining fluid transitions."
def update_prompt_placeholder(mode):
"""Update prompt placeholder based on mode"""
if mode == "Text to Video":
return gr.update(
placeholder="Describe the video you want to create.\nExample: The sun rises slowly between tall buildings. [Ground-level follow shot] Bicycle tires roll over a dew-covered street at dawn.",
value=""
)
else:
return gr.update(
placeholder="Describe how the image should move.\nExample: Camera slowly zooms in while clouds move across the sky. The subject's hair gently moves in the wind.",
value=DEFAULT_IMAGE_PROMPT
)
def update_image_input(mode):
"""Show/hide image input based on mode"""
if mode == "Image to Video":
return gr.update(visible=True)
else:
return gr.update(visible=False)
def wait_for_model_with_retry(replicate_client, model_name, max_retries=5, initial_wait=10):
"""Wait for model to be ready with retry logic"""
for attempt in range(max_retries):
try:
            # Fetch model metadata; success means the model is reachable
            replicate_client.models.get(model_name)
            return True
except Exception as e:
if attempt < max_retries - 1:
wait_time = initial_wait * (attempt + 1)
print(f"Model not ready, waiting {wait_time} seconds... (Attempt {attempt + 1}/{max_retries})")
time.sleep(wait_time)
else:
return False
return False
def generate_video(mode, prompt, image, aspect_ratio, seed, api_key_input, progress=gr.Progress()):
"""Main video generation function"""
# API token check
token = api_key_input or api_token
if not token:
return None, "❌ API token required. Please set RAPI_TOKEN environment variable or enter your API key."
os.environ["REPLICATE_API_TOKEN"] = token
# Input validation
if not prompt:
return None, "❌ Please enter a prompt."
if mode == "Image to Video" and image is None:
return None, "❌ Please upload an image."
try:
progress(0, desc="Preparing video generation...")
# Input parameters setup
input_params = {
"prompt": prompt,
"duration": 5,
"resolution": "480p",
"aspect_ratio": aspect_ratio,
"seed": seed
}
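        # Duration and resolution are fixed here to match the "Fixed Settings"
        # panel in the UI; only the prompt, aspect ratio, and seed come from
        # user input.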
# Image to video mode
if mode == "Image to Video" and image is not None:
progress(0.1, desc="Processing image...")
# Convert PIL Image to base64
if isinstance(image, str): # File path
with Image.open(image) as img:
buffered = io.BytesIO()
img.save(buffered, format="PNG")
image_base64 = base64.b64encode(buffered.getvalue()).decode()
else: # PIL Image object
buffered = io.BytesIO()
image.save(buffered, format="PNG")
image_base64 = base64.b64encode(buffered.getvalue()).decode()
input_params["image"] = f"data:image/png;base64,{image_base64}"
progress(0.2, desc="Checking model availability...")
# Create Replicate client with extended timeout
client = replicate.Client(
api_token=token,
timeout=300 # 5 minutes timeout
)
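        # This timeout applies to individual HTTP requests made by the client;
        # the overall generation time is bounded separately by the polling loop below.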
# Wait for model to be ready
model_ready = wait_for_model_with_retry(client, "bytedance/seedance-1-lite")
if not model_ready:
return None, "⏳ Model is still booting up. Please try again in a few minutes."
progress(0.3, desc="Calling Replicate API...")
# Run Replicate with retry logic
max_attempts = 3
for attempt in range(max_attempts):
try:
                # Use the prediction API for better control over polling.
                # The predictions endpoint needs a concrete version id, so
                # resolve the model's latest version rather than passing an
                # unsupported ":latest" tag.
                model = client.models.get("bytedance/seedance-1-lite")
                prediction = client.predictions.create(
                    version=model.latest_version,
                    input=input_params,
                )
# Poll for completion with extended timeout
start_time = time.time()
timeout_seconds = 300 # 5 minutes
while prediction.status not in ["succeeded", "failed", "canceled"]:
if time.time() - start_time > timeout_seconds:
return None, "⏱️ Generation timed out. The server might be under heavy load. Please try again."
time.sleep(2) # Poll every 2 seconds
prediction.reload()
# Update progress
elapsed = time.time() - start_time
progress_val = min(0.3 + (elapsed / timeout_seconds) * 0.4, 0.7)
progress(progress_val, desc=f"Generating video... ({int(elapsed)}s)")
if prediction.status == "failed":
error_msg = getattr(prediction, 'error', 'Unknown error')
if "cold boot" in str(error_msg).lower() or "starting" in str(error_msg).lower():
if attempt < max_attempts - 1:
progress(0.3, desc=f"Model is starting up, retrying... (Attempt {attempt + 2}/{max_attempts})")
time.sleep(30) # Wait 30 seconds before retry
continue
return None, f"❌ Generation failed: {error_msg}"
elif prediction.status == "canceled":
return None, "❌ Generation was canceled."
# Success - get output
output = prediction.output
break
except replicate.exceptions.ReplicateError as e:
if "timeout" in str(e).lower() and attempt < max_attempts - 1:
progress(0.3, desc=f"Timeout occurred, retrying... (Attempt {attempt + 2}/{max_attempts})")
time.sleep(10)
continue
else:
return None, f"❌ Replicate API error: {str(e)}"
except Exception as e:
if attempt < max_attempts - 1:
progress(0.3, desc=f"Error occurred, retrying... (Attempt {attempt + 2}/{max_attempts})")
time.sleep(5)
continue
else:
return None, f"❌ Unexpected error: {str(e)}"
progress(0.7, desc="Downloading video...")
# Get video data
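        # Depending on the replicate client version, output may be a file-like
        # object (with .read()) or a plain URL string, so handle both.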
if hasattr(output, 'read'):
video_data = output.read()
else:
# Download from URL with timeout
response = requests.get(output, timeout=60)
video_data = response.content
# Save to temporary file
with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmp_file:
tmp_file.write(video_data)
video_path = tmp_file.name
# Also save as output.mp4
with open("output.mp4", "wb") as file:
file.write(video_data)
progress(1.0, desc="Complete!")
# Generation info
info = f"""βœ… Video generated successfully!
πŸ“Š Generation Info:
- Mode: {mode}
- Aspect Ratio: {aspect_ratio}
- Seed: {seed}
- Duration: 5 seconds
- Resolution: 480p
- File: output.mp4"""
return video_path, info
except requests.exceptions.Timeout:
return None, "⏱️ Request timed out. The server might be under heavy load. Please try again in a few minutes."
except Exception as e:
error_msg = f"❌ Error occurred: {str(e)}"
if "timeout" in str(e).lower():
            error_msg += "\n\n💡 Tip: The model might be cold starting. Please wait a minute and try again."
return None, error_msg
# Gradio interface
with gr.Blocks(title="Bytedance Seedance Video Free", theme=gr.themes.Soft()) as app:
gr.Markdown("""
    # 🎬 Bytedance Seedance Video Free
    Generate videos from text or images using the **Replicate API**.
[![Powered by Ginigen](https://img.shields.io/badge/Powered%20by-Replicate-blue)](https://ginigen.com/)
""")
with gr.Row():
with gr.Column(scale=1):
# API Settings
            with gr.Accordion("⚙️ API Settings", open=not bool(api_token)):
if api_token:
                    gr.Markdown("✅ API token loaded from environment variable.")
api_key_input = gr.Textbox(
label="Replicate API Token (Optional)",
type="password",
placeholder="Enter to override environment variable",
value=""
)
else:
gr.Markdown("⚠️ RAPI_TOKEN environment variable not set.")
api_key_input = gr.Textbox(
label="Replicate API Token (Required)",
type="password",
placeholder="Enter your Replicate API token",
value=""
)
# Generation mode
mode = gr.Radio(
label="🎯 Generation Mode",
choices=["Text to Video", "Image to Video"],
value="Text to Video"
)
# Image upload
image_input = gr.Image(
label="πŸ“· Upload Image",
type="pil",
visible=False
)
# Aspect ratio
aspect_ratio = gr.Dropdown(
label="πŸ“ Aspect Ratio",
choices=list(ASPECT_RATIOS.keys()),
value="16:9",
info="Choose ratio optimized for social media platforms"
)
# Ratio description
ratio_info = gr.Markdown(value=f"Selected ratio: {ASPECT_RATIOS['16:9']}")
# Seed setting
seed = gr.Number(
label="🎲 Random Seed",
value=42,
precision=0,
info="Use same seed value to reproduce same results"
)
# Fixed settings display
gr.Markdown("""
            ### 📋 Fixed Settings
- **Duration**: 5 seconds
- **Resolution**: 480p
""")
with gr.Column(scale=2):
# Prompt input
prompt = gr.Textbox(
label="✍️ Prompt",
lines=5,
placeholder="Describe the video you want to create.\nExample: The sun rises slowly between tall buildings. [Ground-level follow shot] Bicycle tires roll over a dew-covered street at dawn.",
value=""
)
# Generate button
generate_btn = gr.Button("🎬 Generate Video", variant="primary", size="lg")
# Results display
with gr.Column():
output_video = gr.Video(
label="πŸ“Ή Generated Video",
autoplay=True
)
output_info = gr.Textbox(
label="Information",
lines=8,
interactive=False
)
# Usage instructions
    with gr.Accordion("📖 How to Use", open=False):
gr.Markdown("""
### Installation
1. **Install required packages**:
```bash
pip install gradio replicate pillow requests
```
2. **Set environment variable** (optional):
```bash
export RAPI_TOKEN="your-replicate-api-token"
```
3. **Run**:
```bash
python app.py
```
### Features
- **Text to Video**: Generate video from text description only
- **Image to Video**: Transform uploaded image into animated video
- **Aspect Ratios**: Choose ratios optimized for various social media platforms
        - **Seed Value**: Use the same seed to reproduce identical results
### Prompt Writing Tips
- Use specific and detailed descriptions
- Specify camera movements (e.g., zoom in, pan left, tracking shot)
- Describe lighting and atmosphere (e.g., golden hour, dramatic lighting)
- Indicate movement speed (e.g., slowly, rapidly, gently)
### Troubleshooting
- **Timeout errors**: The model might be cold starting. Wait 1-2 minutes and try again.
- **Model booting**: First requests after inactivity may take longer as the model boots up.
- **Extended wait times**: Complex prompts or server load may cause longer generation times.
""")
# Examples
gr.Examples(
examples=[
["Text to Video", "A serene lake at sunrise with mist rolling over the water. Camera slowly pans across the landscape as birds fly overhead.", None, "16:9", 42],
["Text to Video", "Urban street scene at night with neon lights reflecting on wet pavement. People walking with umbrellas, camera tracking forward.", None, "9:16", 123],
["Text to Video", "Close-up of a flower blooming in time-lapse, soft natural lighting, shallow depth of field.", None, "1:1", 789],
],
inputs=[mode, prompt, image_input, aspect_ratio, seed],
label="Example Prompts"
)
# Event handlers
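    # mode.change is wired twice: once to swap the prompt placeholder/default
    # text and once to toggle the image upload visibility.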
mode.change(
fn=update_prompt_placeholder,
inputs=[mode],
outputs=[prompt]
)
mode.change(
fn=update_image_input,
inputs=[mode],
outputs=[image_input]
)
aspect_ratio.change(
fn=lambda x: f"Selected ratio: {ASPECT_RATIOS[x]}",
inputs=[aspect_ratio],
outputs=[ratio_info]
)
generate_btn.click(
fn=generate_video,
inputs=[mode, prompt, image_input, aspect_ratio, seed, api_key_input],
outputs=[output_video, output_info]
)
# Run app
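# server_name="0.0.0.0" makes the app reachable from outside the container
# (e.g. on Hugging Face Spaces); inbrowser=True only has an effect when
# running locally.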
if __name__ == "__main__":
app.launch(
server_name="0.0.0.0",
server_port=7860,
share=False,
inbrowser=True
)