# lumapi2 / app.py
import gradio as gr
import os
from lumaai import AsyncLumaAI
import asyncio
import aiohttp
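
# Gradio demo for the Luma AI generation API: text-to-video and image-to-video,
# with optional camera motion, extend / reverse-extend, and keyframe interpolation.
# Requires the LMGEN_KEY environment variable to hold a Luma AI API key.
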
async def get_camera_motions():
    api_key = os.getenv("LMGEN_KEY")
    if not api_key:
        raise gr.Error("LMGEN_KEY environment variable is not set.")
    client = AsyncLumaAI(auth_token=api_key)
    try:
        motions = await client.generations.camera_motion.list()
        return [str(motion) for motion in motions]  # Convert each motion to a string
    except Exception as e:
        print(f"Error fetching camera motions: {str(e)}")
        return []
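
# Core helper: builds the generation request (with optional keyframes for extend,
# reverse extend, or two-clip interpolation), polls the job until it completes,
# then downloads the finished MP4 into the working directory.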
async def generate_video(client, prompt, loop=False, aspect_ratio="16:9", camera_motion=None,
                         extend_id=None, reverse_extend_id=None, interpolate_ids=None, progress=gr.Progress()):
    generation_params = {
        "prompt": prompt,
        "loop": loop,
        "aspect_ratio": aspect_ratio
    }

    if camera_motion:
        generation_params["prompt"] += f" {camera_motion}"

    if extend_id:
        generation_params["keyframes"] = {
            "frame0": {
                "type": "generation",
                "id": extend_id
            }
        }
    elif reverse_extend_id:
        generation_params["keyframes"] = {
            "frame1": {
                "type": "generation",
                "id": reverse_extend_id
            }
        }
    elif interpolate_ids:
        generation_params["keyframes"] = {
            "frame0": {
                "type": "generation",
                "id": interpolate_ids[0]
            },
            "frame1": {
                "type": "generation",
                "id": interpolate_ids[1]
            }
        }

    progress(0, desc="Initiating video generation...")
    generation = await client.generations.create(**generation_params)
    progress(0.1, desc="Video generation started. Waiting for completion...")

    # Poll for completion
    start_time = asyncio.get_event_loop().time()
    while True:
        status = await client.generations.get(id=generation.id)
        if status.state == "completed":
            break
        elif status.state == "failed":
            raise Exception("Video generation failed")
        # Update progress based on time elapsed (assuming 60 seconds total)
        elapsed_time = asyncio.get_event_loop().time() - start_time
        progress_value = min(0.1 + (elapsed_time / 60) * 0.8, 0.9)
        progress(progress_value, desc="Generating video...")
        await asyncio.sleep(5)

    progress(0.9, desc="Downloading generated video...")
    # Download the video
    video_url = status.assets.video
    async with aiohttp.ClientSession() as session:
        async with session.get(video_url) as resp:
            if resp.status != 200:
                raise Exception(f"Failed to download video (HTTP {resp.status})")
            file_name = f"luma_ai_generated_{generation.id}.mp4"
            with open(file_name, 'wb') as fd:
                while True:
                    chunk = await resp.content.read(1024)
                    if not chunk:
                        break
                    fd.write(chunk)

    progress(1.0, desc="Video generation complete!")
    return file_name
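
# Gradio callback for the "Text to Video" tab; returns (video_path, error_message)
# so failures surface in the UI instead of raising.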
async def text_to_video(prompt, loop, aspect_ratio, camera_motion, extend_id, reverse_extend_id,
                        interpolate_id1, interpolate_id2, progress=gr.Progress()):
    api_key = os.getenv("LMGEN_KEY")
    if not api_key:
        raise gr.Error("LMGEN_KEY environment variable is not set.")
    client = AsyncLumaAI(auth_token=api_key)
    try:
        interpolate_ids = None
        if interpolate_id1 and interpolate_id2:
            interpolate_ids = [interpolate_id1, interpolate_id2]
        video_path = await generate_video(
            client, prompt, loop, aspect_ratio, camera_motion,
            extend_id, reverse_extend_id, interpolate_ids, progress
        )
        return video_path, ""
    except Exception as e:
        return None, f"An error occurred: {str(e)}"
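
# Gradio callback for the "Image to Video" tab; the supplied image URL becomes the
# frame0 keyframe and the rest of the flow matches generate_video.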
async def image_to_video(prompt, image_url, loop, aspect_ratio, camera_motion, progress=gr.Progress()):
    api_key = os.getenv("LMGEN_KEY")
    if not api_key:
        raise gr.Error("LMGEN_KEY environment variable is not set.")
    client = AsyncLumaAI(auth_token=api_key)
    try:
        generation_params = {
            "prompt": prompt + (f" {camera_motion}" if camera_motion else ""),
            "loop": loop,
            "aspect_ratio": aspect_ratio,
            "keyframes": {
                "frame0": {
                    "type": "image",
                    "url": image_url
                }
            }
        }

        progress(0, desc="Initiating video generation from image...")
        generation = await client.generations.create(**generation_params)
        progress(0.1, desc="Video generation started. Waiting for completion...")

        # Poll for completion
        start_time = asyncio.get_event_loop().time()
        while True:
            status = await client.generations.get(id=generation.id)
            if status.state == "completed":
                break
            elif status.state == "failed":
                raise Exception("Video generation failed")
            # Update progress based on time elapsed (assuming 60 seconds total)
            elapsed_time = asyncio.get_event_loop().time() - start_time
            progress_value = min(0.1 + (elapsed_time / 60) * 0.8, 0.9)
            progress(progress_value, desc="Generating video...")
            await asyncio.sleep(5)

        progress(0.9, desc="Downloading generated video...")
        # Download the video
        video_url = status.assets.video
        async with aiohttp.ClientSession() as session:
            async with session.get(video_url) as resp:
                if resp.status != 200:
                    raise Exception(f"Failed to download video (HTTP {resp.status})")
                file_name = f"luma_ai_generated_{generation.id}.mp4"
                with open(file_name, 'wb') as fd:
                    while True:
                        chunk = await resp.content.read(1024)
                        if not chunk:
                            break
                        fd.write(chunk)

        progress(1.0, desc="Video generation complete!")
        return file_name, ""
    except Exception as e:
        return None, f"An error occurred: {str(e)}"
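
# UI: one tab per generation mode, each with an "Advanced Options" accordion.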
with gr.Blocks() as demo:
    gr.Markdown("# Luma AI Text-to-Video Demo")

    with gr.Tab("Text to Video"):
        prompt = gr.Textbox(label="Prompt")
        generate_btn = gr.Button("Generate Video")
        video_output = gr.Video(label="Generated Video")
        error_output = gr.Textbox(label="Error Messages", visible=True)

        with gr.Accordion("Advanced Options", open=False):
            loop = gr.Checkbox(label="Loop", value=False)
            aspect_ratio = gr.Dropdown(label="Aspect Ratio", choices=["16:9", "1:1", "9:16", "4:3", "3:4"], value="16:9")
            camera_motion = gr.Dropdown(label="Camera Motion", choices=[])
            extend_id = gr.Textbox(label="Extend Video ID (optional)")
            reverse_extend_id = gr.Textbox(label="Reverse Extend Video ID (optional)")
            with gr.Row():
                interpolate_id1 = gr.Textbox(label="Interpolate Video ID 1 (optional)")
                interpolate_id2 = gr.Textbox(label="Interpolate Video ID 2 (optional)")

        generate_btn.click(
            text_to_video,
            inputs=[prompt, loop, aspect_ratio, camera_motion, extend_id, reverse_extend_id, interpolate_id1, interpolate_id2],
            outputs=[video_output, error_output]
        )

    with gr.Tab("Image to Video"):
        img_prompt = gr.Textbox(label="Prompt")
        img_url = gr.Textbox(label="Image URL")
        img_generate_btn = gr.Button("Generate Video from Image")
        img_video_output = gr.Video(label="Generated Video")
        img_error_output = gr.Textbox(label="Error Messages", visible=True)

        with gr.Accordion("Advanced Options", open=False):
            img_loop = gr.Checkbox(label="Loop", value=False)
            img_aspect_ratio = gr.Dropdown(label="Aspect Ratio", choices=["16:9", "1:1", "9:16", "4:3", "3:4"], value="16:9")
            img_camera_motion = gr.Dropdown(label="Camera Motion", choices=[])

        img_generate_btn.click(
            image_to_video,
            inputs=[img_prompt, img_url, img_loop, img_aspect_ratio, img_camera_motion],
            outputs=[img_video_output, img_error_output]
        )
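
    # Populate both Camera Motion dropdowns once the UI has loaded.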
    async def update_camera_motions():
        try:
            motions = await get_camera_motions()
            return {camera_motion: gr.update(choices=motions), img_camera_motion: gr.update(choices=motions)}
        except Exception as e:
            print(f"Error updating camera motions: {str(e)}")
            return {camera_motion: gr.update(choices=[]), img_camera_motion: gr.update(choices=[])}

    # Dict-style returns still require the target components to be listed as outputs.
    demo.load(update_camera_motions, outputs=[camera_motion, img_camera_motion])

demo.queue().launch(debug=True)