Spaces:
Sleeping
Sleeping
File size: 3,312 Bytes
06a6e91 eacc26a 6123d43 06a6e91 b1fb753 b8aa7c5 4eada0f b8aa7c5 b1fb753 4eada0f a361f34 06a6e91 eacc26a 06a6e91 eacc26a 836daad 2cde650 eacc26a b8aa7c5 88d63cf eacc26a 88d63cf 836daad eacc26a 2cde650 eacc26a a6b51c5 eacc26a 836daad eacc26a b8aa7c5 eacc26a 88d63cf 836daad eacc26a 57e4050 b8aa7c5 4eada0f b8aa7c5 4eada0f b8aa7c5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 |
import gradio as gr
import os
from lumaai import AsyncLumaAI
import asyncio
import aiohttp
async def get_camera_motions():
    """Fetch the list of supported camera motions from the Luma AI API.

    Returns:
        list[str]: each available camera motion rendered as a string.
        On any API failure this is best-effort and returns an empty list.

    Raises:
        gr.Error: if the LMGEN_KEY environment variable is missing.
    """
    token = os.getenv("LMGEN_KEY")
    if not token:
        raise gr.Error("LMGEN_KEY environment variable is not set.")
    luma_client = AsyncLumaAI(auth_token=token)
    try:
        # Stringify inside the try so a failure anywhere degrades to [].
        available = await luma_client.generations.camera_motion.list()
        return [str(m) for m in available]
    except Exception as e:
        print(f"Error fetching camera motions: {str(e)}")
        return []
# The remaining functions (generate_video, text_to_video, image_to_video) are unchanged
# and defined elsewhere in this file; they must be in scope before the UI block below runs.
# Build the Gradio UI. `text_to_video` and `image_to_video` are defined elsewhere
# in this file and must be in scope before this block executes.
with gr.Blocks() as demo:
    gr.Markdown("# Luma AI Text-to-Video Demo")

    with gr.Tab("Text to Video"):
        prompt = gr.Textbox(label="Prompt")
        generate_btn = gr.Button("Generate Video")
        video_output = gr.Video(label="Generated Video")
        error_output = gr.Textbox(label="Error Messages", visible=True)

        with gr.Accordion("Advanced Options", open=False):
            loop = gr.Checkbox(label="Loop", value=False)
            aspect_ratio = gr.Dropdown(label="Aspect Ratio", choices=["16:9", "1:1", "9:16", "4:3", "3:4"], value="16:9")
            # Choices are empty at build time; populated on demo.load below.
            camera_motion = gr.Dropdown(label="Camera Motion", choices=[])
            extend_id = gr.Textbox(label="Extend Video ID (optional)")
            reverse_extend_id = gr.Textbox(label="Reverse Extend Video ID (optional)")
            with gr.Row():
                interpolate_id1 = gr.Textbox(label="Interpolate Video ID 1 (optional)")
                interpolate_id2 = gr.Textbox(label="Interpolate Video ID 2 (optional)")

        generate_btn.click(
            text_to_video,
            inputs=[prompt, loop, aspect_ratio, camera_motion, extend_id, reverse_extend_id, interpolate_id1, interpolate_id2],
            outputs=[video_output, error_output]
        )

    with gr.Tab("Image to Video"):
        img_prompt = gr.Textbox(label="Prompt")
        img_url = gr.Textbox(label="Image URL")
        img_generate_btn = gr.Button("Generate Video from Image")
        img_video_output = gr.Video(label="Generated Video")
        img_error_output = gr.Textbox(label="Error Messages", visible=True)

        with gr.Accordion("Advanced Options", open=False):
            img_loop = gr.Checkbox(label="Loop", value=False)
            img_aspect_ratio = gr.Dropdown(label="Aspect Ratio", choices=["16:9", "1:1", "9:16", "4:3", "3:4"], value="16:9")
            # Populated on demo.load, same as the text-to-video dropdown.
            img_camera_motion = gr.Dropdown(label="Camera Motion", choices=[])

        img_generate_btn.click(
            image_to_video,
            inputs=[img_prompt, img_url, img_loop, img_aspect_ratio, img_camera_motion],
            outputs=[img_video_output, img_error_output]
        )

    async def update_camera_motions():
        """Populate both camera-motion dropdowns once the UI loads.

        Returns a dict mapping each dropdown component to a gr.update with
        the fetched choices; falls back to empty choices on any error.
        """
        try:
            motions = await get_camera_motions()
            return {camera_motion: gr.update(choices=motions), img_camera_motion: gr.update(choices=motions)}
        except Exception as e:
            print(f"Error updating camera motions: {str(e)}")
            return {camera_motion: gr.update(choices=[]), img_camera_motion: gr.update(choices=[])}

    # BUG FIX: the handler returns a dict keyed by components, and Gradio only
    # applies dict-style returns to components listed in `outputs`. The original
    # call passed no outputs, so the dropdowns were never populated.
    demo.load(update_camera_motions, outputs=[camera_motion, img_camera_motion])

demo.queue().launch(debug=True)