Update app.py
app.py CHANGED
@@ -4,21 +4,18 @@ from lumaai import AsyncLumaAI
 import asyncio
 import aiohttp
 
-# Store the list of camera motions in a global variable.
-camera_motions = []
-
 async def get_camera_motions():
-    global camera_motions
     api_key = os.getenv("LMGEN_KEY")
     if not api_key:
         raise gr.Error("LMGEN_KEY environment variable is not set.")
 
     client = AsyncLumaAI(auth_token=api_key)
-
-
-
-
-
+    try:
+        motions = await client.generations.camera_motion.list()
+        return [motion.name for motion in motions]
+    except Exception as e:
+        print(f"Error fetching camera motions: {str(e)}")
+        return []
 
 async def generate_video(client, prompt, loop=False, aspect_ratio="16:9", camera_motion=None, extend_id=None, reverse_extend_id=None, interpolate_ids=None, progress=gr.Progress()):
     generation_params = {
@@ -179,6 +176,8 @@ async def image_to_video(prompt, image_url, loop, aspect_ratio, camera_motion, p
 with gr.Blocks() as demo:
     gr.Markdown("# Luma AI Text-to-Video Demo")
 
+    camera_motions = gr.State([])
+
     with gr.Tab("Text to Video"):
         prompt = gr.Textbox(label="Prompt")
         generate_btn = gr.Button("Generate Video")
@@ -188,7 +187,7 @@ with gr.Blocks() as demo:
         with gr.Accordion("Advanced Options", open=False):
             loop = gr.Checkbox(label="Loop", value=False)
             aspect_ratio = gr.Dropdown(label="Aspect Ratio", choices=["16:9", "1:1", "9:16", "4:3", "3:4"], value="16:9")
-            camera_motion = gr.Dropdown(label="Camera Motion", choices=
+            camera_motion = gr.Dropdown(label="Camera Motion", choices=[])
             extend_id = gr.Textbox(label="Extend Video ID (optional)")
             reverse_extend_id = gr.Textbox(label="Reverse Extend Video ID (optional)")
         with gr.Row():
@@ -211,7 +210,7 @@ with gr.Blocks() as demo:
        with gr.Accordion("Advanced Options", open=False):
             img_loop = gr.Checkbox(label="Loop", value=False)
             img_aspect_ratio = gr.Dropdown(label="Aspect Ratio", choices=["16:9", "1:1", "9:16", "4:3", "3:4"], value="16:9")
-            img_camera_motion = gr.Dropdown(label="Camera Motion", choices=
+            img_camera_motion = gr.Dropdown(label="Camera Motion", choices=[])
 
         img_generate_btn.click(
             image_to_video,
@@ -219,4 +218,10 @@ with gr.Blocks() as demo:
             outputs=[img_video_output, img_error_output]
         )
 
-
+    async def update_camera_motions():
+        motions = await get_camera_motions()
+        return gr.Dropdown.update(choices=motions), gr.Dropdown.update(choices=motions)
+
+    demo.load(update_camera_motions, outputs=[camera_motion, img_camera_motion])
+
+demo.queue().launch(debug=True)
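
Note on the startup hook added at the end of the diff: `gr.Dropdown.update(choices=...)` is the Gradio 3.x component-update API, so this commit assumes a 3.x runtime. On Gradio 4+ that classmethod no longer exists and the callback returns `gr.update(...)` instead. A minimal sketch under that assumption (not part of this commit; `get_camera_motions`, `camera_motion`, and `img_camera_motion` are as defined above):

# Sketch only: Gradio 4+ variant of the same demo.load() hook.
# gr.update(choices=...) replaces the removed gr.Dropdown.update() classmethod;
# one update is returned per output component bound below.
async def update_camera_motions():
    motions = await get_camera_motions()
    return gr.update(choices=motions), gr.update(choices=motions)

demo.load(update_camera_motions, outputs=[camera_motion, img_camera_motion])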