# NOTE: removed non-Python residue (Hugging Face Spaces page header, git-blame
# hashes, and line-number gutter) that was accidentally captured with the file.
import gradio as gr
import os
from lumaai import AsyncLumaAI
import asyncio
import aiohttp
from transformers import pipeline
# Initialize the Korean-to-English translation pipeline (Helsinki-NLP MarianMT).
# Loaded once at import time so each request reuses the same model instance.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
async def get_camera_motions():
    """Fetch the camera-motion options supported by the Luma AI API.

    Reads the API key from the ``LMGEN_KEY`` environment variable.

    Returns:
        A list of camera-motion names as strings; an empty list if the
        API call fails (best-effort, errors are logged to stdout).

    Raises:
        gr.Error: If ``LMGEN_KEY`` is not set.
    """
    token = os.getenv("LMGEN_KEY")
    if not token:
        raise gr.Error("LMGEN_KEY environment variable is not set.")
    luma_client = AsyncLumaAI(auth_token=token)
    try:
        available = await luma_client.generations.camera_motion.list()
        # Convert each motion object to its string form for dropdown choices.
        return [str(motion) for motion in available]
    except Exception as err:
        print(f"Error fetching camera motions: {str(err)}")
        return []
async def generate_video(client, prompt, loop=False, aspect_ratio="16:9", camera_motion=None, extend_id=None, reverse_extend_id=None, interpolate_ids=None, progress=gr.Progress()):
    """Create a video via the Luma AI API, poll until done, and download it.

    Args:
        client: An ``AsyncLumaAI`` client instance.
        prompt: Text prompt describing the video.
        loop: Whether the generated video should loop.
        aspect_ratio: Aspect ratio string, e.g. ``"16:9"``.
        camera_motion: Optional camera-motion name; appended to the prompt.
        extend_id: Generation ID to extend forward from (mutually exclusive
            with the two options below; checked in this order).
        reverse_extend_id: Generation ID to extend backward into.
        interpolate_ids: Two-element sequence of generation IDs to
            interpolate between.
        progress: Gradio progress tracker.

    Returns:
        Path of the downloaded ``.mp4`` file.

    Raises:
        Exception: If the generation fails or the download returns a
            non-200 HTTP status.
    """
    generation_params = {
        "prompt": prompt,
        "loop": loop,
        "aspect_ratio": aspect_ratio,
    }
    if camera_motion:
        # Camera motion is expressed by appending its name to the prompt,
        # not via a separate API field.
        generation_params["prompt"] += f" {camera_motion}"

    # Keyframe modes are mutually exclusive; precedence: extend, then
    # reverse-extend, then interpolate (matches original behavior).
    if extend_id:
        generation_params["keyframes"] = {
            "frame0": {"type": "generation", "id": extend_id}
        }
    elif reverse_extend_id:
        generation_params["keyframes"] = {
            "frame1": {"type": "generation", "id": reverse_extend_id}
        }
    elif interpolate_ids:
        generation_params["keyframes"] = {
            "frame0": {"type": "generation", "id": interpolate_ids[0]},
            "frame1": {"type": "generation", "id": interpolate_ids[1]},
        }

    progress(0, desc="λΉλμ€ μμ± μμ μ€...")
    generation = await client.generations.create(**generation_params)
    progress(0.1, desc="λΉλμ€ μμ± μμλ¨. μλ£ λκΈ° μ€...")

    # Poll for completion. get_running_loop() replaces the deprecated
    # get_event_loop() pattern inside a coroutine.
    loop_time = asyncio.get_running_loop().time
    start_time = loop_time()
    while True:
        status = await client.generations.get(id=generation.id)
        if status.state == "completed":
            break
        if status.state == "failed":
            raise Exception("λΉλμ€ μμ± μ€ν¨")
        # Progress advances 0.1 -> 0.9 assuming ~60 s total generation time.
        elapsed_time = loop_time() - start_time
        progress_value = min(0.1 + (elapsed_time / 60) * 0.8, 0.9)
        progress(progress_value, desc="λΉλμ€ μμ± μ€...")
        await asyncio.sleep(5)

    progress(0.9, desc="μμ±λ λΉλμ€ λ€μ΄λ‘λ μ€...")
    video_url = status.assets.video
    file_name = f"luma_ai_generated_{generation.id}.mp4"
    async with aiohttp.ClientSession() as session:
        async with session.get(video_url) as resp:
            if resp.status != 200:
                # BUG FIX: the original silently fell through on HTTP errors
                # and then returned an unbound file_name (UnboundLocalError).
                raise Exception(f"Video download failed with HTTP status {resp.status}")
            with open(file_name, 'wb') as fd:
                # Stream the body in 1 KiB chunks to avoid buffering the
                # whole video in memory.
                while True:
                    chunk = await resp.content.read(1024)
                    if not chunk:
                        break
                    fd.write(chunk)
    progress(1.0, desc="λΉλμ€ μμ± μλ£!")
    return file_name
async def translate_prompt(prompt):
    """Translate a (Korean) prompt to English with the local pipeline.

    Falls back to returning the original prompt unchanged if the
    translation pipeline raises, logging the error to stdout.
    """
    try:
        result = translator(prompt, max_length=512)
        return result[0]['translation_text']
    except Exception as err:
        print(f"λ²μ μ€ μ€λ₯ λ°μ: {str(err)}")
        # Best-effort: hand back the untranslated prompt on failure.
        return prompt
async def text_to_video(prompt, loop, aspect_ratio, camera_motion, extend_id, reverse_extend_id, interpolate_id1, interpolate_id2, progress=gr.Progress()):
    """Gradio handler: translate the prompt, then generate a video from text.

    Returns:
        A ``(video_path, "")`` tuple on success, or ``(None, error_message)``
        if anything goes wrong.

    Raises:
        gr.Error: If the ``LMGEN_KEY`` environment variable is not set.
    """
    token = os.getenv("LMGEN_KEY")
    if not token:
        raise gr.Error("LMGEN_KEY νκ²½ λ³μκ° μ€μ λμ§ μμμ΅λλ€.")
    luma_client = AsyncLumaAI(auth_token=token)
    try:
        english_prompt = await translate_prompt(prompt)
        print(f"μλ³Έ ν둬ννΈ: {prompt}")
        print(f"λ²μλ ν둬ννΈ: {english_prompt}")

        # Interpolation requires both endpoint IDs; otherwise pass None.
        id_pair = None
        if interpolate_id1 and interpolate_id2:
            id_pair = [interpolate_id1, interpolate_id2]

        result_path = await generate_video(
            luma_client, english_prompt, loop, aspect_ratio, camera_motion,
            extend_id, reverse_extend_id, id_pair, progress
        )
        return result_path, ""
    except Exception as err:
        return None, f"μ€λ₯ λ°μ: {str(err)}"
async def image_to_video(prompt, image_url, loop, aspect_ratio, camera_motion, progress=gr.Progress()):
    """Gradio handler: generate a video starting from an input image.

    Args:
        prompt: Text prompt (translated to English before submission).
        image_url: URL of the image used as the first keyframe.
        loop: Whether the generated video should loop.
        aspect_ratio: Aspect ratio string, e.g. ``"16:9"``.
        camera_motion: Optional camera-motion name; appended to the prompt.
        progress: Gradio progress tracker.

    Returns:
        A ``(video_path, "")`` tuple on success, or ``(None, error_message)``
        if anything goes wrong.

    Raises:
        gr.Error: If the ``LMGEN_KEY`` environment variable is not set.
    """
    api_key = os.getenv("LMGEN_KEY")
    if not api_key:
        raise gr.Error("LMGEN_KEY νκ²½ λ³μκ° μ€μ λμ§ μμμ΅λλ€.")
    client = AsyncLumaAI(auth_token=api_key)
    try:
        translated_prompt = await translate_prompt(prompt)
        print(f"μλ³Έ ν둬ννΈ: {prompt}")
        print(f"λ²μλ ν둬ννΈ: {translated_prompt}")

        generation_params = {
            "prompt": translated_prompt + (f" {camera_motion}" if camera_motion else ""),
            "loop": loop,
            "aspect_ratio": aspect_ratio,
            # The source image becomes the first keyframe of the video.
            "keyframes": {
                "frame0": {"type": "image", "url": image_url}
            },
        }

        progress(0, desc="μ΄λ―Έμ§λ‘λΆν° λΉλμ€ μμ± μμ μ€...")
        generation = await client.generations.create(**generation_params)
        progress(0.1, desc="λΉλμ€ μμ± μμλ¨. μλ£ λκΈ° μ€...")

        # Poll for completion. get_running_loop() replaces the deprecated
        # get_event_loop() pattern inside a coroutine.
        loop_time = asyncio.get_running_loop().time
        start_time = loop_time()
        while True:
            status = await client.generations.get(id=generation.id)
            if status.state == "completed":
                break
            if status.state == "failed":
                raise Exception("λΉλμ€ μμ± μ€ν¨")
            # Progress advances 0.1 -> 0.9 assuming ~60 s total generation.
            elapsed_time = loop_time() - start_time
            progress_value = min(0.1 + (elapsed_time / 60) * 0.8, 0.9)
            progress(progress_value, desc="λΉλμ€ μμ± μ€...")
            await asyncio.sleep(5)

        progress(0.9, desc="μμ±λ λΉλμ€ λ€μ΄λ‘λ μ€...")
        video_url = status.assets.video
        file_name = f"luma_ai_generated_{generation.id}.mp4"
        async with aiohttp.ClientSession() as session:
            async with session.get(video_url) as resp:
                if resp.status != 200:
                    # BUG FIX: the original silently fell through on HTTP
                    # errors and then returned an unbound file_name.
                    raise Exception(f"Video download failed with HTTP status {resp.status}")
                with open(file_name, 'wb') as fd:
                    # Stream in 1 KiB chunks to avoid buffering the whole
                    # video in memory.
                    while True:
                        chunk = await resp.content.read(1024)
                        if not chunk:
                            break
                        fd.write(chunk)
        progress(1.0, desc="λΉλμ€ μμ± μλ£!")
        return file_name, ""
    except Exception as e:
        return None, f"μ€λ₯ λ°μ: {str(e)}"
# --- Gradio UI ---------------------------------------------------------------
# NOTE(review): the original label strings were mojibake-corrupted (several
# literals were even split across physical lines, a syntax error), so all
# user-facing labels are rebuilt in English here.
with gr.Blocks() as demo:
    gr.Markdown("# Luma AI Text-to-Video Generation Demo")

    with gr.Tab("Text to Video"):
        prompt = gr.Textbox(label="Prompt (describe the video to generate)")
        generate_btn = gr.Button("Generate Video")
        video_output = gr.Video(label="Generated Video")
        error_output = gr.Textbox(label="Error Message", visible=True)

        with gr.Accordion("Advanced Options", open=False):
            loop = gr.Checkbox(label="Loop (play the video repeatedly)", value=False)
            # (label, value) pairs keep the UI descriptive while passing a
            # clean "W:H" string to the API. BUG FIX: the original sent the
            # entire label (e.g. "16:9 (widescreen)") as the aspect_ratio.
            aspect_ratio = gr.Dropdown(
                label="Aspect Ratio",
                choices=[
                    ("16:9 (widescreen)", "16:9"),
                    ("1:1 (square)", "1:1"),
                    ("9:16 (vertical)", "9:16"),
                    ("4:3 (standard)", "4:3"),
                    ("3:4 (vertical standard)", "3:4"),
                ],
                value="16:9",
            )
            camera_motion = gr.Dropdown(label="Camera Motion (optional camera movement effect)")
            extend_id = gr.Textbox(label="Video ID to extend (continue an existing video)")
            reverse_extend_id = gr.Textbox(label="Video ID to reverse-extend (generate footage leading into an existing video)")
            with gr.Row():
                interpolate_id1 = gr.Textbox(label="Interpolation Video ID 1 (first video when interpolating between two)")
                interpolate_id2 = gr.Textbox(label="Interpolation Video ID 2 (second video when interpolating between two)")

        generate_btn.click(
            text_to_video,
            inputs=[prompt, loop, aspect_ratio, camera_motion, extend_id,
                    reverse_extend_id, interpolate_id1, interpolate_id2],
            outputs=[video_output, error_output]
        )

    with gr.Tab("Image to Video"):
        img_prompt = gr.Textbox(label="Prompt (describe the video to generate from the image)")
        img_url = gr.Textbox(label="Image URL (web address of the source image)")
        img_generate_btn = gr.Button("Generate Video from Image")
        img_video_output = gr.Video(label="Generated Video")
        img_error_output = gr.Textbox(label="Error Message", visible=True)

        with gr.Accordion("Advanced Options", open=False):
            img_loop = gr.Checkbox(label="Loop (play the video repeatedly)", value=False)
            img_aspect_ratio = gr.Dropdown(
                label="Aspect Ratio",
                choices=[
                    ("16:9 (widescreen)", "16:9"),
                    ("1:1 (square)", "1:1"),
                    ("9:16 (vertical)", "9:16"),
                    ("4:3 (standard)", "4:3"),
                    ("3:4 (vertical standard)", "3:4"),
                ],
                value="16:9",
            )
            img_camera_motion = gr.Dropdown(label="Camera Motion (optional camera movement effect)")

        img_generate_btn.click(
            image_to_video,
            inputs=[img_prompt, img_url, img_loop, img_aspect_ratio, img_camera_motion],
            outputs=[img_video_output, img_error_output]
        )

    async def update_camera_motions():
        """Populate both camera-motion dropdowns from the Luma AI API."""
        try:
            motions = await get_camera_motions()
            return gr.update(choices=motions), gr.update(choices=motions)
        except Exception as e:
            print(f"Error updating camera motions: {str(e)}")
            return gr.update(choices=[]), gr.update(choices=[])

    # Fetch the available camera motions once when the page loads.
    demo.load(update_camera_motions, outputs=[camera_motion, img_camera_motion])

demo.queue().launch(debug=True)