update examples
app.py CHANGED
@@ -243,7 +243,7 @@ with gr.Blocks() as demo:
     gr.Markdown(
         """
         # 🏞 Video Composer
-        Add video, image and audio assets and let [Qwen2.5-Coder](https://huggingface.co/Qwen/Qwen2.5-Coder-32B) compose a new video.
+        Compose new videos with your assets using natural language. Add video, image and audio assets and let [Qwen2.5-Coder](https://huggingface.co/Qwen/Qwen2.5-Coder-32B) compose a new video.
         **Please note: This demo is not a generative AI model, it only uses [Qwen2.5-Coder](https://huggingface.co/Qwen/Qwen2.5-Coder-32B) to generate a valid FFMPEG command based on the input files and the prompt.**
         """,
         elem_id="header",
@@ -291,6 +291,18 @@ with gr.Blocks() as demo:
     with gr.Row():
         gr.Examples(
             examples=[
+                [
+                    ["./examples/heat-wave.mp3", "./examples/square-image.png"],
+                    "Compose a 720x720 output with the background image scaled to fill, add a full-width transparent waveform visualization positioned in center of the original audio.",
+                    0.7,
+                    0.1,
+                ],
+                [
+                    ["./examples/waterfall-overlay.png", "./examples/waterfall.mp4"],
+                    "Add the overlay to the video.",
+                    0.7,
+                    0.1,
+                ],
                 [
                     [
                         "./examples/cat8.jpeg",
@@ -303,34 +315,22 @@ with gr.Blocks() as demo:
                         "./examples/cat7.jpeg",
                         "./examples/heat-wave.mp3",
                     ],
-                    "
-                    0,
-                    0,
+                    "Make a video from the images, each image with 1s loop and add the audio as background",
+                    0.7,
+                    0.1,
                 ],
                 [
                     ["./examples/example.mp4"],
-                    "
-                    0,
-                    0,
-                ],
-                [
-                    ["./examples/heat-wave.mp3", "./examples/square-image.png"],
-                    "Make a 720x720 video, a white waveform of the audio, and finally add add the input image as the background all along the video.",
-                    0,
-                    0,
-                ],
-                [
-                    ["./examples/waterfall-overlay.png", "./examples/waterfall.mp4"],
-                    "Add the overlay to the video.",
-                    0,
-                    0,
+                    "Make this video 10 times faster",
+                    0.7,
+                    0.1,
                 ],
             ],
             inputs=[user_files, user_prompt, top_p, temperature],
             outputs=[generated_video, generated_command],
             fn=update,
             run_on_click=True,
-            cache_examples=
+            cache_examples=False,
         )

     with gr.Row():
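For context on what this change wires up: each example row now fills `user_files`, `user_prompt`, `top_p` (0.7) and `temperature` (0.1) in the order given by `inputs=[user_files, user_prompt, top_p, temperature]`, and with `run_on_click=True` and `cache_examples=False` clicking a row re-runs the `update` callback rather than replaying a cached output. The sketch below is a minimal, self-contained illustration of that `gr.Examples` wiring; the component constructors, labels, and the stub `update` body are assumptions, since the real implementation (prompting Qwen2.5-Coder and executing the resulting FFMPEG command) is not part of this diff.

```python
# Minimal sketch of the gr.Examples wiring shown in the diff above.
# The `update` body is a placeholder; the real app asks Qwen2.5-Coder for an
# FFMPEG command and runs it, which happens outside this change.
import gradio as gr

def update(files, prompt, top_p, temperature):
    # Placeholder: return no video and a dummy command string.
    command = f"ffmpeg ...  # would be generated for prompt: {prompt!r}"
    return None, command

with gr.Blocks() as demo:
    user_files = gr.File(file_count="multiple", label="Assets")       # assumed constructor/label
    user_prompt = gr.Textbox(label="Prompt")                          # assumed constructor/label
    top_p = gr.Slider(0, 1, value=0.7, label="top_p")                 # assumed constructor/label
    temperature = gr.Slider(0, 1, value=0.1, label="temperature")     # assumed constructor/label
    generated_video = gr.Video(label="Generated video")               # assumed constructor/label
    generated_command = gr.Textbox(label="FFMPEG command")            # assumed constructor/label

    gr.Examples(
        examples=[
            [["./examples/example.mp4"], "Make this video 10 times faster", 0.7, 0.1],
        ],
        inputs=[user_files, user_prompt, top_p, temperature],
        outputs=[generated_video, generated_command],
        fn=update,
        run_on_click=True,
        cache_examples=False,  # re-run on click instead of serving cached outputs
    )

demo.launch()
```

Switching the trailing example values from `0, 0` to `0.7, 0.1` makes the pre-filled sampling settings match sensible defaults for command generation instead of zeroing out `top_p` and `temperature` when an example is clicked.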