Spaces:
Running
on
Zero
Running
on
Zero
Commit
·
d621fa5
1
Parent(s):
331b171
Add file
Browse files- app.py +160 -0
- requirements.txt +89 -0
app.py
ADDED
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import spaces
|
3 |
+
import torch
|
4 |
+
import numpy as np
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
from ltx_video.inference import infer, InferenceConfig
|
8 |
+
from functools import partial
|
9 |
+
|
10 |
+
@spaces.GPU
def create(
    prompt,
    mode="text-to-video",
    negative_prompt="",
    input_image_filepath=None,
    input_video_filepath=None,
    height_ui=512,
    width_ui=704,
    duration_ui=2.0,
    ui_frames_to_use=16,
    seed_ui=42,
    randomize_seed=True,
    ui_guidance_scale=3.0,
    improve_texture_flag=True,
    progress=gr.Progress(track_tqdm=True),
    fps=8,
):
    """Generate a video with the LTX Video model.

    Args:
        prompt: Text description of the video to generate.
        mode: One of "text-to-video", "image-to-video", "video-to-video".
        negative_prompt: Text describing what to avoid in the output.
        input_image_filepath: Source image path (required for image-to-video).
        input_video_filepath: Source video path (required for video-to-video).
        height_ui: Output height in pixels.
        width_ui: Output width in pixels.
        duration_ui: TODO(review): accepted but currently unused — frame count
            is controlled directly by ui_frames_to_use.
        ui_frames_to_use: Number of frames to generate.
        seed_ui: Seed used when randomize_seed is False.
        randomize_seed: If True, draw a fresh 32-bit seed per call.
        ui_guidance_scale: Classifier-free guidance strength.
        improve_texture_flag: Forwarded to InferenceConfig.improve_texture.
        progress: Gradio progress tracker (mirrors tqdm output in the UI).
        fps: TODO(review): accepted but currently unused — InferenceConfig is
            not given an fps field here; confirm whether the pipeline config
            controls output frame rate.

    Returns:
        Tuple of (output video filepath, human-readable status string).

    Raises:
        gr.Error: If the prompt is empty, or the selected mode is missing its
            required input file (previously these silently fell through to a
            plain text-to-video run).
    """
    # Fail fast with a visible UI error instead of generating the wrong thing.
    if not prompt or not prompt.strip():
        raise gr.Error("Please enter a prompt before generating.")
    if mode == "image-to-video" and not input_image_filepath:
        raise gr.Error("Image-to-video mode requires an input image.")
    if mode == "video-to-video" and not input_video_filepath:
        raise gr.Error("Video-to-video mode requires an input video.")

    # Pick the effective seed; 2**32 - 1 is the conventional 32-bit ceiling.
    used_seed = random.randint(0, 2**32 - 1) if randomize_seed else seed_ui

    # Seed in the filename keeps concurrent/repeat runs from clobbering
    # each other's output.
    output_path = f"output_{mode}_{used_seed}.mp4"

    config = InferenceConfig(
        pipeline_config="configs/ltxv-13b-0.9.8-distilled.yaml",
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=height_ui,
        width=width_ui,
        num_frames=ui_frames_to_use,
        seed=used_seed,
        guidance_scale=ui_guidance_scale,
        output_path=output_path,
        improve_texture=improve_texture_flag,
    )

    # Attach the conditioning media for the selected mode.
    # NOTE(review): assumes InferenceConfig exposes init_image/init_video
    # attributes — confirm against the ltx_video package version in use.
    if mode == "image-to-video":
        config.init_image = input_image_filepath
    elif mode == "video-to-video":
        config.init_video = input_video_filepath

    # Run inference (blocking; writes the file at output_path).
    infer(config)

    return output_path, f"✅ Done! Seed: {used_seed}"
|
60 |
+
|
61 |
+
# ---- Gradio Blocks & UI ----

# Adapter functions: Gradio's .click() passes component values positionally
# to the registered fn, so each tab gets a small wrapper that maps those
# positional values onto create()'s keyword parameters. (The original code
# passed dicts as inputs/outputs, which Gradio rejects — it expects
# components or lists of components — and partial(create, mode=...) would
# collide with the second positional value landing on `mode`.)

def _text_to_video(prompt, num_frames, fps, progress=gr.Progress(track_tqdm=True)):
    """Run create() in text-to-video mode for the text tab."""
    return create(prompt, mode="text-to-video", ui_frames_to_use=num_frames, fps=fps)


def _image_to_video(prompt, image_path, num_frames, fps, progress=gr.Progress(track_tqdm=True)):
    """Run create() in image-to-video mode for the image tab."""
    return create(
        prompt,
        mode="image-to-video",
        input_image_filepath=image_path,
        ui_frames_to_use=num_frames,
        fps=fps,
    )


def _video_to_video(prompt, video_path, strength, progress=gr.Progress(track_tqdm=True)):
    """Run create() in video-to-video mode for the video tab."""
    # NOTE(review): the UI's "Transformation Strength" slider is wired into
    # ui_guidance_scale, per the original mapping — confirm that is intended
    # (guidance scale and img2img-style strength are usually distinct knobs).
    return create(
        prompt,
        mode="video-to-video",
        input_video_filepath=video_path,
        ui_guidance_scale=strength,
    )


with gr.Blocks(title="AI Video Converter", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🎬 AI Video Converter")
    gr.Markdown("Convert text, images, and videos into stunning AI-generated videos!")

    with gr.Tabs():
        # --- Text to Video ---
        with gr.Tab("📝 Text to Video"):
            gr.Markdown("### Generate videos from text descriptions")
            with gr.Row():
                with gr.Column():
                    text_prompt = gr.Textbox(
                        label="Text Prompt",
                        placeholder="Describe the video you want to create...",
                        value="A Nigerian woman dancing on the streets of Lagos, Nigeria",
                        lines=3,
                    )
                    text_num_frames = gr.Slider(
                        minimum=8, maximum=32, value=16, step=1, label="Number of Frames"
                    )
                    text_fps = gr.Slider(
                        minimum=4, maximum=30, value=8, step=1, label="Frames Per Second"
                    )
                    text_generate_video_btn = gr.Button("Generate Video", variant="primary")

                with gr.Column():
                    text_output_video = gr.Video(label="Generated Video")
                    text_status = gr.Textbox(label="Status", interactive=False)

        # --- Image to Video ---
        with gr.Tab("🖼️ Image to Video"):
            gr.Markdown("### Animate images into videos")
            with gr.Row():
                with gr.Column():
                    image_input = gr.Image(
                        label="Input Image",
                        type="filepath",
                        sources=["upload", "webcam", "clipboard"],
                    )
                    image_text_prompt = gr.Textbox(
                        label="Text Prompt",
                        placeholder="Describe the video you want to create...",
                        value="The creature from the image starts to move",
                        lines=3,
                    )
                    image_num_frames = gr.Slider(
                        minimum=8, maximum=50, value=25, step=1, label="Number of Frames"
                    )
                    image_fps = gr.Slider(
                        minimum=4, maximum=30, value=8, step=1, label="Frames Per Second"
                    )
                    image_generate_video_btn = gr.Button("Generate Video", variant="primary")

                with gr.Column():
                    image_output_video = gr.Video(label="Generated Video")
                    image_status = gr.Textbox(label="Status", interactive=False)

        # --- Video to Video ---
        with gr.Tab("🎥 Video to Video"):
            gr.Markdown("### Transform videos with AI")
            with gr.Row():
                with gr.Column():
                    # gr.Video has no `type` parameter (it already yields a
                    # filepath) and does not support a "clipboard" source;
                    # both caused TypeErrors/ValueErrors in the original.
                    video_input = gr.Video(
                        label="Input Video",
                        sources=["upload", "webcam"],
                    )
                    video_prompt = gr.Textbox(
                        label="Transformation Prompt",
                        placeholder="Describe how you want to transform the video...",
                        lines=3,
                    )
                    video_strength = gr.Slider(
                        minimum=0.1, maximum=1.0, value=0.8, step=0.1,
                        label="Transformation Strength",
                    )
                    video_generate_video_btn = gr.Button("Transform Video", variant="primary")

                with gr.Column():
                    video_output_video = gr.Video(label="Transformed Video")
                    video_status = gr.Textbox(label="Status", interactive=False)

    # --- Button Logic ---
    # inputs/outputs are lists of components, matched positionally to the
    # adapter functions' parameters / return tuple.
    text_generate_video_btn.click(
        fn=_text_to_video,
        inputs=[text_prompt, text_num_frames, text_fps],
        outputs=[text_output_video, text_status],
    )

    image_generate_video_btn.click(
        fn=_image_to_video,
        inputs=[image_text_prompt, image_input, image_num_frames, image_fps],
        outputs=[image_output_video, image_status],
    )

    video_generate_video_btn.click(
        fn=_video_to_video,
        inputs=[video_prompt, video_input, video_strength],
        outputs=[video_output_video, video_status],
    )

if __name__ == "__main__":
    demo.launch(debug=True, share=False)
|
requirements.txt
ADDED
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
aiofiles==24.1.0
|
2 |
+
annotated-types==0.7.0
|
3 |
+
anyio==4.9.0
|
4 |
+
audioop-lts==0.2.2
|
5 |
+
blinker==1.9.0
|
6 |
+
Brotli==1.1.0
|
7 |
+
certifi==2025.7.14
|
8 |
+
charset-normalizer==3.4.3
|
9 |
+
click==8.2.1
|
10 |
+
decorator==5.2.1
|
11 |
+
diffusers==0.35.1
|
12 |
+
einops==0.8.1
|
13 |
+
fastapi==0.116.1
|
14 |
+
ffmpy==0.6.1
|
15 |
+
filelock==3.19.1
|
16 |
+
Flask==3.1.1
|
17 |
+
flask-cors==6.0.1
|
18 |
+
fsspec==2025.7.0
|
19 |
+
gradio==5.43.1
|
20 |
+
gradio_client==1.12.1
|
21 |
+
groovy==0.1.2
|
22 |
+
h11==0.16.0
|
23 |
+
hf-xet==1.1.8
|
24 |
+
httpcore==1.0.9
|
25 |
+
httpx==0.28.1
|
26 |
+
httpx-ws==0.7.2
|
27 |
+
huggingface-hub==0.34.4
|
28 |
+
idna==3.10
|
29 |
+
imageio==2.37.0
|
30 |
+
imageio-ffmpeg==0.6.0
|
31 |
+
importlib_metadata==8.7.0
|
32 |
+
itsdangerous==2.2.0
|
33 |
+
Jinja2==3.1.6
|
34 |
+
lmstudio==1.4.1
|
35 |
+
ltx-video==0.1.2
|
36 |
+
markdown-it-py==4.0.0
|
37 |
+
MarkupSafe==3.0.2
|
38 |
+
mdurl==0.1.2
|
39 |
+
moviepy==2.2.1
|
40 |
+
mpmath==1.3.0
|
41 |
+
msgspec==0.19.0
|
42 |
+
networkx==3.5
|
43 |
+
numpy==2.3.2
|
44 |
+
orjson==3.11.2
|
45 |
+
packaging==25.0
|
46 |
+
pandas==2.3.1
|
47 |
+
pillow==11.3.0
|
48 |
+
proglog==0.1.12
|
49 |
+
pydantic==2.11.7
|
50 |
+
pydantic_core==2.33.2
|
51 |
+
pydub==0.25.1
|
52 |
+
Pygments==2.19.2
|
53 |
+
python-dateutil==2.9.0.post0
|
54 |
+
python-dotenv==1.1.1
|
55 |
+
python-multipart==0.0.20
|
56 |
+
pytz==2025.2
|
57 |
+
PyYAML==6.0.2
|
58 |
+
regex==2025.7.34
|
59 |
+
requests==2.32.5
|
60 |
+
rich==14.1.0
|
61 |
+
ruff==0.12.9
|
62 |
+
safehttpx==0.1.6
|
63 |
+
safetensors==0.6.2
|
64 |
+
scipy==1.16.1
|
65 |
+
semantic-version==2.10.0
|
66 |
+
sentencepiece==0.2.1
|
67 |
+
setuptools==80.9.0
|
68 |
+
shellingham==1.5.4
|
69 |
+
six==1.17.0
|
70 |
+
sniffio==1.3.1
|
71 |
+
starlette==0.47.2
|
72 |
+
sympy==1.14.0
|
73 |
+
timm==1.0.19
|
74 |
+
tokenizers==0.21.4
|
75 |
+
tomlkit==0.13.3
|
76 |
+
torch==2.8.0
|
77 |
+
torchvision==0.23.0
|
78 |
+
tqdm==4.67.1
|
79 |
+
transformers==4.51.3
|
80 |
+
typer==0.16.1
|
81 |
+
typing-inspection==0.4.1
|
82 |
+
typing_extensions==4.14.1
|
83 |
+
tzdata==2025.2
|
84 |
+
urllib3==2.5.0
|
85 |
+
uvicorn==0.35.0
|
86 |
+
websockets==15.0.1
|
87 |
+
Werkzeug==3.1.3
|
88 |
+
wsproto==1.2.0
|
89 |
+
zipp==3.23.0
|