Muhammad Taqi Raza committed
Commit fdebd91 · Parent(s): fa72d99

push
gradio_app.py CHANGED (+16 -3)
@@ -42,7 +42,7 @@ download_models()
 def run_epic_inference(video_path, caption, motion_type):
     temp_input_path = "/app/temp_input.mp4"
     output_dir = f"/app/output_anchor"
-    video_output_path = f"{output_dir}/
+    video_output_path = f"{output_dir}/masked_videos/output.mp4"
     traj_name = motion_type
     traj_txt = f"/app/inference/v2v_data/test/trajs/{traj_name}.txt"
     # Save uploaded video
@@ -65,7 +65,6 @@ def run_epic_inference(video_path, caption, motion_type):
 
     # Run inference command
     try:
-
         result = subprocess.run(command, capture_output=True, text=True, check=True)
         print("Getting Anchor Videos run successfully.")
         logs = result.stdout
@@ -80,6 +79,7 @@ def run_epic_inference(video_path, caption, motion_type):
     return f"Inference succeeded but no output video found in {output_dir}", None
 
 def inference(video_path, caption, motion_type):
+    logs, video_masked = run_epic_inference(video_path, caption, motion_type)
 
     MODEL_PATH="/app/pretrained/CogVideoX-5b-I2V"
 
@@ -122,12 +122,25 @@ def inference(video_path, caption, motion_type):
     else:
         print(f"Error occurred during inference: {result.stderr}")
 
+    # Print output directory contents
+    print(f"\nContents of output directory: {out_dir}")
+    for root, dirs, files in os.walk(out_dir):
+        level = root.replace(out_dir, '').count(os.sep)
+        indent = ' ' * 4 * level
+        print(f"{indent}{os.path.basename(root)}/")
+        sub_indent = ' ' * 4 * (level + 1)
+        for f in files:
+            print(f"{sub_indent}{f}")
+
+    logs = result.stdout
+
+    return logs, str(f"{out_dir}/output.mp4")
 
 # -----------------------------
 # Step 3: Create Gradio UI
 # -----------------------------
 demo = gr.Interface(
-    fn=
+    fn=inference,
     inputs=[
         gr.Video(label="Upload Video (MP4)"),
         gr.Textbox(label="Caption", placeholder="e.g., Amalfi coast with boats"),
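For reference, the try/subprocess.run(..., check=True) pattern used in run_epic_inference behaves roughly as sketched below. The command list here is a placeholder, and the except branch is an assumption about how the rest of the file handles failures; only the try side appears in this diff.

import subprocess

# Placeholder command; gradio_app.py builds its own inference command list.
command = ["python", "--version"]

try:
    # check=True makes a non-zero exit raise CalledProcessError instead of failing silently.
    result = subprocess.run(command, capture_output=True, text=True, check=True)
    print("Getting Anchor Videos run successfully.")
    logs = result.stdout
except subprocess.CalledProcessError as exc:
    # stderr is available on the exception because capture_output=True was set.
    logs = exc.stderr
    print(f"Error occurred during inference: {exc.stderr}")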
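The directory-listing block added to inference() is equivalent to the small standalone helper below (a sketch; the path passed in is illustrative).

import os

def print_tree(out_dir):
    # Walk out_dir and print an indented tree: one line per directory, files nested beneath it.
    print(f"\nContents of output directory: {out_dir}")
    for root, dirs, files in os.walk(out_dir):
        level = root.replace(out_dir, '').count(os.sep)
        indent = ' ' * 4 * level
        print(f"{indent}{os.path.basename(root)}/")
        sub_indent = ' ' * 4 * (level + 1)
        for f in files:
            print(f"{sub_indent}{f}")

print_tree("/tmp")  # illustrative path; the app passes its own out_dir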
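Since the commit points fn= at the new inference function, which returns a log string and a video path, the interface presumably ends with matching output components. A minimal self-contained sketch follows, with a stub in place of the real pipeline; the third input, the outputs list, and the launch() call are assumptions, not shown in the diff.

import gradio as gr

def inference(video_path, caption, motion_type):
    # Stub standing in for the real two-stage pipeline; the real function
    # returns (logs, path to the generated output.mp4).
    return f"caption={caption}, motion={motion_type}", video_path

demo = gr.Interface(
    fn=inference,
    inputs=[
        gr.Video(label="Upload Video (MP4)"),
        gr.Textbox(label="Caption", placeholder="e.g., Amalfi coast with boats"),
        gr.Dropdown(label="Motion Type", choices=["zoom_in", "zoom_out"]),  # assumed third input
    ],
    outputs=[
        gr.Textbox(label="Logs"),           # assumed: the logs string returned first
        gr.Video(label="Generated Video"),  # assumed: the returned video path
    ],
)

if __name__ == "__main__":
    demo.launch()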