Muhammad Taqi Raza committed
Commit
fa72d99
1 Parent(s): 49304f4
Files changed (1)
  1. gradio_app.py +59 -27
gradio_app.py CHANGED
@@ -38,58 +38,90 @@ download_models()
 # -----------------------------
 # Step 2: Inference Logic
 # -----------------------------
+
 def run_epic_inference(video_path, caption, motion_type):
     temp_input_path = "/app/temp_input.mp4"
-    output_dir = f"/app/output_{motion_type}"
-
-    video_output_path = f"{output_dir}/masked_videos/temp_input.mp4"
+    output_dir = f"/app/output_anchor"
+    video_output_path = f"{output_dir}/videos/output.mp4"
     traj_name = motion_type
     traj_txt = f"/app/inference/v2v_data/test/trajs/{traj_name}.txt"
-
     # Save uploaded video
     if video_path:
         os.system(f"cp '{video_path}' {temp_input_path}")
-
+
     command = [
-        "python",
-        "/app/inference/v2v_data/inference.py",
-        "--video_path",
-        temp_input_path,
-        "--stride",
-        "1",
-        "--out_dir",
-        output_dir,
-        "--radius_scale",
-        "1",
-        "--camera",
-        "traj",
-        "--mask",
+        "python", "/app/inference/v2v_data/inference.py",
+        "--video_path", temp_input_path,
+        "--stride", "1",
+        "--out_dir", output_dir,
+        "--radius_scale", "1",
+        "--camera", "traj",
+        "--mask",
         "--target_pose", "0", "30", "-0.6", "0", "0",
-        "--traj_txt",
-        traj_txt,
-        "--save_name",
-        f"temp_input",
-        "--mode",
-        "gradual",
-        "--out_dir",
-        output_dir,
+        "--traj_txt", traj_txt,
+        "--save_name", "output",
+        "--mode", "gradual",
     ]
 
     # Run inference command
     try:
+
         result = subprocess.run(command, capture_output=True, text=True, check=True)
+        print("Getting Anchor Videos run successfully.")
         logs = result.stdout
     except subprocess.CalledProcessError as e:
         logs = f"❌ Inference failed:\n{e.stderr}"
         return logs, None
 
     # Locate the output video
-    output_video = Path(output_dir) / f"amalfi-coast_traj_{traj_name}.mp4"
     if video_output_path:
         return logs, str(video_output_path)
     else:
         return f"Inference succeeded but no output video found in {output_dir}", None
 
+def inference(video_path, caption, motion_type):
+
+    MODEL_PATH="/app/pretrained/CogVideoX-5b-I2V"
+
+    ckpt_steps=500
+    ckpt_dir="/app/out/EPiC_pretrained"
+    ckpt_file=f"checkpoint-{ckpt_steps}.pt"
+    ckpt_path=f"{ckpt_dir}/{ckpt_file}"
+
+    video_root_dir= f"/app/output_anchor"
+    out_dir=f"/app/output"
+
+    command = [
+        "python", "/app/inference/cli_demo_camera_i2v_pcd.py",
+        "--video_root_dir", video_root_dir,
+        "--base_model_path", MODEL_PATH,
+        "--controlnet_model_path", ckpt_path,
+        "--output_path", out_dir,
+        "--start_camera_idx", "0",
+        "--end_camera_idx", "8",
+        "--controlnet_weights", "1.0",
+        "--controlnet_guidance_start", "0.0",
+        "--controlnet_guidance_end", "0.4",
+        "--controlnet_input_channels", "3",
+        "--controlnet_transformer_num_attn_heads", "4",
+        "--controlnet_transformer_attention_head_dim", "64",
+        "--controlnet_transformer_out_proj_dim_factor", "64",
+        "--controlnet_transformer_out_proj_dim_zero_init",
+        "--vae_channels", "16",
+        "--num_frames", "49",
+        "--controlnet_transformer_num_layers", "8",
+        "--infer_with_mask",
+        "--pool_style", "max",
+        "--seed", "43"
+    ]
+
+    # Run the command
+    result = subprocess.run(command, capture_output=True, text=True)
+    if result.returncode == 0:
+        print("Inference completed successfully.")
+    else:
+        print(f"Error occurred during inference: {result.stderr}")
+
 
 # -----------------------------
 # Step 3: Create Gradio UI
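
The commit leaves the two stages as separate functions: run_epic_inference() writes anchor videos under /app/output_anchor and returns (logs, video_output_path), while the new inference() runs the CogVideoX-5b-I2V ControlNet pass over that directory into /app/output. A minimal sketch of how the Gradio handler in Step 3 might chain them is given below; run_full_pipeline and the final-video lookup are illustrative assumptions, not part of the commit.

# Sketch only (not in the commit): chain anchor generation and the ControlNet
# pass, surfacing the first stage's logs if it fails.
from pathlib import Path

def run_full_pipeline(video_path, caption, motion_type):
    # Stage 1: anchor/masked videos are written under /app/output_anchor
    logs, anchor_video = run_epic_inference(video_path, caption, motion_type)
    if anchor_video is None:
        return logs, None

    # Stage 2: CogVideoX ControlNet inference over the anchor directory,
    # writing results under /app/output
    inference(video_path, caption, motion_type)

    # Pick up whatever the second stage produced (the output layout here
    # is an assumption, not taken from the diff)
    outputs = sorted(Path("/app/output").glob("**/*.mp4"))
    return logs, (str(outputs[-1]) if outputs else None)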