Sean Carnahan committed
Commit 376ee7c · 1 Parent(s): 56926a1

Deep debug: log environment, versions, and every major step in process_video_movenet

Files changed (1)
  1. app.py +42 -18
app.py CHANGED
@@ -174,8 +174,15 @@ def after_request(response):
 def process_video_movenet(video_path):
     try:
         print("[DEBUG] Starting MoveNet video processing")
+        print(f"[DEBUG] Python version: {sys.version}")
+        print(f"[DEBUG] OpenCV version: {cv2.__version__}")
+        print(f"[DEBUG] TensorFlow version: {tf.__version__}")
+        print(f"[DEBUG] Upload dir contents: {os.listdir(os.path.dirname(video_path))}")
+        print(f"[DEBUG] Current working dir: {os.getcwd()}")
+        print(f"[DEBUG] Model dir contents: {os.listdir(os.path.join(BASE_DIR, 'external', 'BodybuildingPoseClassifier'))}")
         cap = cv2.VideoCapture(video_path)
         if not cap.isOpened():
+            print(f"[ERROR] Could not open video file: {video_path}")
             raise ValueError("Could not open video file")
 
         # Get video properties
@@ -187,9 +194,16 @@ def process_video_movenet(video_path):
 
         # Force MoveNet to CPU to avoid GPU JIT error
         print("[DEBUG] Forcing CPU for MoveNet (due to GPU JIT error)")
-        with tf.device('/CPU:0'):
-            movenet_model = hub.load("https://tfhub.dev/google/movenet/singlepose/lightning/4")
-            movenet = movenet_model.signatures['serving_default']
+        try:
+            with tf.device('/CPU:0'):
+                print("[DEBUG] Loading MoveNet model...")
+                movenet_model = hub.load("https://tfhub.dev/google/movenet/singlepose/lightning/4")
+                movenet = movenet_model.signatures['serving_default']
+            print("[DEBUG] MoveNet model loaded.")
+        except Exception as e:
+            print(f"[ERROR] Exception during MoveNet model load: {e}")
+            import traceback; traceback.print_exc()
+            raise
 
         # Create output video writer
         output_filename = f'output_movenet_lightning.mp4'
@@ -199,23 +213,25 @@ def process_video_movenet(video_path):
         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
         out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
         if not out.isOpened():
+            print(f"[ERROR] Failed to create output video writer at {output_path}")
             raise ValueError(f"Failed to create output video writer at {output_path}")
 
         frame_count = 0
         processed_frames = 0
         first_frame_shape = None
+        print("[DEBUG] Entering frame loop...")
 
         while cap.isOpened():
-            ret, frame = cap.read()
-            print(f"[DEBUG] Frame {frame_count+1}: ret={ret}, frame is None: {frame is None}")
-            if not ret or frame is None:
-                print(f"[DEBUG] Stopping at frame {frame_count+1}: ret={ret}, frame is None: {frame is None}")
-                break
-            if first_frame_shape is None:
-                first_frame_shape = frame.shape
-                print(f"[DEBUG] First frame shape: {first_frame_shape}")
-            frame_count += 1
             try:
+                ret, frame = cap.read()
+                print(f"[DEBUG] Frame {frame_count+1}: ret={ret}, frame is None: {frame is None}")
+                if not ret or frame is None:
+                    print(f"[DEBUG] Stopping at frame {frame_count+1}: ret={ret}, frame is None: {frame is None}")
+                    break
+                if first_frame_shape is None:
+                    first_frame_shape = frame.shape
+                    print(f"[DEBUG] First frame shape: {first_frame_shape}")
+                frame_count += 1
                 # Ensure frame size matches VideoWriter
                 if frame.shape[1] != width or frame.shape[0] != height:
                     print(f"[WARNING] Frame size {frame.shape[1]}x{frame.shape[0]} does not match VideoWriter size {width}x{height}. Resizing.")
@@ -225,9 +241,14 @@ def process_video_movenet(video_path):
                 img = tf.image.resize_with_pad(tf.expand_dims(img, axis=0), 192, 192)
                 img = tf.cast(img, dtype=tf.int32)
                 # Always run inference on CPU
-                with tf.device('/CPU:0'):
-                    results = movenet(img)
-                    keypoints = results['output_0'].numpy()
+                try:
+                    with tf.device('/CPU:0'):
+                        results = movenet(img)
+                        keypoints = results['output_0'].numpy()
+                except Exception as e:
+                    print(f"[ERROR] Exception during MoveNet inference on frame {frame_count}: {e}")
+                    import traceback; traceback.print_exc()
+                    continue
                 # Process keypoints and draw on frame
                 y, x, c = frame.shape
                 shaped = np.squeeze(keypoints)
@@ -240,24 +261,27 @@ def process_video_movenet(video_path):
                 processed_frames += 1
                 print(f"[DEBUG] Wrote frame {frame_count} to output video.")
             except Exception as e:
-                print(f"[ERROR] Error processing frame {frame_count}: {str(e)}")
+                print(f"[ERROR] Exception in frame loop at frame {frame_count+1}: {e}")
+                import traceback; traceback.print_exc()
                 continue
         cap.release()
        out.release()
         print(f"[DEBUG] Processed {processed_frames} frames out of {total_frames} total frames")
         # Check output file size
         if not os.path.exists(output_path):
+            print(f"[ERROR] Output video file was not created: {output_path}")
             raise ValueError(f"Output video file was not created: {output_path}")
         file_size = os.path.getsize(output_path)
         print(f"[DEBUG] Output video file size: {file_size} bytes")
         if processed_frames == 0 or file_size < 1000:
+            print(f"[ERROR] Output video file is empty or too small: {output_path}")
             raise ValueError(f"Output video file is empty or too small: {output_path}")
         video_url = url_for('serve_video', filename=output_filename, _external=False)
         print(f"[DEBUG] Returning video URL: {video_url}")
         return video_url
     except Exception as e:
-        print(f"[ERROR] Error in process_video_movenet: {str(e)}")
-        traceback.print_exc()
+        print(f"[FATAL ERROR] Uncaught exception in process_video_movenet: {e}")
+        import traceback; traceback.print_exc()
         raise
 
 def process_video_mediapipe(video_path):
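
For reference, the pattern this diff keeps wrapping in try/except is a CPU-pinned TF Hub load of MoveNet Lightning followed by per-frame inference. Below is a minimal standalone sketch of that pattern, assuming tensorflow, tensorflow_hub, opencv-python, and numpy are installed; the infer_keypoints/draw_keypoints helper names and the 0.3 confidence threshold are illustrative choices, not values taken from app.py.

# Minimal sketch of the CPU-pinned MoveNet Lightning pattern (helper names and the
# 0.3 threshold are illustrative assumptions, not values from app.py).
import cv2
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub

# Load the model on CPU, mirroring the tf.device('/CPU:0') guard in the diff.
with tf.device('/CPU:0'):
    movenet_model = hub.load("https://tfhub.dev/google/movenet/singlepose/lightning/4")
    movenet = movenet_model.signatures['serving_default']

def infer_keypoints(frame):
    """Run MoveNet on one frame; returns a (17, 3) array of normalized [y, x, score]."""
    img = tf.image.resize_with_pad(tf.expand_dims(frame, axis=0), 192, 192)
    img = tf.cast(img, dtype=tf.int32)  # Lightning expects int32 input of shape [1, 192, 192, 3]
    with tf.device('/CPU:0'):           # keep inference on CPU as well
        results = movenet(img)
    return np.squeeze(results['output_0'].numpy())

def draw_keypoints(frame, keypoints, threshold=0.3):
    """Scale normalized keypoints to pixel coordinates and mark the confident ones."""
    h, w, _ = frame.shape
    for ky, kx, score in keypoints:
        if score > threshold:
            cv2.circle(frame, (int(kx * w), int(ky * h)), 4, (0, 255, 0), -1)
    return frame

Keeping both the hub.load call and the per-frame invocation under tf.device('/CPU:0') is what the "Force MoveNet to CPU to avoid GPU JIT error" comment refers to; the try/except blocks added in this commit only log and skip a frame (or re-raise during model load) so the failure point shows up in the debug output.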