nagasurendra committed on
Commit
7a25fd2
·
verified ·
1 Parent(s): f60df3d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -12
app.py CHANGED
@@ -2,12 +2,12 @@ import cv2
2
  import torch
3
  import gradio as gr
4
  import numpy as np
5
- from ultralytics import YOLO, __version__ as ultralytics_version # Import ultralytics version
6
 
7
  # Debug: Check environment
8
  print(f"Torch version: {torch.__version__}")
9
  print(f"Gradio version: {gr.__version__}")
10
- print(f"Ultralytics version: {ultralytics_version}") # Corrected version access
11
  print(f"CUDA available: {torch.cuda.is_available()}")
12
 
13
  # Load YOLOv8 model
@@ -19,39 +19,50 @@ def process_video(video):
19
  if video is None:
20
  return "Error: No video uploaded"
21
 
 
22
  cap = cv2.VideoCapture(video)
23
  if not cap.isOpened():
24
  return "Error: Could not open video file"
25
 
26
- frame_width, frame_height = 320, 240 # Smaller resolution
 
 
27
  fps = cap.get(cv2.CAP_PROP_FPS)
 
28
 
 
 
 
 
 
29
  output_path = "processed_output.mp4"
30
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')
31
  out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
32
 
33
- frame_count = 1
34
- frame_skip = 5 # Process every 5th frame
35
- max_frames = 100 # Limit for testing
36
 
37
  while True:
38
  ret, frame = cap.read()
39
- if not ret or frame_count > max_frames:
40
  break
41
 
42
  frame_count += 1
43
- if frame_count % frame_skip != 0:
44
- continue
45
-
46
- frame = cv2.resize(frame, (frame_width, frame_height))
47
- print(f"Processing frame {frame_count}")
48
 
 
 
 
 
49
  results = model(frame)
50
  annotated_frame = results[0].plot()
 
 
51
  out.write(annotated_frame)
52
 
 
53
  cap.release()
54
  out.release()
 
55
  return output_path
56
 
57
  # Gradio interface
 
2
  import torch
3
  import gradio as gr
4
  import numpy as np
5
+ from ultralytics import YOLO, __version__ as ultralytics_version
6
 
7
  # Debug: Check environment
8
  print(f"Torch version: {torch.__version__}")
9
  print(f"Gradio version: {gr.__version__}")
10
+ print(f"Ultralytics version: {ultralytics_version}")
11
  print(f"CUDA available: {torch.cuda.is_available()}")
12
 
13
  # Load YOLOv8 model
 
19
  if video is None:
20
  return "Error: No video uploaded"
21
 
22
+ # Open the input video
23
  cap = cv2.VideoCapture(video)
24
  if not cap.isOpened():
25
  return "Error: Could not open video file"
26
 
27
+ # Get input video properties
28
+ frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
29
+ frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
30
  fps = cap.get(cv2.CAP_PROP_FPS)
31
+ total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
32
 
33
+ # Use original resolution to avoid resizing issues (optional: keep 320x240 if needed)
34
+ # frame_width, frame_height = 320, 240
35
+ print(f"Input video: {frame_width}x{frame_height}, {fps} FPS, {total_frames} frames")
36
+
37
+ # Set up video writer
38
  output_path = "processed_output.mp4"
39
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')
40
  out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
41
 
42
+ frame_count = 0
 
 
43
 
44
  while True:
45
  ret, frame = cap.read()
46
+ if not ret:
47
  break
48
 
49
  frame_count += 1
50
+ print(f"Processing frame {frame_count}/{total_frames}")
 
 
 
 
51
 
52
+ # Optional: Resize if needed (remove if using original resolution)
53
+ # frame = cv2.resize(frame, (frame_width, frame_height))
54
+
55
+ # Run YOLOv8 inference
56
  results = model(frame)
57
  annotated_frame = results[0].plot()
58
+
59
+ # Write the annotated frame to the output video
60
  out.write(annotated_frame)
61
 
62
+ # Release resources
63
  cap.release()
64
  out.release()
65
+ print(f"Output video saved as {output_path}")
66
  return output_path
67
 
68
  # Gradio interface