import cv2
import numpy as np

from detector import LBWDetector
from utils import draw_boxes, overlay_decision_text


def process_video(video_path, output_path="output.mp4"):
    detector = LBWDetector()

    cap = cv2.VideoCapture(video_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 25.0  # fall back to 25 fps if the source reports none
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))

    impact_frame = None
    impact_point = None
    hit_stumps = False

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Run the detector and map each detection's class id to its label
        detections, class_names = detector.detect_objects(frame)
        labels = [class_names[int(cls_id)] for *_, cls_id in detections]

        # Draw bounding-box overlays for every detection in this frame
        frame = draw_boxes(frame, detections, class_names)

        # Impact frame: the ball and the pad are detected together
        if 'pad' in labels and 'ball' in labels:
            impact_frame = frame.copy()
            # Take the ball's centre in this frame as the impact point
            for x1, y1, x2, y2, conf, cls_id in detections:
                if class_names[int(cls_id)] == 'ball':
                    impact_point = ((x1 + x2) / 2, (y1 + y2) / 2)
                    break

        # Stumps threatened: the ball and the stumps are detected in the same frame
        if 'stumps' in labels and 'ball' in labels:
            hit_stumps = True

        out.write(frame)

    cap.release()

    # Append a decision screen summarising the review
    decision_frame = np.zeros((height, width, 3), dtype=np.uint8)
    decision_frame = overlay_decision_text(decision_frame, impact_point, hit_stumps, impact_frame is not None)
    for _ in range(int(fps * 2)):  # show the decision screen for 2 seconds
        out.write(decision_frame)

    out.release()
    return output_path
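

# --- Usage sketch (not part of the original file) ---------------------------
# A minimal way to run the pipeline on a local clip, assuming detector.py and
# utils.py from this Space are importable. The input filename
# "sample_delivery.mp4" is a hypothetical placeholder.
if __name__ == "__main__":
    annotated = process_video("sample_delivery.mp4", output_path="output.mp4")
    print(f"Annotated review video written to {annotated}")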