fight-detection-live-demo / ProcessVideo.py
import cv2
import numpy as np
import time
from FeatureExtraction import FeatureExtractor
from Prediction import predict_fight
def process_video(video_path, sequence_length=40, threshold=0.8, output_frame_rate=30, debug=False):
    try:
        start_time = time.time()

        # Load the entire video into memory as a list of BGR frames
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise ValueError("Could not open video file")
        all_frames = []
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            all_frames.append(frame)
        cap.release()

        total_frames = len(all_frames)
        feature_extractor = FeatureExtractor(img_shape=(224, 224), channels=3, seq_length=sequence_length)

        # Classify the video in non-overlapping chunks of `sequence_length` frames
        predictions_list = []
        predictions = []
        for i in range(0, total_frames, sequence_length):
            frames_buffer = all_frames[i:i + sequence_length]
            # Pad a short final chunk by repeating its last frame
            if len(frames_buffer) < sequence_length:
                frames_buffer += [frames_buffer[-1]] * (sequence_length - len(frames_buffer))
            fight_detected, fight_prob = predict_fight(frames_buffer, threshold, feature_extractor)
            predictions.append(fight_detected)
            predictions_list.append({
                'chunk_start_frame': i,
                'chunk_end_frame': i + sequence_length - 1,
                'fight_probability': float(fight_prob),
                'fight_detected': bool(fight_detected)
            })

        # Write an annotated copy of the video, labelling every frame of each chunk
        # with that chunk's prediction
        output_video_path = "output_labeled.mp4"
        height, width, _ = all_frames[0].shape
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_video_path, fourcc, output_frame_rate, (width, height))
        frame_idx = 0
        for pred in predictions:
            label = "Violence Detected!" if pred else "No Violence"
            color = (0, 0, 255) if pred else (0, 255, 0)  # BGR: red for violence, green otherwise
            for _ in range(sequence_length):
                if frame_idx >= total_frames:
                    break
                frame = all_frames[frame_idx].copy()
                cv2.putText(frame, label, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)
                out.write(frame)
                frame_idx += 1
        out.release()

        processing_time = time.time() - start_time
        json_response = {
            'output_video_path': output_video_path,
            'total_frames': total_frames,
            'sequence_length': sequence_length,
            'threshold': threshold,
            'output_frame_rate': output_frame_rate,
            'processing_time_seconds': processing_time,
            'predictions': predictions_list,
            'error': None
        }
        return output_video_path, json_response
    except Exception as e:
        # Return a JSON-style error payload instead of raising, so callers can surface the message
        error_message = f"Error processing video: {str(e)}"
        json_response = {
            'output_video_path': None,
            'error': error_message
        }
        return None, json_response
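

# Hedged usage sketch (not part of the original module): assuming the script is run
# directly and a local clip such as "sample.mp4" exists, process_video can be
# exercised like this. The example path and the json pretty-printing are
# illustrative assumptions only.
if __name__ == "__main__":
    import json

    labeled_path, response = process_video("sample.mp4", sequence_length=40, threshold=0.8)
    if labeled_path is None:
        print("Processing failed:", response['error'])
    else:
        print("Labeled video written to:", labeled_path)
        print(json.dumps(response, indent=2))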