Aquibjaved committed on
Commit
8d8d2fc
·
verified ·
1 Parent(s): f78faf6

Update ProcessVideo.py

Browse files
Files changed (1) hide show
  1. ProcessVideo.py +71 -57
ProcessVideo.py CHANGED
@@ -1,57 +1,71 @@
1
- import cv2
2
- from Prediction import predict_fight # Assuming you have this
3
-
4
- # Process video: read, predict, and output labeled video
5
- def process_video(video_path):
6
- cap = cv2.VideoCapture(video_path)
7
- sequence_length = 40 # Number of frames for one prediction
8
- all_frames = []
9
- predictions = []
10
-
11
- # Step 1: Read all frames from the video
12
- while cap.isOpened():
13
- ret, frame = cap.read()
14
- if not ret:
15
- break
16
- all_frames.append(frame)
17
- cap.release()
18
-
19
- # Step 2: Process frames in chunks of 40 to make predictions
20
- for i in range(0, len(all_frames), sequence_length):
21
- frames_buffer = all_frames[i:i + sequence_length] # Get a batch of 40 frames
22
-
23
- # If the number of frames is less than 40 at the end, pad it with the last frame
24
- if len(frames_buffer) < sequence_length:
25
- frames_buffer += [frames_buffer[-1]] * (sequence_length - len(frames_buffer))
26
-
27
- # Perform the prediction on the current batch of frames
28
- fight_detected = predict_fight(frames_buffer)
29
-
30
- # Store the prediction for this batch
31
- predictions.append(fight_detected)
32
-
33
- # Step 3: Create output video with labels
34
- output_video_path = "output_labeled.mp4"
35
- height, width, _ = all_frames[0].shape
36
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
37
- out = cv2.VideoWriter(output_video_path, fourcc, 30, (width, height)) # Adjust frame rate if needed
38
-
39
- frame_idx = 0
40
- for pred in predictions:
41
- label = "Violence Detected!" if pred else "No Violence"
42
- color = (0, 0, 255) if pred else (0, 255, 0)
43
-
44
- # For the next 40 frames, show the same label
45
- for _ in range(sequence_length):
46
- if frame_idx >= len(all_frames):
47
- break
48
-
49
- frame = all_frames[frame_idx]
50
- cv2.putText(frame, label, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)
51
- out.write(frame)
52
- frame_idx += 1
53
-
54
- out.release()
55
-
56
- return output_video_path
57
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ from Prediction import predict_fight # Assuming you have this
3
+
4
+ # Process video: read, predict, and output labeled video
5
def process_video(video_path):
    """Run violence detection over a video and write a labeled copy.

    Reads every frame of the input video, runs ``predict_fight`` on
    consecutive 40-frame chunks, then writes the frames back out with a
    centered per-chunk label ("Violence Detected!" in red or "No Violence"
    in green).

    Parameters
    ----------
    video_path : str
        Path to the input video file.

    Returns
    -------
    str
        Path to the labeled output video ("output_labeled.mp4").

    Raises
    ------
    ValueError
        If the video cannot be opened or contains no readable frames.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Could not open video: {video_path}")

    sequence_length = 40  # Number of frames per prediction window

    # Preserve the source frame rate so output duration matches the input;
    # fall back to 30 fps when the container does not report one.
    fps = cap.get(cv2.CAP_PROP_FPS)
    if not fps or fps <= 0:
        fps = 30

    # Step 1: Read all frames from the video.
    # NOTE(review): this buffers the entire video in memory — fine for short
    # clips, but long videos may need a streaming approach.
    all_frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        all_frames.append(frame)
    cap.release()

    if not all_frames:
        raise ValueError(f"No frames could be read from video: {video_path}")

    # Step 2: Predict on consecutive chunks of `sequence_length` frames.
    predictions = []
    for i in range(0, len(all_frames), sequence_length):
        frames_buffer = all_frames[i:i + sequence_length]

        # Pad a short final chunk by repeating its last frame so the model
        # always receives exactly `sequence_length` frames.
        if len(frames_buffer) < sequence_length:
            frames_buffer += [frames_buffer[-1]] * (sequence_length - len(frames_buffer))

        predictions.append(predict_fight(frames_buffer))

    # Step 3: Write the output video with a centered label per chunk.
    output_video_path = "output_labeled.mp4"
    height, width, _ = all_frames[0].shape
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))

    font_scale = 1.5  # Label size
    thickness = 3     # Label stroke thickness

    frame_idx = 0
    for pred in predictions:
        label = "Violence Detected!" if pred else "No Violence"
        color = (0, 0, 255) if pred else (0, 255, 0)  # BGR: red / green

        # The text metrics are constant for the whole chunk, so compute the
        # centered position once here instead of once per frame.
        text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, font_scale, thickness)[0]
        text_x = (width - text_size[0]) // 2
        text_y = (height + text_size[1]) // 2  # Baseline adjusted for vertical centering

        # Apply the same label to every frame of this chunk.
        for _ in range(sequence_length):
            if frame_idx >= len(all_frames):
                break
            frame = all_frames[frame_idx]
            cv2.putText(frame, label, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX,
                        font_scale, color, thickness)
            out.write(frame)
            frame_idx += 1

    out.release()
    return output_video_path