Aquibjaved committed
Commit f126c7c · verified · 1 Parent(s): d25d96a

Upload 3 files

Files changed (3)
  1. FeatureExtraction.py +41 -0
  2. Prediction.py +24 -0
  3. orignal_model_b32.h5 +3 -0
FeatureExtraction.py ADDED
@@ -0,0 +1,41 @@
+ import cv2
+ import numpy as np
+ from tensorflow.keras.models import load_model
+ from tensorflow.keras.applications.resnet import ResNet152
+ from tensorflow.keras.layers import AveragePooling2D, Flatten
+ from tensorflow.keras.models import Model
+ from tqdm import tqdm
+ import os
+
+ class FeatureExtractor:
+     def __init__(self, img_shape, channels):
+         self.seq_length = 40  # Number of frames to process per clip
+         self.height = img_shape[0]
+         self.width = img_shape[1]
+         self.channels = channels
+
+         # Load ResNet152 without the top fully connected layer
+         self.base_model = ResNet152(include_top=False, input_shape=(224, 224, 3), weights='imagenet')
+
+         # Freeze the base model layers
+         for layer in self.base_model.layers:
+             layer.trainable = False
+
+         # Add an average pooling layer followed by a flatten layer
+         self.op = self.base_model.output
+         self.x_model = AveragePooling2D((7, 7), name='avg_pool')(self.op)
+         self.x_model = Flatten()(self.x_model)
+
+         # Create the feature extraction model
+         self.model = Model(self.base_model.input, self.x_model)
+
+     def extract_feature(self, frames_buffer):
+         x_op = np.zeros((2048, self.seq_length))  # Shape (feature_dim, seq_length)
+         for i in range(len(frames_buffer)):
+             x_t = frames_buffer[i]
+             x_t = cv2.resize(x_t, (224, 224))  # Resize each frame to the required input size
+             x_t = np.expand_dims(x_t, axis=0)  # Add batch dimension
+             x = self.model.predict(x_t)  # Per-frame feature of shape (1, 2048)
+             x_op[:, i] = x[0]  # Drop the batch dimension before storing
+
+         return x_op
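For context, a minimal usage sketch of FeatureExtractor (not part of the commit). It assumes a placeholder local video path 'sample_clip.mp4' and that the caller collects up to seq_length frames before extraction:

import cv2
from FeatureExtraction import FeatureExtractor

# Hypothetical example: read 40 frames from a local clip and extract features.
# 'sample_clip.mp4' is a placeholder path, not a file in this repository.
extractor = FeatureExtractor(img_shape=(224, 224), channels=3)

cap = cv2.VideoCapture('sample_clip.mp4')
frames_buffer = []
while len(frames_buffer) < extractor.seq_length:
    ok, frame = cap.read()
    if not ok:
        break  # Clip ended early; fewer than seq_length frames collected
    frames_buffer.append(frame)
cap.release()

features = extractor.extract_feature(frames_buffer)
print(features.shape)  # Expected: (2048, 40)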
Prediction.py ADDED
@@ -0,0 +1,24 @@
+ import cv2
+ import numpy as np
+ from tensorflow.keras.models import load_model
+ import os
+ from FeatureExtraction import FeatureExtractor
+
+ model = load_model('orignal_model_b32.h5')
+ model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
+
+
+ # Initialize the feature extractor
+ feature_extractor = FeatureExtractor(img_shape=(224, 224), channels=3)
+
+ def predict_fight(frames_buffer):
+     # Extract features for the buffered frames
+     features_sequence = feature_extractor.extract_feature(frames_buffer)
+
+     # Transpose the feature sequence to match the model's expected input shape
+     features_sequence = np.transpose(features_sequence, (1, 0))  # From (2048, 40) to (40, 2048)
+     features_sequence = np.expand_dims(features_sequence, axis=0)  # Add batch dimension (1, 40, 2048)
+
+     # Predict
+     prediction = model.predict(features_sequence)
+     return prediction > 0.8  # Scores above 0.8 are treated as a fight
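For context, a minimal end-to-end sketch of predict_fight (not part of the commit). It assumes a placeholder local video path 'sample_clip.mp4' and slides a 40-frame window over the clip; predict_fight returns a boolean array, so .any() is used to collapse it to a single flag:

import cv2
from Prediction import predict_fight

# Hypothetical example: run fight detection over consecutive 40-frame windows.
# 'sample_clip.mp4' is a placeholder path, not a file in this repository.
cap = cv2.VideoCapture('sample_clip.mp4')
frames_buffer = []
while True:
    ok, frame = cap.read()
    if not ok:
        break
    frames_buffer.append(frame)
    if len(frames_buffer) == 40:
        is_fight = predict_fight(frames_buffer)
        print('fight detected' if is_fight.any() else 'no fight')
        frames_buffer = []  # Start a fresh window
cap.release()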
orignal_model_b32.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad403c2e5014e3fd7589d74b801e4494191b329bcbaaa11fbd8d38effe3cc133
+ size 191265304