Subh775 committed
Commit a041f60 · verified · 1 Parent(s): 0e736c9

Update recognizer.py

Files changed (1)
  1. recognizer.py +380 -0
recognizer.py CHANGED
@@ -0,0 +1,380 @@
+ import cv2
+ import os
+ import numpy as np
+ from deepface import DeepFace
+ import logging
+ from typing import Dict, List, Tuple, Optional
+ import sqlite3
+ from datetime import datetime
+ import pytz
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ class EnhancedFaceRecognizer:
+     """Enhanced face recognition system using DeepFace with optimizations"""
+
+     def __init__(self, known_faces_dir: str = 'static/known_faces', db_path: str = 'attendance.db'):
+         self.known_faces_dir = known_faces_dir
+         self.db_path = db_path
+         self.known_faces = {}
+         self.face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+         self.models = ['VGG-Face', 'Facenet', 'OpenFace']  # Multiple models for better accuracy
+         self.current_model = 'VGG-Face'
+         self.recognition_threshold = 0.4  # Cosine distance threshold
+         self.confidence_threshold = 65  # Minimum confidence percentage
+
+         # Create directories if they don't exist
+         os.makedirs(self.known_faces_dir, exist_ok=True)
+
+         # Load known faces
+         self.load_known_faces()
+
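+     # Note: load_known_faces below assumes the SQLite database already contains a
+     # `users` table with at least (id, name, face_encoding_path), where
+     # face_encoding_path is stored relative to known_faces_dir; the schema itself
+     # is expected to be created elsewhere in the application.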
+     def load_known_faces(self) -> None:
+         """Load known faces from the database and file system"""
+         try:
+             self.known_faces = {}
+
+             # Connect to database
+             conn = sqlite3.connect(self.db_path)
+             cursor = conn.cursor()
+
+             # Get all users with face images
+             cursor.execute('SELECT id, name, face_encoding_path FROM users WHERE face_encoding_path IS NOT NULL')
+             users = cursor.fetchall()
+             conn.close()
+
+             for user_id, name, face_path in users:
+                 full_path = os.path.join(self.known_faces_dir, face_path)
+                 if os.path.exists(full_path):
+                     # Validate image file
+                     if self._validate_image(full_path):
+                         self.known_faces[name] = {
+                             'user_id': user_id,
+                             'image_path': full_path,
+                             'embeddings': {}  # Cache for embeddings
+                         }
+                     else:
+                         logger.warning(f"Invalid image file for user {name}: {full_path}")
+                 else:
+                     logger.warning(f"Image file not found for user {name}: {full_path}")
+
+             logger.info(f"Loaded {len(self.known_faces)} known faces")
+
+         except Exception as e:
+             logger.error(f"Error loading known faces: {e}")
+             self.known_faces = {}
+
+     def _validate_image(self, image_path: str) -> bool:
+         """Validate that an image file is readable and contains at least one face"""
+         try:
+             image = cv2.imread(image_path)
+             if image is None:
+                 return False
+
+             # Check if the image contains at least one face
+             gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+             faces = self.face_cascade.detectMultiScale(gray, 1.1, 4)
+
+             return len(faces) > 0
+
+         except Exception as e:
+             logger.error(f"Error validating image {image_path}: {e}")
+             return False
+
+     def preprocess_image(self, image: np.ndarray) -> np.ndarray:
+         """Preprocess an image for better recognition"""
+         try:
+             # Convert BGR to RGB if needed
+             if len(image.shape) == 3 and image.shape[2] == 3:
+                 image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+             # Enhance image quality
+             # 1. Histogram equalization for better contrast
+             if len(image.shape) == 3:
+                 # Convert to LAB color space
+                 lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
+                 l, a, b = cv2.split(lab)
+                 # Apply CLAHE to the L channel
+                 clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+                 l = clahe.apply(l)
+                 # Merge channels
+                 enhanced = cv2.merge([l, a, b])
+                 image = cv2.cvtColor(enhanced, cv2.COLOR_LAB2RGB)
+             else:
+                 clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+                 image = clahe.apply(image)
+
+             # 2. Light Gaussian blur to reduce noise (a 1x1 kernel would be a no-op)
+             image = cv2.GaussianBlur(image, (3, 3), 0)
+
+             return image
+
+         except Exception as e:
+             logger.error(f"Error preprocessing image: {e}")
+             return image
+
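+     # Note: Haar-cascade detection runs on a grayscale copy of the frame;
+     # scaleFactor controls the image-pyramid step, minNeighbors filters out weak
+     # detections, and minSize drops boxes smaller than 30x30 pixels.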
+     def detect_faces(self, image: np.ndarray) -> List[Tuple[int, int, int, int]]:
+         """Detect faces in an image using the Haar cascade"""
+         try:
+             gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) if len(image.shape) == 3 else image
+             faces = self.face_cascade.detectMultiScale(
+                 gray,
+                 scaleFactor=1.1,
+                 minNeighbors=5,
+                 minSize=(30, 30),
+                 flags=cv2.CASCADE_SCALE_IMAGE
+             )
+             # detectMultiScale returns an empty tuple when nothing is found
+             if len(faces) == 0:
+                 return []
+             return [tuple(int(v) for v in face) for face in faces]
+         except Exception as e:
+             logger.error(f"Error detecting faces: {e}")
+             return []
+
+     def extract_face_region(self, image: np.ndarray, face_coords: Tuple[int, int, int, int]) -> np.ndarray:
+         """Extract a face region from the image with padding"""
+         try:
+             x, y, w, h = face_coords
+
+             # Add padding around the face
+             padding = int(min(w, h) * 0.2)
+
+             # Calculate padded coordinates, clamped to the image bounds
+             x1 = max(0, x - padding)
+             y1 = max(0, y - padding)
+             x2 = min(image.shape[1], x + w + padding)
+             y2 = min(image.shape[0], y + h + padding)
+
+             # Extract face region
+             face_region = image[y1:y2, x1:x2]
+
+             return face_region
+
+         except Exception as e:
+             logger.error(f"Error extracting face region: {e}")
+             return image
+
+     def get_face_embedding(self, image_path: str, model_name: str = None) -> Optional[np.ndarray]:
+         """Get a face embedding using DeepFace"""
+         try:
+             if model_name is None:
+                 model_name = self.current_model
+
+             # Use DeepFace to get the embedding
+             embedding = DeepFace.represent(
+                 img_path=image_path,
+                 model_name=model_name,
+                 enforce_detection=False,
+                 detector_backend='opencv'
+             )
+
+             if isinstance(embedding, list) and len(embedding) > 0:
+                 return np.array(embedding[0]['embedding'])
+             elif isinstance(embedding, dict):
+                 return np.array(embedding['embedding'])
+             else:
+                 return None
+
+         except Exception as e:
+             logger.debug(f"Error getting embedding for {image_path} with {model_name}: {e}")
+             return None
+
+     def compare_faces(self, img1_path: str, img2_path: str, model_name: str = None) -> Dict:
+         """Compare two faces using DeepFace"""
+         try:
+             if model_name is None:
+                 model_name = self.current_model
+
+             result = DeepFace.verify(
+                 img1_path=img1_path,
+                 img2_path=img2_path,
+                 model_name=model_name,
+                 distance_metric='cosine',
+                 enforce_detection=False,
+                 detector_backend='opencv'
+             )
+
+             return result
+
+         except Exception as e:
+             logger.debug(f"Error comparing faces: {e}")
+             return {'verified': False, 'distance': 1.0}
+
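+     # Recognition pipeline: preprocess the frame, detect faces with the Haar
+     # cascade, crop the largest face, write it to a temporary image, then run
+     # DeepFace.verify against every known face (optionally with several models)
+     # and keep the best match that clears both the distance and confidence thresholds.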
+     def recognize_face_advanced(self, frame: np.ndarray, use_multiple_models: bool = True) -> Tuple[Optional[Dict], float]:
+         """Advanced face recognition with multiple models and preprocessing"""
+         try:
+             if not self.known_faces:
+                 return None, 0
+
+             # Preprocess the frame
+             processed_frame = self.preprocess_image(frame.copy())
+
+             # Detect faces in the frame
+             faces = self.detect_faces(processed_frame)
+
+             if not faces:
+                 return None, 0
+
+             # Use the largest detected face
+             largest_face = max(faces, key=lambda f: f[2] * f[3])
+
+             # Extract face region
+             face_region = self.extract_face_region(processed_frame, largest_face)
+
+             # Save a temporary frame for DeepFace
+             temp_path = 'temp_recognition_frame.jpg'
+
+             # Convert back to BGR for saving
+             if len(face_region.shape) == 3:
+                 face_bgr = cv2.cvtColor(face_region, cv2.COLOR_RGB2BGR)
+             else:
+                 face_bgr = face_region
+
+             cv2.imwrite(temp_path, face_bgr)
+
+             best_match = None
+             highest_confidence = 0
+
+             # Models to try
+             models_to_use = self.models if use_multiple_models else [self.current_model]
+
+             for name, face_data in self.known_faces.items():
+                 best_model_result = None
+                 best_model_confidence = 0
+
+                 # Try multiple models for this face
+                 for model in models_to_use:
+                     try:
+                         result = self.compare_faces(temp_path, face_data['image_path'], model)
+
+                         if result['verified'] and result['distance'] < self.recognition_threshold:
+                             confidence = (1 - result['distance']) * 100
+
+                             if confidence > best_model_confidence:
+                                 best_model_confidence = confidence
+                                 best_model_result = {
+                                     'name': name,
+                                     'user_id': face_data['user_id'],
+                                     'confidence': confidence,
+                                     'model_used': model,
+                                     'distance': result['distance']
+                                 }
+
+                     except Exception as e:
+                         logger.debug(f"Model {model} failed for {name}: {e}")
+                         continue
+
+                 # Check if this is the best match overall
+                 if best_model_result and best_model_confidence > highest_confidence and best_model_confidence > self.confidence_threshold:
+                     highest_confidence = best_model_confidence
+                     best_match = best_model_result
+
+             # Clean up the temp file
+             if os.path.exists(temp_path):
+                 os.remove(temp_path)
+
+             return best_match, highest_confidence
+
+         except Exception as e:
+             logger.error(f"Advanced face recognition error: {e}")
+             return None, 0
+
+     def recognize_face(self, frame: np.ndarray) -> Tuple[Optional[Dict], float]:
+         """Main face recognition method (backward compatibility)"""
+         return self.recognize_face_advanced(frame, use_multiple_models=False)
+
+     def add_known_face(self, name: str, image_path: str) -> bool:
+         """Add a new known face"""
+         try:
+             if not os.path.exists(image_path):
+                 logger.error(f"Image file not found: {image_path}")
+                 return False
+
+             if not self._validate_image(image_path):
+                 logger.error(f"Invalid image file: {image_path}")
+                 return False
+
+             # The database row is assumed to already exist;
+             # just refresh the in-memory known_faces dictionary
+             self.load_known_faces()
+
+             return name in self.known_faces
+
+         except Exception as e:
+             logger.error(f"Error adding known face: {e}")
+             return False
+
+     def update_model_settings(self, model_name: str = None, threshold: float = None, confidence_threshold: float = None):
+         """Update recognition settings"""
+         if model_name and model_name in self.models:
+             self.current_model = model_name
+             logger.info(f"Model changed to: {model_name}")
+
+         if threshold is not None:
+             self.recognition_threshold = threshold
+             logger.info(f"Recognition threshold changed to: {threshold}")
+
+         if confidence_threshold is not None:
+             self.confidence_threshold = confidence_threshold
+             logger.info(f"Confidence threshold changed to: {confidence_threshold}")
+
+     def get_recognition_stats(self) -> Dict:
+         """Get recognition system statistics"""
+         return {
+             'total_known_faces': len(self.known_faces),
+             'current_model': self.current_model,
+             'available_models': self.models,
+             'recognition_threshold': self.recognition_threshold,
+             'confidence_threshold': self.confidence_threshold,
+             'known_faces_dir': self.known_faces_dir
+         }
+
+ # Utility function for standalone usage
+ def recognize_from_webcam(recognizer: EnhancedFaceRecognizer, camera_index: int = 0):
+     """Recognize faces from a webcam feed"""
+     cap = cv2.VideoCapture(camera_index)
+
+     if not cap.isOpened():
+         logger.error("Could not open webcam")
+         return
+
+     logger.info("Starting webcam recognition. Press 'q' to quit.")
+
+     while True:
+         ret, frame = cap.read()
+         if not ret:
+             break
+
+         # Recognize face
+         result, confidence = recognizer.recognize_face_advanced(frame)
+
+         # Draw results on the frame
+         if result:
+             # Draw bounding box and name
+             faces = recognizer.detect_faces(frame)
+             if faces:
+                 for (x, y, w, h) in faces:
+                     cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+
+                     label = f"{result['name']} ({confidence:.1f}%)"
+                     cv2.putText(frame, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
+
+         # Show frame
+         cv2.imshow('Face Recognition', frame)
+
+         # Break on 'q' key
+         if cv2.waitKey(1) & 0xFF == ord('q'):
+             break
+
+     cap.release()
+     cv2.destroyAllWindows()
+
+ if __name__ == "__main__":
+     # Test the recognizer
+     recognizer = EnhancedFaceRecognizer()
+
+     print("Enhanced Face Recognizer Test")
+     print(f"Stats: {recognizer.get_recognition_stats()}")
+
+     # Uncomment to test with webcam
+     # recognize_from_webcam(recognizer)
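For reference, a minimal sketch of how the class above might be exercised on a single still image; the image path is a placeholder and an attendance.db with registered users is assumed to exist already.

    import cv2
    from recognizer import EnhancedFaceRecognizer  # assumes recognizer.py is importable

    recognizer = EnhancedFaceRecognizer()          # loads known faces from attendance.db
    frame = cv2.imread("sample_frame.jpg")         # placeholder path; BGR image as read by OpenCV
    if frame is not None:
        match, confidence = recognizer.recognize_face_advanced(frame, use_multiple_models=True)
        if match:
            print(f"Recognized {match['name']} ({confidence:.1f}%, model={match['model_used']})")
        else:
            print("No known face recognized")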