Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,49 +1,73 @@
|
|
1 |
-
|
2 |
-
import
|
3 |
-
import av
|
4 |
import cv2
|
|
|
5 |
import numpy as np
|
|
|
6 |
from deepface import DeepFace
|
7 |
-
import
|
8 |
|
|
|
|
|
|
|
9 |
st.set_page_config(page_title="AI Facial Interview Monitor", layout="wide")
|
10 |
-
|
11 |
st.title(":blue[MOCKVIEWER - Face Monitoring System]")
|
12 |
|
13 |
-
#
|
|
|
|
|
14 |
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
|
15 |
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_eye.xml")
|
16 |
|
17 |
-
#
|
|
|
|
|
18 |
uploaded_image = st.file_uploader("Upload your reference face image", type=["jpg", "jpeg", "png"])
|
|
|
19 |
if uploaded_image:
|
20 |
user_ref_img = cv2.imdecode(np.frombuffer(uploaded_image.read(), np.uint8), cv2.IMREAD_COLOR)
|
21 |
st.image(user_ref_img, caption="Reference Image", use_column_width=True)
|
22 |
|
|
|
|
|
|
|
23 |
face_detected_time = time.time()
|
24 |
last_verified_time = 0
|
25 |
|
26 |
-
#
|
27 |
-
|
28 |
-
|
29 |
-
|
|
|
30 |
|
|
|
|
|
|
|
31 |
def is_confident_pose(face_roi):
|
32 |
-
"""
|
33 |
gray = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)
|
34 |
eyes = eye_cascade.detectMultiScale(gray, 1.1, 4)
|
35 |
return len(eyes) >= 1
|
36 |
|
37 |
-
|
|
|
|
|
38 |
def match_identity(live_face, ref_img):
|
39 |
try:
|
40 |
-
result = DeepFace.verify(
|
|
|
|
|
|
|
|
|
|
|
41 |
return result["verified"]
|
42 |
except Exception as e:
|
43 |
print("Verification error:", e)
|
44 |
return False
|
45 |
|
46 |
-
|
|
|
|
|
47 |
class VideoProcessor:
|
48 |
def __init__(self):
|
49 |
self.last_check = time.time()
|
@@ -51,7 +75,7 @@ class VideoProcessor:
|
|
51 |
self.face_missing = False
|
52 |
|
53 |
def recv(self, frame):
|
54 |
-
global face_detected_time, last_verified_time
|
55 |
img = frame.to_ndarray(format="bgr24")
|
56 |
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
57 |
|
@@ -69,30 +93,32 @@ class VideoProcessor:
|
|
69 |
for (x, y, w, h) in faces:
|
70 |
face_roi = img[y:y+h, x:x+w]
|
71 |
|
72 |
-
# Eye
|
73 |
confident = is_confident_pose(face_roi)
|
74 |
status = "β
Confident Pose" if confident else "β οΈ Look Straight!"
|
75 |
color = (0, 255, 0) if confident else (0, 255, 255)
|
76 |
cv2.putText(img, status, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
|
77 |
cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
|
78 |
|
79 |
-
# Identity
|
80 |
-
if uploaded_image and (time.time() - last_verified_time
|
81 |
matched = match_identity(face_roi, user_ref_img)
|
82 |
last_verified_time = time.time()
|
83 |
-
if
|
84 |
-
cv2.putText(img, "β Identity mismatch!", (x, y + h + 30),
|
85 |
-
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
|
86 |
-
else:
|
87 |
cv2.putText(img, "β
Identity Verified", (x, y + h + 30),
|
88 |
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
|
|
|
|
|
|
|
89 |
|
90 |
return av.VideoFrame.from_ndarray(img, format="bgr24")
|
91 |
|
92 |
-
|
|
|
|
|
93 |
webrtc_streamer(
|
94 |
key="monitor",
|
95 |
video_processor_factory=VideoProcessor,
|
96 |
-
mode=WebRtcMode.RECVONLY,
|
97 |
-
rtc_configuration=RTCConfiguration(iceServers=[])
|
98 |
-
)
|
|
|
1 |
+
import os
|
2 |
+
import time
|
|
|
3 |
import cv2
|
4 |
+
import av
|
5 |
import numpy as np
|
6 |
+
import streamlit as st
|
7 |
from deepface import DeepFace
|
8 |
+
from streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration
|
9 |
|
10 |
+
# ---------------------------------------------
|
11 |
+
# π Streamlit Page Config
|
12 |
+
# ---------------------------------------------
|
13 |
st.set_page_config(page_title="AI Facial Interview Monitor", layout="wide")
|
|
|
14 |
st.title(":blue[MOCKVIEWER - Face Monitoring System]")
|
15 |
|
16 |
+
# ---------------------------------------------
|
17 |
+
# π¦ Load Haar Cascade Models
|
18 |
+
# ---------------------------------------------
|
19 |
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
|
20 |
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_eye.xml")
|
21 |
|
22 |
+
# ---------------------------------------------
|
23 |
+
# πΌοΈ Upload Reference Image
|
24 |
+
# ---------------------------------------------
|
25 |
uploaded_image = st.file_uploader("Upload your reference face image", type=["jpg", "jpeg", "png"])
|
26 |
+
user_ref_img = None
|
27 |
if uploaded_image:
|
28 |
user_ref_img = cv2.imdecode(np.frombuffer(uploaded_image.read(), np.uint8), cv2.IMREAD_COLOR)
|
29 |
st.image(user_ref_img, caption="Reference Image", use_column_width=True)
|
30 |
|
31 |
+
# ---------------------------------------------
|
32 |
+
# β±οΈ Global State Variables
|
33 |
+
# ---------------------------------------------
|
34 |
face_detected_time = time.time()
|
35 |
last_verified_time = 0
|
36 |
|
37 |
+
# ---------------------------------------------
|
38 |
+
# βοΈ Configurations
|
39 |
+
# ---------------------------------------------
|
40 |
+
FACE_TIMEOUT = 60 # seconds of face absence before cancelling
|
41 |
+
VERIFY_INTERVAL = 30 # seconds between identity checks
|
42 |
|
43 |
+
# ---------------------------------------------
|
44 |
+
# ποΈ Confidence Heuristic
|
45 |
+
# ---------------------------------------------
|
46 |
def is_confident_pose(face_roi):
    """Heuristic "confident pose" check on a cropped face region.

    The pose is considered confident when the Haar eye cascade locates at
    least one eye in the crop — eyes hidden (looking away/down) fail it.
    """
    grayscale_roi = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)
    detected_eyes = eye_cascade.detectMultiScale(grayscale_roi, 1.1, 4)
    # detectMultiScale returns an empty sequence when nothing is found.
    return len(detected_eyes) >= 1
|
51 |
|
52 |
+
# ---------------------------------------------
|
53 |
+
# 𧬠Identity Verification
|
54 |
+
# ---------------------------------------------
|
55 |
def match_identity(live_face, ref_img):
    """Verify that the live face crop matches the uploaded reference image.

    Returns True only when DeepFace reports a verified match. Any failure
    (model load error, unreadable frame, missing key) is deliberately
    swallowed and reported as a mismatch so the video loop keeps running.
    """
    try:
        verification = DeepFace.verify(
            live_face, ref_img,
            enforce_detection=False,  # the ROI is already a face crop
            model_name='Facenet',
            detector_backend='opencv'
        )
        return verification["verified"]
    except Exception as e:
        # Best-effort: log to the server console and treat as "not matched".
        print("Verification error:", e)
        return False
|
67 |
|
68 |
+
# ---------------------------------------------
|
69 |
+
# πΉ Streamlit Webcam Video Processor
|
70 |
+
# ---------------------------------------------
|
71 |
class VideoProcessor:
|
72 |
def __init__(self):
|
73 |
self.last_check = time.time()
|
|
|
75 |
self.face_missing = False
|
76 |
|
77 |
def recv(self, frame):
|
78 |
+
global face_detected_time, last_verified_time, user_ref_img
|
79 |
img = frame.to_ndarray(format="bgr24")
|
80 |
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
81 |
|
|
|
93 |
for (x, y, w, h) in faces:
|
94 |
face_roi = img[y:y+h, x:x+w]
|
95 |
|
96 |
+
# Eye/Posture Check
|
97 |
confident = is_confident_pose(face_roi)
|
98 |
status = "β
Confident Pose" if confident else "β οΈ Look Straight!"
|
99 |
color = (0, 255, 0) if confident else (0, 255, 255)
|
100 |
cv2.putText(img, status, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
|
101 |
cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
|
102 |
|
103 |
+
# Identity Verification
|
104 |
+
if uploaded_image and (time.time() - last_verified_time > VERIFY_INTERVAL):
|
105 |
matched = match_identity(face_roi, user_ref_img)
|
106 |
last_verified_time = time.time()
|
107 |
+
if matched:
|
|
|
|
|
|
|
108 |
cv2.putText(img, "β
Identity Verified", (x, y + h + 30),
|
109 |
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
|
110 |
+
else:
|
111 |
+
cv2.putText(img, "β Identity mismatch!", (x, y + h + 30),
|
112 |
+
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
|
113 |
|
114 |
return av.VideoFrame.from_ndarray(img, format="bgr24")
|
115 |
|
116 |
+
# ---------------------------------------------
|
117 |
+
# π Activate Webcam Stream
|
118 |
+
# ---------------------------------------------
|
119 |
# ---------------------------------------------
# 🚀 Activate Webcam Stream
# ---------------------------------------------
# SENDRECV is required: the browser must SEND its webcam frames to the
# server-side VideoProcessor and RECEIVE the annotated frames back.
# (RECVONLY never delivers camera input to recv(), so nothing is processed.)
webrtc_streamer(
    key="monitor",
    video_processor_factory=VideoProcessor,
    mode=WebRtcMode.SENDRECV,
    # A public STUN server lets peers behind NAT negotiate a connection;
    # an empty iceServers list breaks connectivity on most real networks.
    # RTCConfiguration takes the WebRTC config as a dict, per the
    # streamlit-webrtc documentation.
    rtc_configuration=RTCConfiguration(
        {"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]}
    ),
    # Video only — the monitor has no use for the microphone.
    media_stream_constraints={"video": True, "audio": False},
)
|