Spaces:
Sleeping
Sleeping
Added app.py file
Browse files
app.py
ADDED
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from streamlit_webrtc import webrtc_streamer, RTCConfiguration
|
2 |
+
import streamlit as st
|
3 |
+
import av
|
4 |
+
import cv2
|
5 |
+
import numpy as np
|
6 |
+
from deepface import DeepFace
|
7 |
+
import time
|
8 |
+
|
9 |
+
st.set_page_config(page_title="AI Facial Interview Monitor", layout="wide")

st.title(":blue[MOCKVIEWER - Face Monitoring System]")

# Load the Haar cascade detectors bundled with OpenCV (face + eyes).
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_eye.xml")

# Reference image used for the periodic identity check in VideoProcessor.
uploaded_image = st.file_uploader("Upload your reference face image", type=["jpg", "jpeg", "png"])
if uploaded_image:
    user_ref_img = cv2.imdecode(np.frombuffer(uploaded_image.read(), np.uint8), cv2.IMREAD_COLOR)
    # BUG FIX: cv2.imdecode yields BGR channel order; without channels="BGR"
    # Streamlit renders the preview with red/blue swapped.
    st.image(user_ref_img, caption="Reference Image", channels="BGR", use_column_width=True)

# BUG FIX: keep the timing state at module level (not under the upload branch)
# so the frame callback never hits a NameError before an image is uploaded.
face_detected_time = time.time()  # last moment a face was visible
last_verified_time = 0            # last moment identity was verified

# Configs
FACE_TIMEOUT = 60     # seconds of face absence before cancelling
VERIFY_INTERVAL = 30  # seconds between face identity checks
|
29 |
+
|
30 |
+
|
31 |
+
def is_confident_pose(face_roi):
    """Very basic heuristic — head is upright, eyes are visible.

    Runs the eye cascade on a grayscale version of *face_roi* and reports
    True as soon as at least one eye is detected.
    """
    roi_gray = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)
    detected_eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 4)
    return len(detected_eyes) >= 1
|
36 |
+
|
37 |
+
|
38 |
+
def match_identity(live_face, ref_img):
    """Return True when DeepFace judges *live_face* and *ref_img* the same person.

    Best-effort: any failure inside DeepFace (model download, bad crop, …)
    is logged and treated as "not matched" rather than crashing the stream.
    """
    try:
        verdict = DeepFace.verify(
            live_face,
            ref_img,
            enforce_detection=False,
            model_name='Facenet',
            detector_backend='opencv',
        )
        return verdict["verified"]
    except Exception as e:
        print("Verification error:", e)
        return False
|
45 |
+
|
46 |
+
|
47 |
+
class VideoProcessor:
    """Per-frame webrtc processor.

    Responsibilities:
      * warn (and eventually "cancel") when no face is visible for FACE_TIMEOUT s
      * draw a confidence/pose hint per detected face
      * every VERIFY_INTERVAL s, verify the face against the uploaded reference
    """

    def __init__(self):
        # Timing state is kept on the instance: recv() runs on the webrtc
        # worker thread, so module-level globals are unreliable there.
        # (The original also set last_check/face_missing but never read them.)
        self.face_detected_time = time.time()  # last moment a face was seen
        self.last_verified_time = 0.0          # last identity verification

    def recv(self, frame):
        """Annotate one incoming video frame and return it.

        frame: av.VideoFrame from streamlit-webrtc; returned annotated.
        """
        img = frame.to_ndarray(format="bgr24")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        faces = face_cascade.detectMultiScale(gray, 1.1, 4)

        # BUG FIX throughout: cv2.putText uses Hershey fonts, which cannot
        # render emoji/unicode (they draw as "?"); use ASCII markers instead.
        if len(faces) == 0:
            if time.time() - self.face_detected_time > FACE_TIMEOUT:
                cv2.putText(img, "[X] Interview Cancelled: Face not visible!", (30, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
            else:
                cv2.putText(img, "[!] Face not visible. You have 60 seconds.", (30, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
        else:
            self.face_detected_time = time.time()
            for (x, y, w, h) in faces:
                face_roi = img[y:y + h, x:x + w]

                # Eye and posture check
                confident = is_confident_pose(face_roi)
                status = "[OK] Confident Pose" if confident else "[!] Look Straight!"
                color = (0, 255, 0) if confident else (0, 255, 255)
                cv2.putText(img, status, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
                cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)

                # Identity check every VERIFY_INTERVAL seconds (DeepFace is
                # too slow to run per-frame); only when a reference exists.
                if uploaded_image and (time.time() - self.last_verified_time) > VERIFY_INTERVAL:
                    matched = match_identity(face_roi, user_ref_img)
                    self.last_verified_time = time.time()
                    if not matched:
                        cv2.putText(img, "[X] Identity mismatch!", (x, y + h + 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
                    else:
                        cv2.putText(img, "[OK] Identity Verified", (x, y + h + 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

        return av.VideoFrame.from_ndarray(img, format="bgr24")
|
91 |
+
|
92 |
+
|
93 |
+
# Streamlit WebRTC component: stream the webcam through VideoProcessor.
# A public Google STUN server lets the browser establish the peer connection.
_rtc_config = RTCConfiguration(
    {"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]}
)

webrtc_streamer(
    key="monitor",
    video_processor_factory=VideoProcessor,
    rtc_configuration=_rtc_config,
)
|