# Spaces:
# Sleeping
# Sleeping
# NOTE(review): the three lines above are Hugging Face Spaces page-status text
# captured in a copy-paste, not code; commented out so the file parses.
# Standard library.
import time

# Third-party: frame containers (av), OpenCV, NumPy, face verification,
# Streamlit UI, and the WebRTC streaming component.
import av
import cv2
import numpy as np
from deepface import DeepFace
import streamlit as st
from streamlit_webrtc import RTCConfiguration, WebRtcMode, webrtc_streamer
# Streamlit page setup: wide layout plus the app header.
st.set_page_config(page_title="AI Facial Interview Monitor", layout="wide")
st.title(":blue[MOCKVIEWER - Face Monitoring System]")
# Load the Haar Cascade models that ship with OpenCV: one for frontal faces,
# one for eyes (used by the pose heuristic).
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_eye.xml")
# Upload the reference image used for periodic identity verification.
uploaded_image = st.file_uploader("Upload your reference face image", type=["jpg", "jpeg", "png"])
if uploaded_image:
    # Decode to a BGR array (OpenCV convention); DeepFace's opencv backend
    # expects BGR, so keep user_ref_img as-is for verification.
    user_ref_img = cv2.imdecode(np.frombuffer(uploaded_image.read(), np.uint8), cv2.IMREAD_COLOR)
    # st.image expects RGB — convert for display only, otherwise the preview
    # shows swapped red/blue channels.
    st.image(cv2.cvtColor(user_ref_img, cv2.COLOR_BGR2RGB),
             caption="Reference Image", use_column_width=True)

# Module-level timers (kept for backward compatibility; VideoProcessor holds
# its own per-session copies).
face_detected_time = time.time()  # moment a face was last seen
last_verified_time = 0            # moment identity was last checked (0 = never)

# Configs
FACE_TIMEOUT = 60     # seconds of face absence before cancelling
VERIFY_INTERVAL = 30  # seconds between face identity checks
def is_confident_pose(face_roi):
    """Very basic heuristic -- treat the pose as confident when eyes are visible.

    Args:
        face_roi: BGR face crop (NumPy array) taken from the full frame.

    Returns:
        True if the eye cascade detects at least one eye in the crop.
    """
    gray = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)
    eyes = eye_cascade.detectMultiScale(gray, 1.1, 4)
    return len(eyes) >= 1
def match_identity(live_face, ref_img):
    """Return True if DeepFace judges live_face and ref_img to be the same person.

    Best-effort: any DeepFace failure is logged and reported as "not verified"
    so a verification hiccup never crashes the video pipeline.

    Args:
        live_face: BGR face crop from the current frame.
        ref_img: BGR reference image uploaded by the user.
    """
    try:
        result = DeepFace.verify(
            live_face,
            ref_img,
            enforce_detection=False,  # the crop is already a detected face
            model_name="Facenet",
            detector_backend="opencv",
        )
        return result["verified"]
    except Exception as e:
        print("Verification error:", e)
        return False
class VideoProcessor:
    """Per-connection frame processor for streamlit-webrtc.

    Tracks face presence and periodically re-verifies identity against the
    uploaded reference image. State lives on the instance (one per WebRTC
    session) instead of module globals, so concurrent sessions don't
    interfere with each other. The original __init__ attributes were never
    read while recv() mutated module globals; this unifies the two.
    """

    def __init__(self):
        # Moment a face was last seen; seeds the absence timer.
        self.face_detected_time = time.time()
        # 0 forces an identity check on the first frame that contains a face
        # (matches the original module-level last_verified_time = 0).
        self.last_verified_time = 0

    def recv(self, frame):
        """Annotate one incoming video frame and return it.

        Draws presence warnings, pose feedback, and identity-check results
        onto the frame with OpenCV. Overlay text is plain ASCII because the
        cv2 Hershey fonts cannot render emoji (the original strings were
        mojibake-corrupted emoji bytes).
        """
        img = frame.to_ndarray(format="bgr24")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)

        if len(faces) == 0:
            # NOTE(review): after the timeout this only draws text; it does
            # not actually stop or cancel the stream.
            if time.time() - self.face_detected_time > FACE_TIMEOUT:
                cv2.putText(img, "[X] Interview Cancelled: Face not visible!", (30, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
            else:
                cv2.putText(img, "[!] Face not visible. You have 60 seconds.", (30, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
        else:
            self.face_detected_time = time.time()
            for (x, y, w, h) in faces:
                face_roi = img[y:y + h, x:x + w]

                # Eye and posture check.
                confident = is_confident_pose(face_roi)
                status = "[OK] Confident Pose" if confident else "[!] Look Straight!"
                color = (0, 255, 0) if confident else (0, 255, 255)
                cv2.putText(img, status, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
                cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)

                # Identity check every VERIFY_INTERVAL seconds — DeepFace is
                # far too slow to run on every frame.
                if uploaded_image and (time.time() - self.last_verified_time) > VERIFY_INTERVAL:
                    matched = match_identity(face_roi, user_ref_img)
                    self.last_verified_time = time.time()
                    if not matched:
                        cv2.putText(img, "[X] Identity mismatch!", (x, y + h + 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
                    else:
                        cv2.putText(img, "[OK] Identity Verified", (x, y + h + 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

        return av.VideoFrame.from_ndarray(img, format="bgr24")
# SENDRECV: the browser must SEND its webcam stream for recv() to process and
# RECEIVE the annotated frames back. RECVONLY never captures the user's
# camera, so the monitor would have no video to analyze.
webrtc_streamer(
    key="monitor",
    video_processor_factory=VideoProcessor,
    mode=WebRtcMode.SENDRECV,
    # Empty ICE server list disables STUN: fine on localhost, but NAT-ed
    # clients will fail to connect without a STUN/TURN server.
    rtc_configuration=RTCConfiguration(iceServers=[]),
)