import os
import time
import cv2
import av
import numpy as np
import streamlit as st
from deepface import DeepFace
from streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration
# ---------------------------------------------
# Streamlit Page Config
# ---------------------------------------------
st.set_page_config(page_title="AI Facial Interview Monitor", layout="wide")
st.title(":blue[MOCKVIEWER - Face Monitoring System]")
# ---------------------------------------------
# Load Haar Cascade Models
# ---------------------------------------------
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_eye.xml")
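# Note: cv2.data.haarcascades resolves to the cascade XML files bundled with the
# opencv-python package, so face/eye detection here needs no extra model downloads.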
# ---------------------------------------------
# Upload Reference Image
# ---------------------------------------------
uploaded_image = st.file_uploader("Upload your reference face image", type=["jpg", "jpeg", "png"])
user_ref_img = None
if uploaded_image:
    user_ref_img = cv2.imdecode(np.frombuffer(uploaded_image.read(), np.uint8), cv2.IMREAD_COLOR)
    # cv2.imdecode returns a BGR array, so tell st.image to interpret the channels as BGR
    st.image(user_ref_img, caption="Reference Image", channels="BGR", use_column_width=True)
# ---------------------------------------------
# Global State Variables
# ---------------------------------------------
face_detected_time = time.time()
last_verified_time = 0
# ---------------------------------------------
# Configurations
# ---------------------------------------------
FACE_TIMEOUT = 60 # seconds of face absence before cancelling
VERIFY_INTERVAL = 30 # seconds between identity checks
# ---------------------------------------------
# Confidence Heuristic
# ---------------------------------------------
def is_confident_pose(face_roi):
    """Heuristic: treat the pose as confident if at least one eye is detectable in the face crop."""
    gray = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)
    eyes = eye_cascade.detectMultiScale(gray, 1.1, 4)
    return len(eyes) >= 1
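# The positional arguments (1.1, 4) above are detectMultiScale's scaleFactor and
# minNeighbors; raising minNeighbors reduces false eye detections at the cost of
# occasionally missing real ones.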
# ---------------------------------------------
# 𧬠Identity Verification
# ---------------------------------------------
def match_identity(live_face, ref_img):
    """Compare the live face crop against the uploaded reference image with DeepFace."""
    try:
        result = DeepFace.verify(
            live_face, ref_img,
            enforce_detection=False,
            model_name='Facenet',
            detector_backend='opencv'
        )
        return result["verified"]
    except Exception as e:
        print("Verification error:", e)
        return False
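# Note: DeepFace.verify typically downloads the pretrained Facenet weights on its
# first call and runs a full embedding comparison, so it is relatively expensive;
# the VERIFY_INTERVAL throttle in the video processor below keeps it off the
# per-frame hot path.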
# ---------------------------------------------
# Streamlit Webcam Video Processor
# ---------------------------------------------
class VideoProcessor:
    def __init__(self):
        self.last_check = time.time()
        self.last_verified = time.time()
        self.face_missing = False

    def recv(self, frame):
        global face_detected_time, last_verified_time, user_ref_img
        img = frame.to_ndarray(format="bgr24")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)

        if len(faces) == 0:
            if time.time() - face_detected_time > FACE_TIMEOUT:
                cv2.putText(img, "Interview Cancelled: Face not visible!", (30, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
            else:
                cv2.putText(img, "Face not visible. You have 60 seconds.", (30, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
        else:
            face_detected_time = time.time()
            for (x, y, w, h) in faces:
                face_roi = img[y:y+h, x:x+w]

                # Eye/Posture Check
                confident = is_confident_pose(face_roi)
                status = "Confident Pose" if confident else "Look Straight!"
                color = (0, 255, 0) if confident else (0, 255, 255)
                cv2.putText(img, status, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
                cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)

                # Identity Verification (throttled to once per VERIFY_INTERVAL)
                if uploaded_image and (time.time() - last_verified_time > VERIFY_INTERVAL):
                    matched = match_identity(face_roi, user_ref_img)
                    last_verified_time = time.time()
                    if matched:
                        cv2.putText(img, "Identity Verified", (x, y + h + 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
                    else:
                        cv2.putText(img, "Identity mismatch!", (x, y + h + 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)

        return av.VideoFrame.from_ndarray(img, format="bgr24")
# ---------------------------------------------
# Activate Webcam Stream
# ---------------------------------------------
webrtc_streamer(
    key="monitor",
    video_processor_factory=VideoProcessor,
    mode=WebRtcMode.SENDRECV,  # browser sends its webcam stream; processed frames are returned
    rtc_configuration=RTCConfiguration({"iceServers": []})
)
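# Deployment note (assuming the script is launched locally with `streamlit run <this_file>.py`):
# an empty iceServers list generally only works when the browser and the server can reach each
# other directly (e.g. localhost). For a hosted deployment, streamlit_webrtc usually needs at
# least a public STUN server, for example:
#   rtc_configuration=RTCConfiguration({"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]})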