Testimony Adekoya committed
Commit 929f736 · 1 parent: 2a23c85
Making this commit to retest issues fiX

Files changed:
- pages/1_Live_Detection.py +39 -44
- pages/2_Test_Streamlit.py +0 -0
- src/detection/strategies/geometric.py +18 -16
pages/1_Live_Detection.py
CHANGED
@@ -8,24 +8,29 @@ from dotenv import load_dotenv
 import base64
 import queue
 import time
+from typing import List, Dict, Union
 
+# Correctly import from the drive_paddy package structure
 from src.detection.factory import get_detector
 from src.alerting.alert_system import get_alerter
 
+# --- Initialize Session State at the TOP of the script ---
+# This is the single source of truth for our queues and must run on every page load.
 if "status_queue" not in st.session_state:
     st.session_state.status_queue = queue.Queue()
 if "audio_queue" not in st.session_state:
     st.session_state.audio_queue = queue.Queue()
-if "last_status" not in st.session_state:
+if "last_status" not in st.session_state:
     st.session_state.last_status = {"drowsiness_level": "Awake", "lighting": "Good"}
 
-
+
 # --- Load Configuration and Environment Variables ---
 @st.cache_resource
 def load_app_config():
     """Loads config from yaml and .env files."""
     load_dotenv()
-
+    # Navigate up to the root to find the config file
+    config_path = os.path.join(os.path.dirname(__file__), '..', '..', 'config.yaml')
     with open(config_path, 'r') as f:
         config = yaml.safe_load(f)
     # Load secrets from environment

@@ -51,12 +56,13 @@ def autoplay_audio(audio_bytes: bytes):
 
 # --- WebRTC Video Processor ---
 class VideoProcessor(VideoProcessorBase):
-    def __init__(self):
+    # The __init__ method now accepts the queues as arguments
+    def __init__(self, status_queue: queue.Queue, audio_queue: queue.Queue):
+        # It uses the queues passed in from session_state, not new ones.
+        self.status_queue = status_queue
+        self.audio_queue = audio_queue
         self._detector = get_detector(config)
         self._alerter = get_alerter(config, secrets["gemini_api_key"])
-        # Thread-safe queues for communication
-        self.status_queue = queue.Queue()
-        self.audio_queue = queue.Queue()
 
     def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
         img = frame.to_ndarray(format="bgr24")

@@ -64,30 +70,16 @@ class VideoProcessor(VideoProcessorBase):
         strategy = config.get('detection_strategy')
 
         # The return signature of process_frame varies by strategy.
-        if strategy == 'geometric':
-            # The geometric processor returns frame, indicators, and landmarks.
-            processed_frame, indicators, _ = self._detector.process_frame(img)
-            alert_triggered = any(v for k, v in indicators.items() if k not in ['low_light', 'details'])
-            self.status_queue.put(indicators if alert_triggered or indicators.get('low_light') else {"status": "Awake"})
-            print(f"Indicators: {indicators}") # Debugging line
-            print(self.status_queue) # Debugging line
-        elif strategy == 'cnn_model':
-            # The cnn_model processor returns frame and indicators.
-            processed_frame, indicators = self._detector.process_frame(img)
-            alert_triggered = any(indicators.values())
-            self.status_queue.put(indicators if alert_triggered else {"status": "Awake"})
-        else:
-            # Default case if strategy is unknown
-            processed_frame = img
-            alert_triggered = False
+        processed_frame, indicators, _ = self._detector.process_frame(img)
+        drowsiness_level = indicators.get("drowsiness_level", "Awake")
+
+        # This now correctly puts data into the shared session_state queue.
+        self.status_queue.put(indicators)
 
-        if alert_triggered:
-            audio_data = self._alerter.trigger_alert()
+        if drowsiness_level != "Awake":
+            audio_data = self._alerter.trigger_alert(level=drowsiness_level)
             if audio_data:
+                # This now correctly puts audio data into the shared queue.
                 self.audio_queue.put(audio_data)
         else:
             self._alerter.reset_alert()

@@ -109,19 +101,25 @@ if secrets["turn_username"] and secrets["turn_credential"]:
 
 RTC_CONFIGURATION = RTCConfiguration({"iceServers": ice_servers})
 
+
 col1, col2 = st.columns([3, 1])
 
 with col1:
     webrtc_ctx = webrtc_streamer(
         key="drowsiness-detection",
-        video_processor_factory=VideoProcessor,
-        rtc_configuration=RTC_CONFIGURATION,
+        # The factory now correctly passes the queues from session_state
+        video_processor_factory=lambda: VideoProcessor(
+            status_queue=st.session_state.status_queue,
+            audio_queue=st.session_state.audio_queue
+        ),
+        rtc_configuration=RTC_CONFIGURATION,
         media_stream_constraints={"video": True, "audio": False},
         async_processing=True,
     )
 
 with col2:
     st.header("System Status")
+    audio_placeholder = st.empty()
     if not webrtc_ctx.state.playing:
         st.warning("System Inactive.")
     else:

@@ -129,14 +127,10 @@ with col2:
 
         st.subheader("Live Status:")
         status_placeholder = st.empty()
-        audio_placeholder = st.empty()
 
 if webrtc_ctx.state.playing:
-    if "last_status" not in st.session_state:
-        st.session_state.last_status = {"status": "Awake"}
-
     try:
-        # This
+        # This now reads from the correct queue that the processor is writing to.
         status_result = st.session_state.status_queue.get(timeout=0.1)
         st.session_state.last_status = status_result
     except queue.Empty:

@@ -149,13 +143,16 @@ if webrtc_ctx.state.playing:
         score = last_status.get("details", {}).get("Score", 0)
 
         st.metric(label="Lighting Condition", value=lighting)
+        if lighting == "Low":
+            st.warning("Detection paused due to low light.")
 
-        if drowsiness_level == "Awake":
-        elif drowsiness_level == "
+        if drowsiness_level == "Awake":
+            st.info(f"✔️ Awake (Score: {score:.2f})")
+        elif drowsiness_level == "Slightly Drowsy":
+            st.warning(f"⚠️ Slightly Drowsy (Score: {score:.2f})")
+        elif drowsiness_level == "Very Drowsy":
+            st.error(f"🚨 Very Drowsy! (Score: {score:.2f})")
+
         try:
             audio_data = st.session_state.audio_queue.get(timeout=0.1)
             with audio_placeholder.container():

@@ -168,5 +165,3 @@ if webrtc_ctx.state.playing:
         else:
             with status_placeholder.container():
                 st.info("✔️ Driver is Awake")
-
-
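The crux of this change is visible in the hunks above: streamlit-webrtc calls recv() on a background worker thread, so queues created inside the processor's __init__ are never the ones the Streamlit script thread polls on rerun. Passing the session_state queues in through video_processor_factory makes both threads share a single object. A minimal sketch of the pattern, separate from this app's code (StatusProcessor and the "demo" key are illustrative names, not from the commit):

import queue

import av
import streamlit as st
from streamlit_webrtc import VideoProcessorBase, webrtc_streamer

# Create the queue once per browser session; script reruns reuse it.
if "status_queue" not in st.session_state:
    st.session_state.status_queue = queue.Queue()

class StatusProcessor(VideoProcessorBase):
    def __init__(self, status_queue: queue.Queue) -> None:
        # Hold a reference to the session's queue instead of creating a new
        # one here; recv() runs on a worker thread, and this shared object
        # is the only bridge back to the script thread.
        self.status_queue = status_queue

    def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
        # ... run per-frame detection here ...
        self.status_queue.put({"status": "Awake"})
        return frame

webrtc_streamer(
    key="demo",
    # The lambda captures the session's queue when the stream starts.
    video_processor_factory=lambda: StatusProcessor(st.session_state.status_queue),
)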
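On the script side, the page drains that shared queue on every rerun. A short get(timeout=...) keeps the rerun loop responsive, and caching the latest reading in session_state stops the status panel from going blank when no new frame result has arrived. A minimal sketch, assuming status_queue was created in session_state as above:

import queue

import streamlit as st

# Keep the most recent reading across reruns so the UI never goes blank
# between queue updates.
if "last_status" not in st.session_state:
    st.session_state.last_status = {"drowsiness_level": "Awake"}

try:
    # A short timeout keeps the rerun loop responsive; queue.Empty simply
    # means no new status has arrived since the last rerun.
    st.session_state.last_status = st.session_state.status_queue.get(timeout=0.1)
except queue.Empty:
    pass

st.write(st.session_state.last_status)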
pages/2_Test_Streamlit.py
ADDED
File without changes
src/detection/strategies/geometric.py
CHANGED
@@ -3,7 +3,7 @@ import cv2
 import mediapipe as mp
 import numpy as np
 import math
-from
+from ..base_processor import BaseProcessor
 
 # --- Helper Functions (No changes here) ---
 def calculate_ear(eye_landmarks, frame_shape):

@@ -37,9 +37,7 @@ class GeometricProcessor(BaseProcessor):
         }
         face_landmarks = None
 
-        if is_low_light:
-            drowsiness_indicators["lighting"] = "Low"
-        else:
+        if not is_low_light:
             img_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             results = self.face_mesh.process(img_rgb)
             face_landmarks = results.multi_face_landmarks

@@ -49,22 +47,28 @@ class GeometricProcessor(BaseProcessor):
             score = 0
             weights = self.settings['indicator_weights']
 
-            #
+            # --- Draw Facial Landmarks (Logic Added Back) ---
+            # This will draw the green dots for eyes and mouth.
+            eye_mouth_landmarks = self.L_EYE + self.R_EYE + self.MOUTH
+            for idx in eye_mouth_landmarks:
+                lm = landmarks[idx]
+                x, y = int(lm.x * w), int(lm.y * h)
+                cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)
+
+            # --- Drowsiness Calculations ---
             ear = (calculate_ear([landmarks[i] for i in self.L_EYE],(h,w)) + calculate_ear([landmarks[i] for i in self.R_EYE],(h,w)))/2.0
             if ear < self.settings['eye_ar_thresh']: self.counters['eye_closure']+=1
             else: self.counters['eye_closure']=0
             if self.counters['eye_closure'] >= self.settings['eye_ar_consec_frames']: score += weights['eye_closure']
 
-            # Yawning
             mar = calculate_mar([landmarks[i] for i in self.MOUTH], (h, w))
             if mar > self.settings['yawn_mar_thresh']: self.counters['yawning']+=1
             else: self.counters['yawning']=0
             if self.counters['yawning'] >= self.settings['yawn_consec_frames']: score += weights['yawning']
 
-            face_3d = np.array([[0.0,0.0,0.0],[0.0,-330.0,-65.0],[-225.0,170.0,-135.0],[225.0,170.0,-135.0],[-150.0,-150.0,-125.0],[150.0,-150.0,-125.0]], dtype=np.float64)
-            face_2d = np.array([(landmarks[1].x*w,landmarks[1].y*h),(landmarks[152].x*w,landmarks[152].y*h),(landmarks[263].x*w,landmarks[263].y*h),(landmarks[33].x*w,landmarks[33].y*h),(landmarks[287].x*w,landmarks[287].y*h),(landmarks[57].x*w,landmarks[57].y*h)], dtype=np.float64)
-            cam_matrix = np.array([[w,0,w/2],[0,w,h/2],[0,0,1]], dtype=np.float64)
+            face_3d = np.array([[0.0,0.0,0.0],[0.0,-330.0,-65.0],[-225.0,170.0,-135.0],[225.0,170.0,-135.0],[-150.0,-150.0,-125.0],[150.0,-150.0,-125.0]],dtype=np.float64)
+            face_2d = np.array([(landmarks[1].x*w,landmarks[1].y*h),(landmarks[152].x*w,landmarks[152].y*h),(landmarks[263].x*w,landmarks[263].y*h),(landmarks[33].x*w,landmarks[33].y*h),(landmarks[287].x*w,landmarks[287].y*h),(landmarks[57].x*w,landmarks[57].y*h)],dtype=np.float64)
+            cam_matrix = np.array([[w,0,w/2],[0,w,h/2],[0,0,1]],dtype=np.float64)
             _, rot_vec, _ = cv2.solvePnP(face_3d, face_2d, cam_matrix, np.zeros((4,1),dtype=np.float64))
             rmat, _ = cv2.Rodrigues(rot_vec); angles, _, _, _, _, _ = cv2.RQDecomp3x3(rmat)
             pitch, yaw = angles[0], angles[1]

@@ -76,13 +80,14 @@ class GeometricProcessor(BaseProcessor):
             if abs(yaw) > self.settings['head_look_away_thresh']: self.counters['looking_away']+=1
             else: self.counters['looking_away']=0
             if self.counters['looking_away'] >= self.settings['head_pose_consec_frames']: score += weights['looking_away']
-
-            # Determine Drowsiness Level
+
             levels = self.settings['drowsiness_levels']
             if score >= levels['very_drowsy_threshold']: drowsiness_indicators['drowsiness_level'] = "Very Drowsy"
             elif score >= levels['slightly_drowsy_threshold']: drowsiness_indicators['drowsiness_level'] = "Slightly Drowsy"
 
             drowsiness_indicators['details']['Score'] = score
+        else: # is_low_light is True
+            drowsiness_indicators["lighting"] = "Low"
 
         # --- Visualization on Video Frame ---
         level = drowsiness_indicators['drowsiness_level']

@@ -98,12 +103,9 @@ class GeometricProcessor(BaseProcessor):
         elif level == "Very Drowsy":
             color = (0, 0, 255) # Red
 
-        # Draw a colored border around the frame
         cv2.rectangle(frame, (0, 0), (w, h), color, 10)
-
-        # Display status text
         status_text = f"Status: {level} (Score: {score_val:.2f})"
         print(status_text)
         cv2.putText(frame, status_text, (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
 
-        return frame, drowsiness_indicators, face_landmarks
+        return frame, drowsiness_indicators, face_landmarks
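The helpers calculate_ear and calculate_mar are marked "No changes here" and their bodies are not part of this diff. For reference, the standard six-landmark eye-aspect-ratio computation that the call sites imply would look like the sketch below (an assumption about the helper's internals, not code from this commit). EAR falls toward zero as the eyelids close, which is why the processor counts consecutive frames below eye_ar_thresh:

import math

def _dist(p, q):
    # Euclidean distance between two (x, y) points.
    return math.hypot(p[0] - q[0], p[1] - q[1])

def calculate_ear(eye_landmarks, frame_shape):
    # eye_landmarks: six MediaPipe landmarks in the usual EAR ordering
    # (corner, top-outer, top-inner, corner, bottom-inner, bottom-outer).
    # frame_shape: (height, width), used to denormalize the coordinates.
    h, w = frame_shape
    p = [(lm.x * w, lm.y * h) for lm in eye_landmarks]
    vertical = _dist(p[1], p[5]) + _dist(p[2], p[4])
    horizontal = _dist(p[0], p[3])
    return vertical / (2.0 * horizontal)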
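The head-pose block in the hunk above estimates pitch and yaw by fitting six generic 3D face-model points to their detected 2D positions with cv2.solvePnP, under an approximate pinhole camera. Distilled into a standalone helper for clarity (head_pose_angles is an illustrative name, not from the commit):

import cv2
import numpy as np

def head_pose_angles(face_3d: np.ndarray, face_2d: np.ndarray, w: int, h: int):
    # Approximate pinhole intrinsics: focal length ~ frame width, principal
    # point at the frame center, and no lens distortion.
    cam_matrix = np.array([[w, 0, w / 2],
                           [0, w, h / 2],
                           [0, 0, 1]], dtype=np.float64)
    dist_coeffs = np.zeros((4, 1), dtype=np.float64)
    _, rot_vec, _ = cv2.solvePnP(face_3d, face_2d, cam_matrix, dist_coeffs)
    rmat, _ = cv2.Rodrigues(rot_vec)    # rotation vector -> 3x3 rotation matrix
    angles, *_ = cv2.RQDecomp3x3(rmat)  # Euler-like angles, in degrees
    pitch, yaw = angles[0], angles[1]   # nodding down vs. turning away
    return pitch, yaw

Comparing abs(pitch) and abs(yaw) against the head_down_thresh and head_look_away_thresh settings, over several consecutive frames, is what feeds the looking_away and head-nod contributions to the weighted score.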