Testimony Adekoya committed
Commit 8eabd81 · 1 Parent(s): d305df8

I'm so tired of this, man

Files changed (1)
  1. pages/1_Live_Detection.py +20 -6
pages/1_Live_Detection.py CHANGED
@@ -51,24 +51,36 @@ def autoplay_audio(audio_bytes: bytes):
 
 # --- WebRTC Video Processor ---
 class VideoProcessor(VideoProcessorBase):
-    def __init__(self):
+    def __init__(self, status_queue: queue.Queue, audio_queue: queue.Queue):
+        self.status_queue = status_queue
+        self.audio_queue = audio_queue
         self._detector = get_detector(config)
         self._alerter = get_alerter(config, secrets["gemini_api_key"])
-        # Thread-safe queues for communication
-        self.status_queue = queue.Queue()
-        self.audio_queue = queue.Queue()
 
     def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
         img = frame.to_ndarray(format="bgr24")
 
         strategy = config.get('detection_strategy')
+
+        # The return signature of process_frame varies by strategy.
+        # We need to handle each case correctly.
         if strategy == 'hybrid':
             processed_frame, alert_triggered, active_alerts = self._detector.process_frame(img)
-            self.status_queue.put(active_alerts if alert_triggered else {"status": "Awake"})
-        else:
+            self.status_queue.put(active_alerts if alert_triggered or 'Low Light' in active_alerts else {"status": "Awake"})
+        elif strategy == 'geometric':
+            # The geometric processor returns frame, indicators, and landmarks.
+            processed_frame, indicators, _ = self._detector.process_frame(img)
+            alert_triggered = any(v for k, v in indicators.items() if k not in ['low_light', 'details'])
+            self.status_queue.put(indicators if alert_triggered or indicators.get('low_light') else {"status": "Awake"})
+        elif strategy == 'cnn_model':
+            # The cnn_model processor returns frame and indicators.
             processed_frame, indicators = self._detector.process_frame(img)
             alert_triggered = any(indicators.values())
             self.status_queue.put(indicators if alert_triggered else {"status": "Awake"})
+        else:
+            # Default case if strategy is unknown
+            processed_frame = img
+            alert_triggered = False
 
         if alert_triggered:
             audio_data = self._alerter.trigger_alert()
@@ -78,6 +90,8 @@ class VideoProcessor(VideoProcessorBase):
             self._alerter.reset_alert()
 
         return av.VideoFrame.from_ndarray(processed_frame, format="bgr24")
+
+
 # --- Page UI ---
 st.title("📹 Live Drowsiness Detection")
 st.info("Press 'START' to activate your camera and begin monitoring.")
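Since the queues are now injected through the constructor instead of being created inside __init__, the page script (not shown in this hunk) has to build them and hand the processor a factory. Below is a minimal sketch of that wiring for the same file, assuming streamlit-webrtc's video_processor_factory API and session_state-backed queues; names such as "live-detection" and status_placeholder are illustrative, not taken from the repository.

# Minimal wiring sketch (assumption, not part of this commit): create the queues
# once per session and pass them to VideoProcessor via video_processor_factory,
# so the Streamlit rerun loop can read status produced by recv() in the worker thread.
import queue

import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

# Keep the same queue objects across Streamlit reruns.
if "status_queue" not in st.session_state:
    st.session_state.status_queue = queue.Queue()
if "audio_queue" not in st.session_state:
    st.session_state.audio_queue = queue.Queue()

ctx = webrtc_streamer(
    key="live-detection",  # hypothetical widget key
    mode=WebRtcMode.SENDRECV,
    video_processor_factory=lambda: VideoProcessor(
        st.session_state.status_queue, st.session_state.audio_queue
    ),
    media_stream_constraints={"video": True, "audio": False},
    async_processing=True,
)

# Drain the latest status pushed by recv() without blocking the UI.
status_placeholder = st.empty()  # hypothetical placeholder for the status readout
if ctx.state.playing:
    try:
        status_placeholder.json(st.session_state.status_queue.get_nowait())
    except queue.Empty:
        pass

Creating the queues in the page script rather than inside __init__ means the Streamlit script and the WebRTC worker thread share the same queue objects, which is presumably the motivation for changing the constructor to accept them as parameters.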