Testys committed on
Commit
0534bd0
·
verified ·
1 Parent(s): 6bf90e5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -14
app.py CHANGED
@@ -93,7 +93,6 @@ class AlertManager:
93
  alert_manager = AlertManager(CFG["alerting"])
94
 
95
  # ───────────────────────────── frame processing <--- MAJOR CHANGE
96
- # Simplified by the AlertManager. No longer needs to pass 'last_alert_ts' back and forth.
97
  def process_live_frame(frame):
98
  if frame is None:
99
  return (
@@ -105,7 +104,6 @@ def process_live_frame(frame):
105
  t0 = time.perf_counter()
106
 
107
  try:
108
- # Assuming your detector returns (processed_image, indicators_dict)
109
  processed, indic = detector.process_frame(frame)
110
  except Exception as e:
111
  logging.error(f"Error processing frame: {e}")
@@ -125,30 +123,36 @@ def process_live_frame(frame):
125
  else f"Status: {level}\nScore: {score:.2f}")
126
  )
127
 
128
- # Check for an alert and get the audio payload if ready
129
  audio_payload = alert_manager.trigger_alert(level, lighting)
130
 
131
- # This is the key: return a new gr.Audio component when an alert fires.
132
- # Otherwise, return None to clear the component on the frontend.
133
  if audio_payload:
134
  return processed, status_txt, gr.Audio(value=audio_payload, autoplay=True)
135
  else:
136
  return processed, status_txt, None
137
 
138
 
 
139
  # Constants for the video experiment
140
  VIDEO_FPS = 30.0
141
  CHUNK_SIZE_SECONDS = 2
142
  CHUNK_FRAME_COUNT = int(VIDEO_FPS * CHUNK_SIZE_SECONDS)
143
  TEMP_VIDEO_FILE = "temp_video_chunk.mp4"
144
 
145
- def process_video_chunk(frame, frame_buffer):
146
  """
147
  Processes a single frame, adds it to a buffer, and encodes a video chunk
148
- when the buffer is full. The alert system remains real-time.
149
  """
150
  if frame is None:
151
- return None, "Status: Inactive", None, [] # Return empty buffer
 
 
 
 
 
 
 
 
152
 
153
  # --- Real-time detection and alerting (This is not delayed) ---
154
  try:
@@ -161,6 +165,7 @@ def process_video_chunk(frame, frame_buffer):
161
  level = indic.get("drowsiness_level", "Awake")
162
  lighting = indic.get("lighting", "Good")
163
  score = indic.get("details", {}).get("Score", 0.0)
 
164
  status_txt = f"Lighting: {lighting}\nStatus: {level}\nScore: {score:.2f}"
165
 
166
  audio_payload = alert_manager.trigger_alert(level, lighting)
@@ -171,7 +176,13 @@ def process_video_chunk(frame, frame_buffer):
171
 
172
  video_out = None # No video output until the chunk is ready
173
  if len(frame_buffer) >= CHUNK_FRAME_COUNT:
174
- logging.info(f"Buffer full. Encoding {len(frame_buffer)} frames to video chunk...")
 
 
 
 
 
 
175
  # Encode the buffer to a video file
176
  h, w, _ = frame_buffer[0].shape
177
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')
@@ -180,13 +191,18 @@ def process_video_chunk(frame, frame_buffer):
180
  writer.write(f)
181
  writer.release()
182
 
 
 
 
183
  video_out = TEMP_VIDEO_FILE # Set the output to the new video file path
184
  frame_buffer = [] # Clear the buffer for the next chunk
185
- logging.info("Encoding complete. Sending video to frontend.")
186
 
 
 
 
187
  # Note: Status and Audio are returned on every frame for real-time feedback
188
- return video_out, status_txt, audio_out, frame_buffer
189
-
190
 
191
  # ───────────────────────────── UI Definition
192
  def create_readme_tab():
@@ -280,8 +296,8 @@ def create_video_experiment_tab():
280
  out_text_video = gr.Textbox(label="Live Status", lines=3, interactive=False)
281
  out_audio_video = gr.Audio(label="Alert", autoplay=True, visible=False)
282
 
283
- # State to hold the buffer of frames between updates
284
- frame_buffer_state = gr.State([])
285
 
286
  cam_video.stream(
287
  fn=process_video_chunk,
 
93
  alert_manager = AlertManager(CFG["alerting"])
94
 
95
  # ───────────────────────────── frame processing <--- MAJOR CHANGE
 
96
  def process_live_frame(frame):
97
  if frame is None:
98
  return (
 
104
  t0 = time.perf_counter()
105
 
106
  try:
 
107
  processed, indic = detector.process_frame(frame)
108
  except Exception as e:
109
  logging.error(f"Error processing frame: {e}")
 
123
  else f"Status: {level}\nScore: {score:.2f}")
124
  )
125
 
 
126
  audio_payload = alert_manager.trigger_alert(level, lighting)
127
 
 
 
128
  if audio_payload:
129
  return processed, status_txt, gr.Audio(value=audio_payload, autoplay=True)
130
  else:
131
  return processed, status_txt, None
132
 
133
 
134
+
135
  # Constants for the video experiment
136
  VIDEO_FPS = 30.0
137
  CHUNK_SIZE_SECONDS = 2
138
  CHUNK_FRAME_COUNT = int(VIDEO_FPS * CHUNK_SIZE_SECONDS)
139
  TEMP_VIDEO_FILE = "temp_video_chunk.mp4"
140
 
141
+ def process_video_chunk(frame, state_dict):
142
  """
143
  Processes a single frame, adds it to a buffer, and encodes a video chunk
144
+ when the buffer is full. Now includes detailed timing logs.
145
  """
146
  if frame is None:
147
+ return None, "Status: Inactive", None, state_dict
148
+
149
+ # Unpack the state
150
+ frame_buffer = state_dict['buffer']
151
+ start_time = state_dict['start_time']
152
+
153
+ # If the buffer is empty, this is the first frame of a new chunk. Record start time.
154
+ if not frame_buffer:
155
+ start_time = time.perf_counter()
156
 
157
  # --- Real-time detection and alerting (This is not delayed) ---
158
  try:
 
165
  level = indic.get("drowsiness_level", "Awake")
166
  lighting = indic.get("lighting", "Good")
167
  score = indic.get("details", {}).get("Score", 0.0)
168
+ # The status text is updated on every single frame.
169
  status_txt = f"Lighting: {lighting}\nStatus: {level}\nScore: {score:.2f}"
170
 
171
  audio_payload = alert_manager.trigger_alert(level, lighting)
 
176
 
177
  video_out = None # No video output until the chunk is ready
178
  if len(frame_buffer) >= CHUNK_FRAME_COUNT:
179
+ # --- NEW: Logging buffer fill time ---
180
+ buffer_fill_time = time.perf_counter() - start_time
181
+ logging.info(f"BUFFER: Filled {len(frame_buffer)} frames in {buffer_fill_time:.2f} seconds.")
182
+
183
+ # --- NEW: Logging encoding time ---
184
+ encoding_start_time = time.perf_counter()
185
+
186
  # Encode the buffer to a video file
187
  h, w, _ = frame_buffer[0].shape
188
  fourcc = cv2.VideoWriter_fourcc(*'mp4v')
 
191
  writer.write(f)
192
  writer.release()
193
 
194
+ encoding_time = time.perf_counter() - encoding_start_time
195
+ logging.info(f"ENCODING: Video chunk encoded in {encoding_time:.2f} seconds.")
196
+
197
  video_out = TEMP_VIDEO_FILE # Set the output to the new video file path
198
  frame_buffer = [] # Clear the buffer for the next chunk
199
+ start_time = None # Reset start time
200
 
201
+ # Pack state back up to return
202
+ new_state = {'buffer': frame_buffer, 'start_time': start_time}
203
+
204
  # Note: Status and Audio are returned on every frame for real-time feedback
205
+ return video_out, status_txt, audio_out, new_state
 
206
 
207
  # ───────────────────────────── UI Definition
208
  def create_readme_tab():
 
296
  out_text_video = gr.Textbox(label="Live Status", lines=3, interactive=False)
297
  out_audio_video = gr.Audio(label="Alert", autoplay=True, visible=False)
298
 
299
+ # NEW: State is now a dictionary to hold the buffer and start time
300
+ frame_buffer_state = gr.State({'buffer': [], 'start_time': None})
301
 
302
  cam_video.stream(
303
  fn=process_video_chunk,