Testys committed on
Commit
d918bfb
Β·
verified Β·
1 Parent(s): e5303f6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -94
app.py CHANGED
@@ -131,82 +131,40 @@ def process_live_frame(frame):
131
  return processed, status_txt, None
132
 
133
 
134
-
135
- # Constants for the video experiment
136
- VIDEO_FPS = 30.0
137
- CHUNK_SIZE_SECONDS = 2
138
- CHUNK_FRAME_COUNT = int(VIDEO_FPS * CHUNK_SIZE_SECONDS)
139
- TEMP_VIDEO_FILE = "temp_video_chunk.mp4"
140
-
141
- def process_video_chunk(frame, state_dict):
142
  if frame is None:
143
- return None, "Status: Inactive", None, state_dict
144
-
145
- # Unpack the state
146
- frame_buffer = state_dict['buffer']
147
- start_time = state_dict['start_time']
148
- last_buffer_fill_time = state_dict['last_buffer_fill_time']
149
- last_encoding_time = state_dict['last_encoding_time']
150
-
151
- # If the buffer is empty, this is the first frame of a new chunk. Record start time.
152
- if not frame_buffer:
153
- start_time = time.perf_counter()
154
 
155
- # --- Real-time detection and alerting (This is not delayed) ---
156
  try:
157
- processed_frame, indic = detector.process_frame(frame)
 
158
  except Exception as e:
159
  logging.error(f"Error processing frame: {e}")
160
- processed_frame = np.zeros_like(frame)
161
  indic = {"drowsiness_level": "Error", "lighting": "Unknown", "details": {"Score": 0.0}}
162
 
163
  level = indic.get("drowsiness_level", "Awake")
164
  lighting = indic.get("lighting", "Good")
165
  score = indic.get("details", {}).get("Score", 0.0)
166
- audio_payload = alert_manager.trigger_alert(level, lighting)
167
- audio_out = gr.Audio(value=audio_payload, autoplay=True) if audio_payload else None
168
 
169
- # --- Video Buffering Logic ---
170
- frame_buffer.append(processed_frame)
171
-
172
- video_out = None
173
- if len(frame_buffer) >= CHUNK_FRAME_COUNT:
174
- buffer_fill_time = time.perf_counter() - start_time
175
- logging.info(f"BUFFER: Filled {len(frame_buffer)} frames in {buffer_fill_time:.2f} seconds.")
176
-
177
- encoding_start_time = time.perf_counter()
178
- h, w, _ = frame_buffer[0].shape
179
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
180
- writer = cv2.VideoWriter(TEMP_VIDEO_FILE, fourcc, VIDEO_FPS, (w, h))
181
- for f in frame_buffer:
182
- writer.write(f)
183
- writer.release()
184
- encoding_time = time.perf_counter() - encoding_start_time
185
- logging.info(f"ENCODING: Video chunk encoded in {encoding_time:.2f} seconds.")
186
-
187
- video_out = TEMP_VIDEO_FILE
188
- last_buffer_fill_time = buffer_fill_time
189
- last_encoding_time = encoding_time
190
- frame_buffer = []
191
- start_time = None
192
 
193
- # --- NEW: Update status text to include buffer/encoding times ---
194
  status_txt = (
195
  f"Status: {level} (Score: {score:.2f})\n"
196
- f"Buffer Fill Time: {last_buffer_fill_time:.2f}s\n"
197
- f"Video Encode Time: {last_encoding_time:.2f}s"
198
  )
199
- logging.info(f"Status: {status_txt}")
200
-
201
- # Pack state back up to return
202
- new_state = {
203
- 'buffer': frame_buffer,
204
- 'start_time': start_time,
205
- 'last_buffer_fill_time': last_buffer_fill_time,
206
- 'last_encoding_time': last_encoding_time
207
- }
208
-
209
- return video_out, status_txt, audio_out, new_state
210
 
211
 
212
  # ───────────────────────────── UI Definition
@@ -287,46 +245,33 @@ def create_detection_tab():
287
  inputs=[cam],
288
  outputs=[out_img, out_text, out_audio] # The output now targets the placeholder
289
  )
290
-
291
- def create_video_experiment_tab():
292
- """Creates the content for the Video Chunk experiment tab - UPDATED."""
293
- with gr.Blocks() as video_tab:
294
- gr.Markdown("## πŸ§ͺ Video Output Experiment")
295
- gr.Markdown(f"This feed buffers processed frames and outputs them as **{CHUNK_SIZE_SECONDS}-second video chunks**. Notice the trade-off between smoothness and latency. Alerts remain real-time.")
296
- with gr.Row():
297
- with gr.Column(scale=2):
298
- cam_video = gr.Image(sources=["webcam"], streaming=True, label="Live Camera Feed")
299
- with gr.Column(scale=1):
300
- # --- NEW: Use the blank video as the initial value ---
301
- out_video = gr.Video(label="Processed Video Chunk", value=BLANK_VIDEO_FILE)
302
- # --- NEW: Textbox is now larger to show more info ---
303
- out_text_video = gr.Textbox(label="Live Status", lines=4, interactive=False)
304
- out_audio_video = gr.Audio(label="Alert", autoplay=True, visible=False)
305
-
306
- # --- NEW: State is a dictionary to hold the buffer and timing info ---
307
- initial_state = {
308
- 'buffer': [],
309
- 'start_time': None,
310
- 'last_buffer_fill_time': 0.0,
311
- 'last_encoding_time': 0.0
312
- }
313
- state = gr.State(initial_state)
314
 
315
- cam_video.stream(
316
- fn=process_video_chunk,
317
- inputs=[cam_video, state],
318
- outputs=[out_video, out_text_video, out_audio_video, state]
319
- )
320
- return video_tab
321
-
 
 
 
 
 
 
 
 
 
 
 
322
 
323
  with gr.Blocks(title="πŸš— Drive Paddy – Drowsiness Detection", theme=gr.themes.Soft()) as app:
324
  gr.Markdown("# πŸš— **Drive Paddy**")
325
  with gr.Tabs():
326
  with gr.TabItem("Live Detection"):
327
  create_detection_tab()
328
- with gr.TabItem("Video Output Experiment"):
329
- create_video_experiment_tab()
330
  with gr.TabItem("About this App"):
331
  create_readme_tab()
332
 
 
131
  return processed, status_txt, None
132
 
133
 
134
+ # ───────────────────────────── NEW: Frame Processing for Tab 2 (Analysis-Only)
135
+ def process_for_stats_only(frame):
136
+ """
137
+ Processes a frame but does not return any video/image output.
138
+ This is the fastest method, focused only on status and alerts.
139
+ """
 
 
140
  if frame is None:
141
+ return "Status: Inactive", None
 
 
 
 
 
 
 
 
 
 
142
 
143
+ t0 = time.perf_counter()
144
  try:
145
+ # We still call the same detector, but we will ignore the processed frame it returns.
146
+ _, indic = detector.process_frame(frame)
147
  except Exception as e:
148
  logging.error(f"Error processing frame: {e}")
 
149
  indic = {"drowsiness_level": "Error", "lighting": "Unknown", "details": {"Score": 0.0}}
150
 
151
  level = indic.get("drowsiness_level", "Awake")
152
  lighting = indic.get("lighting", "Good")
153
  score = indic.get("details", {}).get("Score", 0.0)
 
 
154
 
155
+ dt_ms = (time.perf_counter() - t0) * 1000.0
156
+ logging.info(f"ANALYSIS ONLY β”‚ {dt_ms:6.1f} ms β”‚ {lighting:<4} β”‚ {level:<14} β”‚ score={score:.2f}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
157
 
 
158
  status_txt = (
159
  f"Status: {level} (Score: {score:.2f})\n"
160
+ f"Lighting: {lighting}\n"
161
+ f"Processing Time: {dt_ms:.1f} ms"
162
  )
163
+
164
+ audio_payload = alert_manager.trigger_alert(level, lighting)
165
+ audio_out = gr.Audio(value=audio_payload, autoplay=True) if audio_payload else None
166
+
167
+ return status_txt, audio_out
 
 
 
 
 
 
168
 
169
 
170
  # ───────────────────────────── UI Definition
 
245
  inputs=[cam],
246
  outputs=[out_img, out_text, out_audio] # The output now targets the placeholder
247
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
 
249
+ def create_analysis_only_tab():
250
+ """Creates the content for the Analysis-Only Mode tab."""
251
+ gr.Markdown("## ⚑ Analysis-Only Mode")
252
+ gr.Markdown("This mode provides the fastest possible analysis by not sending any video back to the browser. The camera is still active for detection, but you will only see the live status and hear alerts.")
253
+ with gr.Row():
254
+ with gr.Column(scale=1):
255
+ # The input camera is visible so the user knows it's working,
256
+ # but there is no corresponding video output component.
257
+ cam_analysis = gr.Image(sources=["webcam"], streaming=True, label="Live Camera Feed (for detection)")
258
+ with gr.Column(scale=1):
259
+ out_text_analysis = gr.Textbox(label="Live Status & Performance", lines=4, interactive=False)
260
+ out_audio_analysis = gr.Audio(label="Alert", autoplay=True, visible=False)
261
+
262
+ cam_analysis.stream(
263
+ fn=process_for_stats_only,
264
+ inputs=[cam_analysis],
265
+ outputs=[out_text_analysis, out_audio_analysis]
266
+ )
267
 
268
  with gr.Blocks(title="πŸš— Drive Paddy – Drowsiness Detection", theme=gr.themes.Soft()) as app:
269
  gr.Markdown("# πŸš— **Drive Paddy**")
270
  with gr.Tabs():
271
  with gr.TabItem("Live Detection"):
272
  create_detection_tab()
273
+ with gr.TabItem("Analysis-Only Mode"):
274
+ create_analysis_only_tab()
275
  with gr.TabItem("About this App"):
276
  create_readme_tab()
277