Testys committed
Commit 6bf90e5 · verified · 1 Parent(s): f131341

Update app.py

Files changed (1)
  1. app.py +77 -0
app.py CHANGED
@@ -136,6 +136,58 @@ def process_live_frame(frame):
     return processed, status_txt, None
 
 
+# Constants for the video experiment
+VIDEO_FPS = 30.0
+CHUNK_SIZE_SECONDS = 2
+CHUNK_FRAME_COUNT = int(VIDEO_FPS * CHUNK_SIZE_SECONDS)
+TEMP_VIDEO_FILE = "temp_video_chunk.mp4"
+
+def process_video_chunk(frame, frame_buffer):
+    """
+    Processes a single frame, adds it to a buffer, and encodes a video chunk
+    when the buffer is full. The alert system remains real-time.
+    """
+    if frame is None:
+        return None, "Status: Inactive", None, []  # Return empty buffer
+
+    # --- Real-time detection and alerting (This is not delayed) ---
+    try:
+        processed_frame, indic = detector.process_frame(frame)
+    except Exception as e:
+        logging.error(f"Error processing frame: {e}")
+        processed_frame = np.zeros_like(frame)
+        indic = {"drowsiness_level": "Error", "lighting": "Unknown", "details": {"Score": 0.0}}
+
+    level = indic.get("drowsiness_level", "Awake")
+    lighting = indic.get("lighting", "Good")
+    score = indic.get("details", {}).get("Score", 0.0)
+    status_txt = f"Lighting: {lighting}\nStatus: {level}\nScore: {score:.2f}"
+
+    audio_payload = alert_manager.trigger_alert(level, lighting)
+    audio_out = gr.Audio(value=audio_payload, autoplay=True) if audio_payload else None
+
+    # --- Video Buffering Logic ---
+    frame_buffer.append(processed_frame)
+
+    video_out = None  # No video output until the chunk is ready
+    if len(frame_buffer) >= CHUNK_FRAME_COUNT:
+        logging.info(f"Buffer full. Encoding {len(frame_buffer)} frames to video chunk...")
+        # Encode the buffer to a video file
+        h, w, _ = frame_buffer[0].shape
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        writer = cv2.VideoWriter(TEMP_VIDEO_FILE, fourcc, VIDEO_FPS, (w, h))
+        for f in frame_buffer:
+            writer.write(f)
+        writer.release()
+
+        video_out = TEMP_VIDEO_FILE  # Set the output to the new video file path
+        frame_buffer = []  # Clear the buffer for the next chunk
+        logging.info("Encoding complete. Sending video to frontend.")
+
+    # Note: Status and Audio are returned on every frame for real-time feedback
+    return video_out, status_txt, audio_out, frame_buffer
+
+
 # ───────────────────────────── UI Definition
 def create_readme_tab():
     """Creates the content for the 'About' tab."""
@@ -215,11 +267,36 @@ def create_detection_tab():
         outputs=[out_img, out_text, out_audio]  # The output now targets the placeholder
     )
 
+def create_video_experiment_tab():
+    """Creates the content for the Video Chunk experiment tab."""
+    with gr.Blocks() as video_tab:
+        gr.Markdown("## 🧪 Video Output Experiment")
+        gr.Markdown(f"This feed buffers processed frames and outputs them as **{CHUNK_SIZE_SECONDS}-second video chunks**. Notice the trade-off between smoothness and latency. Alerts remain real-time.")
+        with gr.Row():
+            with gr.Column(scale=2):
+                cam_video = gr.Image(sources=["webcam"], streaming=True, label="Live Camera Feed")
+            with gr.Column(scale=1):
+                out_video = gr.Video(label="Processed Video Chunk")
+                out_text_video = gr.Textbox(label="Live Status", lines=3, interactive=False)
+                out_audio_video = gr.Audio(label="Alert", autoplay=True, visible=False)
+
+        # State to hold the buffer of frames between updates
+        frame_buffer_state = gr.State([])
+
+        cam_video.stream(
+            fn=process_video_chunk,
+            inputs=[cam_video, frame_buffer_state],
+            outputs=[out_video, out_text_video, out_audio_video, frame_buffer_state]
+        )
+    return video_tab
+
 with gr.Blocks(title="🚗 Drive Paddy – Drowsiness Detection", theme=gr.themes.Soft()) as app:
     gr.Markdown("# 🚗 **Drive Paddy**")
     with gr.Tabs():
         with gr.TabItem("Live Detection"):
             create_detection_tab()
+        with gr.TabItem("Video Output Experiment"):
+            create_video_experiment_tab()
         with gr.TabItem("About this App"):
             create_readme_tab()
 
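
For anyone who wants to try the buffer-and-encode cycle added in this commit outside the Gradio app: at 30 FPS and 2-second chunks each chunk is 60 frames, so the processed video lags the live feed by roughly 2 seconds while status and audio stay per-frame. Below is a minimal standalone sketch of that cycle. It assumes only numpy and OpenCV; the synthetic 640x480 frames, the frame values, and the print statement are illustrative stand-ins, and the detector, alert manager, and Gradio wiring from app.py are deliberately left out.

# Minimal offline sketch of the chunk buffering/encoding path (assumptions:
# synthetic frames instead of webcam input; not the app's real pipeline).
import cv2
import numpy as np

VIDEO_FPS = 30.0
CHUNK_SIZE_SECONDS = 2
CHUNK_FRAME_COUNT = int(VIDEO_FPS * CHUNK_SIZE_SECONDS)  # 60 frames per 2 s chunk
TEMP_VIDEO_FILE = "temp_video_chunk.mp4"

frame_buffer = []
for i in range(CHUNK_FRAME_COUNT):
    # Fake a 480x640 BGR frame whose brightness encodes the frame index.
    frame = np.full((480, 640, 3), (i * 4) % 256, dtype=np.uint8)
    frame_buffer.append(frame)

    if len(frame_buffer) >= CHUNK_FRAME_COUNT:
        # Same encoding step as process_video_chunk: mp4v container at the stream FPS.
        h, w, _ = frame_buffer[0].shape
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        writer = cv2.VideoWriter(TEMP_VIDEO_FILE, fourcc, VIDEO_FPS, (w, h))
        for f in frame_buffer:
            writer.write(f)
        writer.release()
        frame_buffer = []  # ready for the next chunk
        print(f"Wrote {CHUNK_FRAME_COUNT} frames to {TEMP_VIDEO_FILE}")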