Testys committed on
Commit
5101617
·
verified ·
1 Parent(s): f39959a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +140 -0
app.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# app_gradio.py
import gradio as gr
import numpy as np
import os
import yaml
from dotenv import load_dotenv
import io
from scipy.io.wavfile import read as read_wav

# Correctly import from the drive_paddy package structure
from src.detection.factory import get_detector
from src.alerting.alert_system import get_alerter

# --- Load Configuration and Environment Variables ---
# This part is the same as our Streamlit app
# Reads GEMINI_API_KEY from the environment (populated from .env by load_dotenv)
# and the app settings from config.yaml in the working directory.
load_dotenv()
config_path = 'config.yaml'
with open(config_path, 'r') as f:
    config = yaml.safe_load(f)
secrets = {
    "gemini_api_key": os.getenv("GEMINI_API_KEY"),
}

# --- Initialize Backend Components ---
# We create these once and reuse them.
# Module-level singletons: every streamed webcam frame is handled by the
# same detector/alerter pair, so alert state persists across frames.
detector = get_detector(config)
alerter = get_alerter(config, secrets["gemini_api_key"])
29
# --- Audio Processing for Gradio ---
# Gradio's gr.Audio component needs a specific format: (sample_rate, numpy_array)
def process_audio_for_gradio(audio_bytes):
    """Converts in-memory audio bytes to a format Gradio can play."""
    mp3_stream = io.BytesIO(audio_bytes)
    try:
        # gTTS produces MP3, but scipy's WAV reader only understands WAV,
        # so transcode the bytes with pydub before decoding.
        from pydub import AudioSegment
        segment = AudioSegment.from_mp3(mp3_stream)

        wav_stream = io.BytesIO()
        segment.export(wav_stream, format="wav")
        wav_stream.seek(0)

        rate, samples = read_wav(wav_stream)
        return (rate, samples)
    except Exception as e:
        # Best-effort: a failed conversion just means no audio is played.
        print(f"Could not process audio for Gradio: {e}")
        return None
50
+
51
# --- Main Processing Function for Gradio ---
# This function is the core of the app. It takes a webcam frame and returns
# updates for all the output components.
def process_live_frame(frame):
    """
    Takes a single frame from the Gradio webcam input, processes it,
    and returns the processed frame, status text, and any audio alerts.
    """
    # Webcam not started / no frame yet: show a black placeholder.
    if frame is None:
        placeholder = np.zeros((480, 640, 3), dtype=np.uint8)
        return placeholder, "Status: Inactive", None

    # Run the drowsiness detector on this frame.
    annotated, indicators, _ = detector.process_frame(frame)
    level = indicators.get("drowsiness_level", "Awake")
    lighting = indicators.get("lighting", "Good")
    score = indicators.get("details", {}).get("Score", 0)

    # Compose the status-panel text.
    if lighting == "Low":
        detail = "Detection paused due to low light."
    else:
        detail = f"Status: {level}\nScore: {score:.2f}"
    status_text = f"Lighting: {lighting}\n" + detail

    # Trigger (or clear) the audio alert based on the drowsiness level.
    alert_audio = None
    if level == "Awake":
        alerter.reset_alert()
    else:
        raw_alert = alerter.trigger_alert(level=level)
        if raw_alert:
            alert_audio = process_audio_for_gradio(raw_alert)

    return annotated, status_text, alert_audio
87
+
88
# --- UI Definition for the Live Detection Page ---
def create_live_detection_page():
    """Builds the Gradio UI components for the live detection tab."""
    theme = gr.themes.Default(primary_hue="blue", secondary_hue="blue")
    with gr.Blocks(theme=theme) as page:
        gr.Markdown("A live test using Gradio's webcam component.")
        with gr.Row():
            with gr.Column():
                camera = gr.Image(sources=["webcam"], streaming=True, label="Live Camera Feed")
            with gr.Column():
                annotated_view = gr.Image(label="Processed Feed")
                status_box = gr.Textbox(label="Live Status", lines=3, interactive=False)
                # Audio player is now visible for debugging and user feedback.
                alert_player = gr.Audio(autoplay=True, visible=True, label="Alert Sound")

        # Stream each webcam frame through the detector and fan the result
        # out to the three output widgets.
        camera.stream(
            fn=process_live_frame,
            inputs=[camera],
            outputs=[annotated_view, status_box, alert_player]
        )
    return page
108
+
109
# --- UI Definition for the Home Page ---
def create_home_page():
    """Builds the Gradio UI components for the home/welcome tab."""
    theme = gr.themes.Default(primary_hue="blue", secondary_hue="blue")
    with gr.Blocks(theme=theme) as page:
        # Static welcome copy; gr.Markdown renders the embedded HTML/markdown.
        gr.Markdown(
            """
            <div align="center">
            <img src="https://em-content.zobj.net/source/samsung/380/automobile_1f697.png" alt="Car Emoji" width="100"/>
            <h1>Welcome to Drive Paddy!</h1>
            <p><strong>Your Drowsiness Detection Assistant</strong></p>
            </div>

            ---

            ### How It Works
            This application uses your webcam to monitor for signs of drowsiness in real-time. Navigate to the **Live Detection** tab to begin.

            - **Multi-Signal Analysis**: Detects eye closure, yawning, and head position.
            - **AI-Powered Alerts**: Uses Gemini to generate dynamic audio warnings.
            - **Live Feedback**: Provides instant visual feedback on the video stream and status panel.
            """
        )
    return page
132
+
133
# --- Combine Pages into a Tabbed Interface ---
# Each page factory returns a self-contained gr.Blocks; TabbedInterface
# shows them as tabs labelled by the second list, in the same order.
app = gr.TabbedInterface(
    [create_home_page(), create_live_detection_page()],
    ["Home", "Live Detection"]
)

# --- Launch the App ---
# Guarded so importing this module (e.g. from tests or tooling) does not
# start the server; running `python app.py` launches exactly as before.
if __name__ == "__main__":
    app.launch(debug=True)