lokesh341 committed
Commit 1ba758f · 1 Parent(s): 3040d90

Update app.py

Files changed (1): app.py +163 -50
app.py CHANGED
@@ -3,50 +3,47 @@ import cv2
 import time
 import os
 import json
+import random
 from datetime import datetime
+from collections import Counter
 from services.video_service import get_next_video_frame, reset_video_index, preload_video, release_video
-# Under Construction services
-from services.under_construction.earthwork_detection import process_earthwork
-from services.under_construction.culvert_check import process_culverts
-from services.under_construction.bridge_pier_check import process_bridge_piers
-# Original services
-from services.detection_service import process_frame as process_generic
-from services.metrics_service import compute_metrics
-from services.overlay_service import add_overlay
+from services.crack_detection_service import detect_cracks_and_objects
+from services.overlay_service import overlay_boxes
+from services.metrics_service import update_metrics
 from services.salesforce_dispatcher import dispatch_to_salesforce
-from services.shadow_detection import detect_shadows
-from services.thermal_service import process_thermal
 
 # Globals
 paused = False
 frame_rate = 0.5  # Process every 0.5 seconds for real-time feel
 frame_count = 0
 log_entries = []
+crack_counts = []
+crack_severity_all = []
 last_frame = None
 last_detections = {}
 last_timestamp = ""
+last_detected_images = []  # Store up to 100+ crack images
+gps_coordinates = []
 video_loaded = False
 
 # Constants
 DEFAULT_VIDEO_PATH = "sample.mp4"
 TEMP_IMAGE_PATH = "temp.jpg"
+CAPTURED_FRAMES_DIR = "captured_frames"
 OUTPUT_DIR = "outputs"
+os.makedirs(CAPTURED_FRAMES_DIR, exist_ok=True)
 os.makedirs(OUTPUT_DIR, exist_ok=True)
 
 def initialize_video(video_file=None):
     """
     Initialize the video with the provided file or default path.
-    Args:
-        video_file: Uploaded video file (Gradio File object) or None.
-    Returns:
-        str: Status message
     """
     global video_loaded, log_entries
-    release_video()  # Release any existing video capture
+    release_video()
     video_path = DEFAULT_VIDEO_PATH
 
     if video_file is not None:
-        video_path = video_file.name  # Gradio File object has a 'name' attribute with the temp path
+        video_path = video_file.name
         log_entries.append(f"Using uploaded video: {video_path}")
 
     status = preload_video(video_path)
@@ -58,11 +55,11 @@ def monitor_feed():
     """
     Main function to process video frames in real-time.
     """
-    global paused, frame_count, last_frame, last_detections, last_timestamp, video_loaded
+    global paused, frame_count, last_frame, last_detections, last_timestamp, gps_coordinates, last_detected_images, video_loaded
 
     if not video_loaded:
         log_entries.append("Cannot start streaming: Video not loaded successfully.")
-        return None, json.dumps({"error": "Video not loaded. Please upload a video file."}, indent=2), "\n".join(log_entries[-10:])
+        return None, json.dumps({"error": "Video not loaded. Please upload a video file."}, indent=2), "\n".join(log_entries[-10:]), None, None, last_detected_images
 
     if paused and last_frame is not None:
         frame = last_frame.copy()
@@ -72,27 +69,35 @@ def monitor_feed():
             frame = get_next_video_frame()
         except RuntimeError as e:
             log_entries.append(f"Error: {str(e)}")
-            return None, json.dumps(last_detections, indent=2), "\n".join(log_entries[-10:])
-
-    # Run Under Construction detections
-    earthwork_dets, frame = process_earthwork(frame)
-    culvert_dets, frame = process_culverts(frame)
-    bridge_pier_dets, frame = process_bridge_piers(frame)
-
-    # Combine detections
+            return None, json.dumps(last_detections, indent=2), "\n".join(log_entries[-10:]), None, None, last_detected_images
+
+    # Detect cracks and objects
+    detected_items = detect_cracks_and_objects(frame)
+    frame = overlay_boxes(frame, detected_items)
+    cv2.imwrite(TEMP_IMAGE_PATH, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
+    metrics = update_metrics(detected_items)
+
+    # Simulate GPS coordinates
+    gps_coord = [17.385044 + random.uniform(-0.001, 0.001), 78.486671 + frame_count * 0.0001]
+    gps_coordinates.append(gps_coord)
+
+    # Save frame if cracks are detected
+    if any(item['type'] == 'crack' for item in detected_items):
+        captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"crack_{frame_count}.jpg")
+        cv2.imwrite(captured_frame_path, frame)
+        last_detected_images.append(captured_frame_path)
+        if len(last_detected_images) > 100:
+            last_detected_images.pop(0)
+
+    # Combine detections for Salesforce
     all_detections = {
-        "earthwork": earthwork_dets,
-        "culverts": culvert_dets,
-        "bridge_piers": bridge_pier_dets,
+        "items": detected_items,
+        "metrics": metrics,
         "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-        "frame_count": frame_count
+        "frame_count": frame_count,
+        "gps_coordinates": gps_coord
     }
 
-    # Compute metrics
-    all_dets_list = earthwork_dets + culvert_dets + bridge_pier_dets
-    metrics = compute_metrics(all_dets_list)
-    all_detections["metrics"] = metrics
-
     # Dispatch to Salesforce
     dispatch_to_salesforce(all_detections, all_detections["timestamp"])
 
@@ -102,24 +107,128 @@ def monitor_feed():
 
     frame_count += 1
     last_timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-    log_entries.append(f"{last_timestamp} - Frame {frame_count} - Detections: {len(all_dets_list)} - Avg Conf: {metrics['avg_confidence']:.2f}")
-
     last_frame = frame.copy()
-    last_detections = all_detections
+    last_detections = metrics
+
+    # Update logs and stats
+    crack_detected = len([item for item in last_detections.get('items', []) if item['type'] == 'crack'])
+    crack_severity_all.extend([
+        item['severity']
+        for item in last_detections.get('items', [])
+        if item['type'] == 'crack' and 'severity' in item
+    ])
+
+    log_entries.append(f"{last_timestamp} - Frame {frame_count} - Cracks: {crack_detected} - GPS: {gps_coord} - Avg Conf: {metrics['avg_confidence']:.2f}")
+    crack_counts.append(crack_detected)
 
     if len(log_entries) > 100:
         log_entries.pop(0)
+    if len(crack_counts) > 500:
+        crack_counts.pop(0)
+    if len(crack_severity_all) > 500:
+        crack_severity_all.pop(0)
 
     # Add frame count and timestamp to display
     frame = cv2.resize(last_frame, (640, 480))
     cv2.putText(frame, f"Frame: {frame_count}", (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    cv2.putText(frame, f"{last_timestamp}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
 
-    return frame[:, :, ::-1], json.dumps(last_detections, indent=2), "\n".join(log_entries[-10:])
+    # Generate charts
+    line_chart = generate_line_chart()
+    pie_chart = generate_pie_chart()
+
+    return frame[:, :, ::-1], json.dumps(last_detections, indent=2), "\n".join(log_entries[-10:]), line_chart, pie_chart, last_detected_images
+
+def generate_line_chart():
+    """
+    Generate a line chart for crack counts over time using Chart.js.
+    """
+    if not crack_counts:
+        return None
+
+    data = crack_counts[-50:]  # Last 50 frames
+    labels = list(range(len(data)))
+
+    return {
+        "type": "line",
+        "data": {
+            "labels": labels,
+            "datasets": [{
+                "label": "Cracks Over Time",
+                "data": data,
+                "borderColor": "#FF6347",  # Tomato
+                "backgroundColor": "rgba(255, 99, 71, 0.2)",
+                "fill": True,
+                "tension": 0.4
+            }]
+        },
+        "options": {
+            "responsive": True,
+            "plugins": {
+                "title": {
+                    "display": True,
+                    "text": "Cracks Over Time"
+                }
+            },
+            "scales": {
+                "x": {
+                    "title": {
+                        "display": True,
+                        "text": "Frame"
+                    }
+                },
+                "y": {
+                    "title": {
+                        "display": True,
+                        "text": "Count"
+                    },
+                    "beginAtZero": True
+                }
+            }
+        }
+    }
+
+def generate_pie_chart():
+    """
+    Generate a pie chart for crack severity distribution using Chart.js.
+    """
+    if not crack_severity_all:
+        return None
+
+    count = Counter(crack_severity_all[-200:])  # Last 200 cracks
+    labels = list(count.keys())
+    sizes = list(count.values())
+
+    return {
+        "type": "pie",
+        "data": {
+            "labels": labels,
+            "datasets": [{
+                "data": sizes,
+                "backgroundColor": [
+                    "#FF6347",  # Tomato
+                    "#4682B4",  # SteelBlue
+                    "#FFD700"   # Gold
+                ]
+            }]
+        },
+        "options": {
+            "responsive": True,
+            "plugins": {
+                "title": {
+                    "display": True,
+                    "text": "Crack Severity Distribution"
+                },
+                "legend": {
+                    "position": "top"
+                }
+            }
+        }
+    }
 
 # Gradio UI
 with gr.Blocks(theme=gr.themes.Soft()) as app:
-    gr.Markdown("# 🛡️ NHAI Drone Analytics Monitoring System - Under Construction")
+    gr.Markdown("# 🛡️ NHAI Drone Road Inspection Dashboard")
 
     # Video upload section
     with gr.Row():
@@ -133,15 +242,21 @@ with gr.Blocks(theme=gr.themes.Soft()) as app:
         with gr.Column(scale=3):
             video_output = gr.Image(label="Live Drone Feed", width=640, height=480)
         with gr.Column(scale=1):
-            detections_output = gr.Textbox(label="Detections", lines=10)
+            detections_output = gr.Textbox(label="Crack Metrics", lines=4)
 
     with gr.Row():
         logs_output = gr.Textbox(label="Live Logs", lines=8)
+        with gr.Column(scale=1):
+            chart_output = gr.Plot(label="Crack Trend")
+            pie_output = gr.Plot(label="Crack Severity")
+
+    with gr.Row():
+        captured_images = gr.Gallery(label="Detected Cracks (Last 100+)", columns=4, rows=25)
 
     with gr.Row():
         pause_btn = gr.Button("⏸️ Pause")
         resume_btn = gr.Button("▶️ Resume")
-        frame_slider = gr.Slider(0.1, 5, value=0.5, label="Frame Interval (seconds)")
+        frame_slider = gr.Slider(0.0005, 5, value=0.5, label="Frame Interval (seconds)")
 
     def toggle_pause():
         global paused
@@ -157,10 +272,9 @@ with gr.Blocks(theme=gr.themes.Soft()) as app:
         global frame_rate
         frame_rate = val
 
-    # Initialize video on app load (try default path)
+    # Initialize video on app load
    video_status.value = initialize_video()
 
-    # Load video when user uploads a file
    load_button.click(
        initialize_video,
        inputs=[video_input],
@@ -172,19 +286,18 @@ with gr.Blocks(theme=gr.themes.Soft()) as app:
     frame_slider.change(set_frame_rate, inputs=[frame_slider])
 
     def streaming_loop():
-        global video_loaded
         while True:
             if not video_loaded:
-                yield None, json.dumps({"error": "Video not loaded. Please upload a video file."}, indent=2), "\n".join(log_entries[-10:])
+                yield None, json.dumps({"error": "Video not loaded. Please upload a video file."}, indent=2), "\n".join(log_entries[-10:]), None, None, last_detected_images
             else:
-                frame, detections, logs = monitor_feed()
+                frame, detections, logs, line_chart, pie_chart, captured = monitor_feed()
                 if frame is None:
-                    yield None, detections, logs
+                    yield None, detections, logs, line_chart, pie_chart, captured
                 else:
-                    yield frame, detections, logs
+                    yield frame, detections, logs, line_chart, pie_chart, captured
                 time.sleep(frame_rate)
 
-    app.load(streaming_loop, outputs=[video_output, detections_output, logs_output])
+    app.load(streaming_loop, outputs=[video_output, detections_output, logs_output, chart_output, pie_output, captured_images])
 
 if __name__ == "__main__":
     app.launch(share=True)
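The rewritten pipeline leans on three service modules that are not part of this diff. The sketch below is a hypothetical stand-in, inferred only from the call sites in app.py: detect_cracks_and_objects(frame) is expected to return a list of dicts carrying 'type', 'severity' (for cracks), a confidence, and a box; overlay_boxes draws those boxes onto the frame; update_metrics aggregates them into the 'avg_confidence' value logged each frame. The real services/crack_detection_service.py, services/overlay_service.py, and services/metrics_service.py may differ.

# Hypothetical sketch of the service interfaces app.py now calls; inferred only
# from this commit's call sites, not taken from the actual services/ modules.
import random

import cv2

def detect_cracks_and_objects(frame):
    """Return detections shaped the way monitor_feed() reads them:
    'type', 'severity' (cracks only), 'confidence', and a bounding box."""
    h, w = frame.shape[:2]
    x, y = random.randint(0, w // 2), random.randint(0, h // 2)  # placeholder box
    return [{
        "type": "crack",
        "severity": random.choice(["low", "medium", "high"]),
        "confidence": round(random.uniform(0.5, 0.99), 2),
        "bbox": (x, y, x + 80, y + 40),
    }]

def overlay_boxes(frame, items):
    """Draw each detection's box and label onto the frame, then return it."""
    for item in items:
        x1, y1, x2, y2 = item["bbox"]
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
        cv2.putText(frame, f"{item['type']} {item['confidence']:.2f}",
                    (x1, max(y1 - 5, 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
    return frame

def update_metrics(items):
    """Aggregate per-frame stats; monitor_feed() logs metrics['avg_confidence']."""
    confidences = [item["confidence"] for item in items]
    return {
        "total_detections": len(items),
        "avg_confidence": sum(confidences) / len(confidences) if confidences else 0.0,
    }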
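generate_line_chart() and generate_pie_chart() build Chart.js-style configuration dicts from the crack_counts and crack_severity_all buffers and feed them to the gr.Plot outputs. For comparison, the same two charts can also be produced as matplotlib figures, the kind of object gr.Plot is commonly given. The following is only an illustrative sketch built on the same buffers, not code from this commit.

# Illustrative matplotlib versions of the two charts; not part of this commit.
import matplotlib
matplotlib.use("Agg")  # headless rendering, no display server needed
import matplotlib.pyplot as plt
from collections import Counter

def line_chart_figure(crack_counts):
    """Crack counts for the last 50 processed frames as a line chart."""
    if not crack_counts:
        return None
    data = crack_counts[-50:]
    fig, ax = plt.subplots(figsize=(4, 3))
    ax.plot(range(len(data)), data, color="#FF6347")
    ax.fill_between(range(len(data)), data, color="#FF6347", alpha=0.2)
    ax.set_title("Cracks Over Time")
    ax.set_xlabel("Frame")
    ax.set_ylabel("Count")
    ax.set_ylim(bottom=0)
    return fig

def pie_chart_figure(crack_severity_all):
    """Severity distribution of the last 200 recorded cracks as a pie chart."""
    if not crack_severity_all:
        return None
    counts = Counter(crack_severity_all[-200:])
    fig, ax = plt.subplots(figsize=(3, 3))
    ax.pie(list(counts.values()), labels=list(counts.keys()),
           colors=["#FF6347", "#4682B4", "#FFD700"], autopct="%1.0f%%")
    ax.set_title("Crack Severity Distribution")
    return fig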