lokesh341 committed
Commit 2e0ab58 · verified · 1 Parent(s): daef382

Update app.py

Files changed (1)
  1. app.py +76 -214
app.py CHANGED
@@ -18,32 +18,14 @@ os.environ["YOLO_CONFIG_DIR"] = "/tmp/Ultralytics"
18
  # Import service modules
19
  try:
20
  from services.video_service import get_next_video_frame, reset_video_index, preload_video, release_video
21
- from services.detection_service import process_frame as process_generic
22
  from services.metrics_service import update_metrics
23
- from services.overlay_service import overlay_boxes
24
  from services.salesforce_dispatcher import send_to_salesforce
25
  from services.shadow_detection import detect_shadow_coverage
26
  from services.thermal_service import process_thermal
27
  from services.map_service import generate_map
28
- # Under Construction services
29
  from services.under_construction.earthwork_detection import process_earthwork
30
  from services.under_construction.culvert_check import process_culverts
31
  from services.under_construction.bridge_pier_check import process_bridge_piers
32
- # Operations Maintenance services
33
- from services.operations_maintenance.crack_detection import detect_cracks_and_holes
34
- from services.operations_maintenance.pothole_detection import process_potholes
35
- from services.operations_maintenance.signage_check import process_signages
36
- # Road Safety services
37
- from services.road_safety.barrier_check import process_barriers
38
- from services.road_safety.lighting_check import process_lighting
39
- from services.road_safety.accident_spot_check import process_accident_spots
40
- from services.road_safety.pothole_crack_detection import detect_potholes_and_cracks
41
- # Plantation services
42
- from services.plantation.plant_count import process_plants
43
- from services.plantation.plant_health import process_plant_health
44
- from services.plantation.missing_patch_check import process_missing_patches
45
- # General object detection
46
- from services.object_detection import detect_objects
47
  except ImportError as e:
48
  print(f"Failed to import service modules: {str(e)}")
49
  logging.error(f"Import error: {str(e)}")
@@ -65,13 +47,13 @@ detected_counts: List[int] = []
65
  last_frame: Optional[np.ndarray] = None
66
  last_metrics: Dict[str, Any] = {}
67
  last_timestamp: str = ""
68
- detected_plants: List[str] = [] # For plants and missing patches
69
- detected_issues: List[str] = [] # For cracks, holes, and other issues
70
  gps_coordinates: List[List[float]] = []
71
  media_loaded: bool = False
72
  active_service: Optional[str] = None
73
  is_video: bool = True
74
  static_image: Optional[np.ndarray] = None
 
75
 
76
  # Constants
77
  DEFAULT_VIDEO_PATH = "sample.mp4"
@@ -83,21 +65,19 @@ TEMP_MEDIA_DIR = os.path.abspath("temp_media")
83
  # Ensure directories exist with write permissions
84
  for directory in [CAPTURED_FRAMES_DIR, OUTPUT_DIR, TEMP_MEDIA_DIR]:
85
  os.makedirs(directory, exist_ok=True)
86
- os.chmod(directory, 0o777) # Ensure write permissions
87
 
88
  def initialize_media(media_file: Optional[Any] = None) -> str:
89
  global media_loaded, is_video, static_image, log_entries, frame_count
90
  release_video()
91
  static_image = None
92
- frame_count = 0 # Reset frame count on new media load
93
 
94
- # If no media file is provided, try the default video
95
  if media_file is None:
96
  media_path = DEFAULT_VIDEO_PATH
97
  log_entries.append(f"No media uploaded, attempting to load default: {media_path}")
98
  logging.info(f"No media uploaded, attempting to load default: {media_path}")
99
  else:
100
- # Validate media file
101
  if not hasattr(media_file, 'name') or not media_file.name:
102
  status = "Error: Invalid media file uploaded."
103
  log_entries.append(status)
@@ -105,7 +85,6 @@ def initialize_media(media_file: Optional[Any] = None) -> str:
105
  media_loaded = False
106
  return status
107
 
108
- # Copy the uploaded file to a known location to avoid path issues
109
  original_path = media_file.name
110
  file_extension = os.path.splitext(original_path)[1].lower()
111
  temp_media_path = os.path.join(TEMP_MEDIA_DIR, f"uploaded_media{file_extension}")
@@ -121,7 +100,6 @@ def initialize_media(media_file: Optional[Any] = None) -> str:
121
  media_loaded = False
122
  return status
123
 
124
- # Verify the file exists
125
  if not os.path.exists(media_path):
126
  status = f"Error: Media file '{media_path}' not found."
127
  log_entries.append(status)
@@ -130,7 +108,6 @@ def initialize_media(media_file: Optional[Any] = None) -> str:
130
  return status
131
 
132
  try:
133
- # Determine if the file is a video or image
134
  if file_extension in (".mp4", ".avi"):
135
  is_video = True
136
  preload_video(media_path)
@@ -161,42 +138,28 @@ def initialize_media(media_file: Optional[Any] = None) -> str:
161
  logging.error(status)
162
  return status
163
 
164
- def set_active_service(
165
- service_name: str,
166
- uc_val: bool,
167
- om_val: bool,
168
- rs_val: bool,
169
- pl_val: bool
170
- ) -> Tuple[Optional[str], str]:
171
- global active_service
172
- # Enable all requested services
173
  enabled_services = []
174
  if uc_val:
175
  enabled_services.append("under_construction")
176
- if om_val:
177
- enabled_services.append("operations_maintenance")
178
- if rs_val:
179
- enabled_services.append("road_safety")
180
- if pl_val:
181
- enabled_services.append("plantation")
182
 
183
  if not enabled_services:
184
  active_service = None
185
- log_entries.append("No service category enabled.")
186
- logging.info("No service category enabled.")
187
- return None, "No Service Category Enabled"
188
 
189
- # Since multiple services are requested, we'll process all enabled services
190
- active_service = "all_enabled" # Custom state to process all enabled services
191
- log_entries.append(f"Enabled services: {', '.join(enabled_services)}")
192
- logging.info(f"Enabled services: {', '.join(enabled_services)}")
193
- return active_service, f"Enabled: {', '.join([s.replace('_', ' ').title() for s in enabled_services])}"
194
 
195
  def generate_line_chart() -> Optional[str]:
196
  if not detected_counts:
197
  return None
198
  fig, ax = plt.subplots(figsize=(4, 2))
199
- ax.plot(detected_counts[-50:], marker='o', color='#4682B4')
200
  ax.set_title("Detections Over Time")
201
  ax.set_xlabel("Frame")
202
  ax.set_ylabel("Count")
@@ -217,13 +180,12 @@ def monitor_feed() -> Tuple[
217
  str,
218
  str,
219
  List[str],
220
- List[str],
221
  Optional[str],
222
  Optional[str]
223
  ]:
224
  global paused, frame_count, last_frame, last_metrics, last_timestamp
225
- global gps_coordinates, detected_plants, detected_issues, media_loaded
226
- global is_video, static_image
227
 
228
  if not media_loaded:
229
  log_entries.append("Cannot start processing: Media not loaded successfully.")
@@ -232,7 +194,6 @@ def monitor_feed() -> Tuple[
232
  None,
233
  json.dumps({"error": "Media not loaded. Please upload a video or image file."}, indent=2),
234
  "\n".join(log_entries[-10:]),
235
- detected_plants,
236
  detected_issues,
237
  None,
238
  None
@@ -265,7 +226,6 @@ def monitor_feed() -> Tuple[
265
  None,
266
  json.dumps(last_metrics, indent=2),
267
  "\n".join(log_entries[-10:]),
268
- detected_plants,
269
  detected_issues,
270
  None,
271
  None
@@ -277,51 +237,23 @@ def monitor_feed() -> Tuple[
277
  None,
278
  json.dumps(last_metrics, indent=2),
279
  "\n".join(log_entries[-10:]),
280
- detected_plants,
281
  detected_issues,
282
  None,
283
  None
284
  )
285
 
286
- # Resize frame for faster detection (320x512)
287
  detection_frame = cv2.resize(frame, (512, 320))
288
-
289
  all_detected_items: List[Dict[str, Any]] = []
290
  shadow_issue = False
291
  thermal_flag = False
292
 
293
  try:
294
- # Process all enabled services
295
- # Under Construction Services
296
- earthwork_dets, detection_frame = process_earthwork(detection_frame)
297
- culvert_dets, detection_frame = process_culverts(detection_frame)
298
- bridge_pier_dets, detection_frame = process_bridge_piers(detection_frame)
299
- all_detected_items.extend(earthwork_dets + culvert_dets + bridge_pier_dets)
300
-
301
- # Operations Maintenance Services
302
- crack_hole_dets, detection_frame = detect_cracks_and_holes(detection_frame)
303
- pothole_dets, detection_frame = process_potholes(detection_frame)
304
- signage_dets, detection_frame = process_signages(detection_frame)
305
- all_detected_items.extend(crack_hole_dets + pothole_dets + signage_dets)
306
-
307
- # Road Safety Services
308
- barrier_dets, detection_frame = process_barriers(detection_frame)
309
- lighting_dets, detection_frame = process_lighting(detection_frame)
310
- accident_dets, detection_frame = process_accident_spots(detection_frame)
311
- pothole_crack_dets, detection_frame = detect_potholes_and_cracks(detection_frame)
312
- all_detected_items.extend(barrier_dets + lighting_dets + accident_dets + pothole_crack_dets)
313
-
314
- # Plantation Services
315
- plant_dets, detection_frame = process_plants(detection_frame)
316
- health_dets, detection_frame = process_plant_health(detection_frame)
317
- missing_dets, detection_frame = process_missing_patches(detection_frame)
318
- all_detected_items.extend(plant_dets + health_dets + missing_dets)
319
-
320
- # General Object Detection (cars, bikes, humans, dogs, etc.)
321
- object_dets, detection_frame = detect_objects(detection_frame)
322
- all_detected_items.extend(object_dets)
323
-
324
- # Apply shadow detection
325
  try:
326
  cv2.imwrite(TEMP_IMAGE_PATH, detection_frame)
327
  shadow_issue = detect_shadow_coverage(TEMP_IMAGE_PATH)
@@ -330,7 +262,6 @@ def monitor_feed() -> Tuple[
330
  logging.error(f"Error saving temp image: {str(e)}")
331
  shadow_issue = False
332
 
333
- # Apply thermal processing if frame is grayscale
334
  if len(detection_frame.shape) == 2:
335
  thermal_results = process_thermal(detection_frame)
336
  thermal_dets = thermal_results["detections"]
@@ -338,7 +269,6 @@ def monitor_feed() -> Tuple[
338
  all_detected_items.extend(thermal_dets)
339
  thermal_flag = bool(thermal_dets)
340
 
341
- # Scale bounding boxes back to original frame size
342
  orig_h, orig_w = frame.shape[:2]
343
  det_h, det_w = detection_frame.shape[:2]
344
  scale_x, scale_y = orig_w / det_w, orig_h / det_h
@@ -352,7 +282,6 @@ def monitor_feed() -> Tuple[
352
  int(box[3] * scale_y)
353
  ]
354
 
355
- # Overlay detections on the original frame with specified colors
356
  for item in all_detected_items:
357
  box = item.get("box", [])
358
  if not box:
@@ -360,43 +289,27 @@ def monitor_feed() -> Tuple[
360
  x_min, y_min, x_max, y_max = box
361
  label = item.get("label", "")
362
  dtype = item.get("type", "")
363
- health = item.get("health", "") # For plant health
364
-
365
- # Assign colors based on detection type as per requirements
366
- if dtype == "plant":
367
- color = (255, 0, 0) # Blue mark for plant count
368
- if health == "healthy":
369
- color = (255, 165, 0) # Orange mark for healthy plants
370
- elif dtype == "missing_patch":
371
- color = (0, 0, 255) # Red mark for missing patches
372
- elif dtype == "earthwork":
373
- color = (255, 105, 180) # Pink for earthwork
374
  elif dtype == "culvert":
375
- color = (0, 165, 255) # Blue and orange mix (approximated)
376
  elif dtype == "bridge_pier":
377
- color = (255, 99, 71) # Light red for bridge piers
378
- elif dtype == "pothole" or dtype == "hole":
379
- color = (255, 0, 0) # Red for potholes (from pothole_detection and pothole_crack_detection)
380
- elif dtype == "crack":
381
- color = (255, 105, 180) # Pink for cracks
382
- elif dtype == "signage":
383
- color = (255, 255, 0) # Yellow for signage
384
- elif dtype == "car":
385
- color = (128, 0, 128) # Purple for cars
386
- elif dtype == "bike":
387
- color = (0, 255, 255) # Cyan for bikes
388
- elif dtype == "person":
389
- color = (0, 255, 0) # Green for humans
390
- elif dtype == "dog":
391
- color = (139, 69, 19) # Brown for dogs
392
  else:
393
- color = (255, 255, 255) # White for other objects
394
 
395
- cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, 2)
396
- cv2.putText(frame, label, (x_min, y_min - 10),
397
- cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
398
 
399
- # Save temporary image
400
  try:
401
  cv2.imwrite(TEMP_IMAGE_PATH, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
402
  except Exception as e:
@@ -408,40 +321,28 @@ def monitor_feed() -> Tuple[
408
  logging.error(f"Processing error: {str(e)}")
409
  all_detected_items = []
410
 
411
- # Update detection metrics
412
  metrics = update_metrics(all_detected_items)
413
-
414
- # Generate GPS coordinates
415
  gps_coord = [17.385044 + random.uniform(-0.001, 0.001), 78.486671 + frame_count * 0.0001]
416
  gps_coordinates.append(gps_coord)
417
 
418
- # Add GPS to detected items for mapping
419
  for item in all_detected_items:
420
  item["gps"] = gps_coord
421
 
422
- # Save frame if detections are present
423
  detection_types = {item.get("type") for item in all_detected_items if "type" in item}
424
  if detection_types:
425
  try:
426
  captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count}.jpg")
427
- success = cv2.imwrite(captured_frame_path, frame)
428
  if not success:
429
  raise RuntimeError(f"Failed to save captured frame: {captured_frame_path}")
430
  for item in all_detected_items:
431
- dtype = item.get("type", "")
432
- if dtype == "plant":
433
- detected_plants.append(captured_frame_path)
434
- if len(detected_plants) > 100:
435
- detected_plants.pop(0)
436
- else:
437
- detected_issues.append(captured_frame_path)
438
- if len(detected_issues) > 100:
439
- detected_issues.pop(0)
440
  except Exception as e:
441
  log_entries.append(f"Error saving captured frame: {str(e)}")
442
  logging.error(f"Error saving captured frame: {str(e)}")
443
 
444
- # Prepare data for Salesforce dispatch
445
  all_detections = {
446
  "detections": all_detected_items,
447
  "metrics": metrics,
@@ -452,54 +353,38 @@ def monitor_feed() -> Tuple[
452
  "thermal": thermal_flag
453
  }
454
 
455
- # Dispatch to Salesforce
456
  try:
457
  send_to_salesforce(all_detections)
458
  except Exception as e:
459
  log_entries.append(f"Salesforce Dispatch Error: {str(e)}")
460
  logging.error(f"Salesforce dispatch error: {str(e)}")
461
 
462
- # Save processed frame
463
  try:
464
  frame_path = os.path.join(OUTPUT_DIR, f"frame_{frame_count:04d}.jpg")
465
- success = cv2.imwrite(frame_path, frame)
466
  if not success:
467
  raise RuntimeError(f"Failed to save output frame: {frame_path}")
468
  except Exception as e:
469
  log_entries.append(f"Error saving output frame: {str(e)}")
470
  logging.error(f"Error saving output frame: {str(e)}")
471
 
472
- # Update global variables
473
  frame_count += 1
474
- last_timestamp = datetime.now().strftime("%Y-%m-d %H:%M:%S")
475
  last_frame = frame.copy()
476
  last_metrics = metrics
477
 
478
- # Track detections for metrics
479
- plant_detected = len([item for item in all_detected_items if item.get("type") == "plant"])
480
- crack_detected = len([item for item in all_detected_items if item.get("type") == "crack"])
481
- hole_detected = len([item for item in all_detected_items if item.get("type") == "hole" or item.get("type") == "pothole"])
482
- missing_detected = len([item for item in all_detected_items if item.get("type") == "missing_patch"])
483
- car_detected = len([item for item in all_detected_items if item.get("type") == "car"])
484
- bike_detected = len([item for item in all_detected_items if item.get("type") == "bike"])
485
- person_detected = len([item for item in all_detected_items if item.get("type") == "person"])
486
- dog_detected = len([item for item in all_detected_items if item.get("type") == "dog"])
487
- detected_counts.append(plant_detected + crack_detected + hole_detected + missing_detected +
488
- car_detected + bike_detected + person_detected + dog_detected)
489
-
490
- # Log frame processing details in the requested format
491
  processing_time = time.time() - start_time
492
  detection_summary = {
493
  "timestamp": last_timestamp,
494
  "frame": frame_count,
495
- "plants": plant_detected,
496
- "cracks": crack_detected,
497
- "holes": hole_detected,
498
- "missing_patches": missing_detected,
499
- "cars": car_detected,
500
- "bikes": bike_detected,
501
- "persons": person_detected,
502
- "dogs": dog_detected,
503
  "gps": gps_coord,
504
  "processing_time_ms": processing_time * 1000
505
  }
@@ -507,37 +392,32 @@ def monitor_feed() -> Tuple[
507
  log_entries.append(log_message)
508
  logging.info(log_message)
509
 
510
- # Limit the size of logs and detection data
511
  if len(log_entries) > 100:
512
  log_entries.pop(0)
513
  if len(detected_counts) > 500:
514
  detected_counts.pop(0)
515
 
516
- # Resize frame and add metadata for display
517
  frame = cv2.resize(last_frame, (640, 480))
518
  cv2.putText(frame, f"Frame: {frame_count}", (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
519
  cv2.putText(frame, f"{last_timestamp}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
520
 
521
- # Generate map
522
- map_items = [item for item in last_metrics.get("items", []) if item.get("type") in ["crack", "hole", "pothole", "missing_patch"]]
523
  map_path = generate_map(gps_coordinates[-5:], map_items)
524
 
525
  return (
526
- frame[:, :, ::-1], # Convert BGR to RGB for Gradio
527
  json.dumps(last_metrics, indent=2),
528
  "\n".join(log_entries[-10:]),
529
- detected_plants,
530
  detected_issues,
531
  generate_line_chart(),
532
  map_path
533
  )
534
 
535
- # Gradio UI setup
536
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="green")) as app:
537
  gr.Markdown(
538
  """
539
- # 🛡️ NHAI Drone Road Inspection Dashboard
540
- Monitor highway conditions in real-time using drone footage or static images. All services are enabled as requested.
541
  """
542
  )
543
 
@@ -554,19 +434,10 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="green"))
554
 
555
  with gr.Row():
556
  with gr.Column():
557
- uc_toggle = gr.Checkbox(label="Enable Under Construction Services", value=True)
558
- uc_status = gr.Textbox(label="Under Construction Status", value="Enabled", interactive=False)
559
- with gr.Column():
560
- om_toggle = gr.Checkbox(label="Enable Operations Maintenance Services", value=True)
561
- om_status = gr.Textbox(label="Operations Maintenance Status", value="Enabled", interactive=False)
562
- with gr.Column():
563
- rs_toggle = gr.Checkbox(label="Enable Road Safety Services", value=True)
564
- rs_status = gr.Textbox(label="Road Safety Status", value="Enabled", interactive=False)
565
- with gr.Column():
566
- pl_toggle = gr.Checkbox(label="Enable Plantation Services", value=True)
567
- pl_status = gr.Textbox(label="Plantation Status", value="Enabled", interactive=False)
568
 
569
- status_text = gr.Markdown("**Status:** 🟢 Ready (Upload a media file to start)")
570
 
571
  with gr.Row():
572
  with gr.Column(scale=3):
@@ -583,7 +454,6 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="green"))
583
  with gr.Column(scale=2):
584
  logs_output = gr.Textbox(label="Live Logs", lines=8, interactive=False)
585
  with gr.Column(scale=1):
586
- plant_images = gr.Gallery(label="Detected Plants (Last 100+)", columns=4, rows=13, height="auto")
587
  issue_images = gr.Gallery(label="Detected Issues (Last 100+)", columns=4, rows=13, height="auto")
588
 
589
  with gr.Row():
@@ -597,12 +467,15 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="green"))
597
 
598
  gr.HTML("""
599
  <style>
600
  #live-feed {
601
- border: 2px solid #4682B4;
602
  border-radius: 10px;
603
  }
604
  .gr-button-primary {
605
- background-color: #4682B4 !important;
606
  }
607
  .gr-button-secondary {
608
  background-color: #FF6347 !important;
@@ -632,22 +505,12 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="green"))
632
  outputs=[media_status]
633
  )
634
 
635
- def update_toggles(uc_val: bool, om_val: bool, rs_val: bool, pl_val: bool) -> Tuple[str, str, str, str, str]:
636
- active, status_message = set_active_service("toggle", uc_val, om_val, rs_val, pl_val)
637
- uc_status_val = "Enabled" if "under_construction" in status_message.lower() else "Disabled"
638
- om_status_val = "Enabled" if "operations_maintenance" in status_message.lower() else "Disabled"
639
- rs_status_val = "Enabled" if "road_safety" in status_message.lower() else "Disabled"
640
- pl_status_val = "Enabled" if "plantation" in status_message.lower() else "Disabled"
641
- return (
642
- uc_status_val, om_status_val, rs_status_val, pl_status_val, status_message
643
- )
644
 
645
- toggle_inputs = [uc_toggle, om_toggle, rs_toggle, pl_toggle]
646
- toggle_outputs = [uc_status, om_status, rs_status, pl_status, status_text]
647
- uc_toggle.change(update_toggles, inputs=toggle_inputs, outputs=toggle_outputs)
648
- om_toggle.change(update_toggles, inputs=toggle_inputs, outputs=toggle_outputs)
649
- rs_toggle.change(update_toggles, inputs=toggle_inputs, outputs=toggle_outputs)
650
- pl_toggle.change(update_toggles, inputs=toggle_inputs, outputs=toggle_outputs)
651
 
652
  pause_btn.click(toggle_pause, outputs=status_text)
653
  resume_btn.click(toggle_resume, outputs=status_text)
@@ -656,19 +519,18 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="green"))
656
  def streaming_loop():
657
  while True:
658
  if not media_loaded:
659
- yield None, json.dumps({"error": "Media not loaded. Please upload a video or image file."}, indent=2), "\n".join(log_entries[-10:]), detected_plants, detected_issues, None, None
660
  else:
661
- frame, metrics, logs, plants, issues, chart, map_path = monitor_feed()
662
  if frame is None:
663
- yield None, metrics, logs, plants, issues, chart, map_path
664
  else:
665
- yield frame, metrics, logs, plants, issues, chart, map_path
666
  if not is_video:
667
- # For static images, yield once and pause
668
  break
669
  time.sleep(frame_rate)
670
 
671
- app.load(streaming_loop, outputs=[media_output, metrics_output, logs_output, plant_images, issue_images, chart_output, map_output])
672
 
673
  if __name__ == "__main__":
674
- app.launch(share=True) # Set share=True to create a public link
 
18
  # Import service modules
19
  try:
20
  from services.video_service import get_next_video_frame, reset_video_index, preload_video, release_video
 
21
  from services.metrics_service import update_metrics
 
22
  from services.salesforce_dispatcher import send_to_salesforce
23
  from services.shadow_detection import detect_shadow_coverage
24
  from services.thermal_service import process_thermal
25
  from services.map_service import generate_map
 
26
  from services.under_construction.earthwork_detection import process_earthwork
27
  from services.under_construction.culvert_check import process_culverts
28
  from services.under_construction.bridge_pier_check import process_bridge_piers
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  except ImportError as e:
30
  print(f"Failed to import service modules: {str(e)}")
31
  logging.error(f"Import error: {str(e)}")
 
47
  last_frame: Optional[np.ndarray] = None
48
  last_metrics: Dict[str, Any] = {}
49
  last_timestamp: str = ""
50
+ detected_issues: List[str] = []
 
51
  gps_coordinates: List[List[float]] = []
52
  media_loaded: bool = False
53
  active_service: Optional[str] = None
54
  is_video: bool = True
55
  static_image: Optional[np.ndarray] = None
56
+ enabled_services: List[str] = []
57
 
58
  # Constants
59
  DEFAULT_VIDEO_PATH = "sample.mp4"
 
65
  # Ensure directories exist with write permissions
66
  for directory in [CAPTURED_FRAMES_DIR, OUTPUT_DIR, TEMP_MEDIA_DIR]:
67
  os.makedirs(directory, exist_ok=True)
68
+ os.chmod(directory, 0o777)
69
 
70
  def initialize_media(media_file: Optional[Any] = None) -> str:
71
  global media_loaded, is_video, static_image, log_entries, frame_count
72
  release_video()
73
  static_image = None
74
+ frame_count = 0
75
 
 
76
  if media_file is None:
77
  media_path = DEFAULT_VIDEO_PATH
78
  log_entries.append(f"No media uploaded, attempting to load default: {media_path}")
79
  logging.info(f"No media uploaded, attempting to load default: {media_path}")
80
  else:
 
81
  if not hasattr(media_file, 'name') or not media_file.name:
82
  status = "Error: Invalid media file uploaded."
83
  log_entries.append(status)
 
85
  media_loaded = False
86
  return status
87
 
 
88
  original_path = media_file.name
89
  file_extension = os.path.splitext(original_path)[1].lower()
90
  temp_media_path = os.path.join(TEMP_MEDIA_DIR, f"uploaded_media{file_extension}")
 
100
  media_loaded = False
101
  return status
102
 
 
103
  if not os.path.exists(media_path):
104
  status = f"Error: Media file '{media_path}' not found."
105
  log_entries.append(status)
 
108
  return status
109
 
110
  try:
 
111
  if file_extension in (".mp4", ".avi"):
112
  is_video = True
113
  preload_video(media_path)
 
138
  logging.error(status)
139
  return status
140
 
141
+ def set_active_service(uc_val: bool) -> Tuple[Optional[str], str]:
142
+ global active_service, enabled_services
143
  enabled_services = []
144
  if uc_val:
145
  enabled_services.append("under_construction")
146
 
147
  if not enabled_services:
148
  active_service = None
149
+ log_entries.append("Under Construction service disabled.")
150
+ logging.info("Under Construction service disabled.")
151
+ return None, "No Service Enabled"
152
 
153
+ active_service = "under_construction"
154
+ log_entries.append("Enabled service: Under Construction")
155
+ logging.info("Enabled service: Under Construction")
156
+ return active_service, "Enabled: Under Construction"
 
157
 
158
  def generate_line_chart() -> Optional[str]:
159
  if not detected_counts:
160
  return None
161
  fig, ax = plt.subplots(figsize=(4, 2))
162
+ ax.plot(detected_counts[-50:], marker='o', color='#FF8C00')
163
  ax.set_title("Detections Over Time")
164
  ax.set_xlabel("Frame")
165
  ax.set_ylabel("Count")
 
180
  str,
181
  str,
182
  List[str],
 
183
  Optional[str],
184
  Optional[str]
185
  ]:
186
  global paused, frame_count, last_frame, last_metrics, last_timestamp
187
+ global gps_coordinates, detected_issues, media_loaded
188
+ global is_video, static_image, enabled_services
189
 
190
  if not media_loaded:
191
  log_entries.append("Cannot start processing: Media not loaded successfully.")
 
194
  None,
195
  json.dumps({"error": "Media not loaded. Please upload a video or image file."}, indent=2),
196
  "\n".join(log_entries[-10:]),
 
197
  detected_issues,
198
  None,
199
  None
 
226
  None,
227
  json.dumps(last_metrics, indent=2),
228
  "\n".join(log_entries[-10:]),
 
229
  detected_issues,
230
  None,
231
  None
 
237
  None,
238
  json.dumps(last_metrics, indent=2),
239
  "\n".join(log_entries[-10:]),
 
240
  detected_issues,
241
  None,
242
  None
243
  )
244
 
245
  detection_frame = cv2.resize(frame, (512, 320))
 
246
  all_detected_items: List[Dict[str, Any]] = []
247
  shadow_issue = False
248
  thermal_flag = False
249
 
250
  try:
251
+ if "under_construction" in enabled_services:
252
+ earthwork_dets, detection_frame = process_earthwork(detection_frame)
253
+ culvert_dets, detection_frame = process_culverts(detection_frame)
254
+ bridge_pier_dets, detection_frame = process_bridge_piers(detection_frame)
255
+ all_detected_items.extend(earthwork_dets + culvert_dets + bridge_pier_dets)
256
+
257
  try:
258
  cv2.imwrite(TEMP_IMAGE_PATH, detection_frame)
259
  shadow_issue = detect_shadow_coverage(TEMP_IMAGE_PATH)
 
262
  logging.error(f"Error saving temp image: {str(e)}")
263
  shadow_issue = False
264
 
 
265
  if len(detection_frame.shape) == 2:
266
  thermal_results = process_thermal(detection_frame)
267
  thermal_dets = thermal_results["detections"]
 
269
  all_detected_items.extend(thermal_dets)
270
  thermal_flag = bool(thermal_dets)
271
 
 
272
  orig_h, orig_w = frame.shape[:2]
273
  det_h, det_w = detection_frame.shape[:2]
274
  scale_x, scale_y = orig_w / det_w, orig_h / det_h
 
282
  int(box[3] * scale_y)
283
  ]
284
 
 
285
  for item in all_detected_items:
286
  box = item.get("box", [])
287
  if not box:
 
289
  x_min, y_min, x_max, y_max = box
290
  label = item.get("label", "")
291
  dtype = item.get("type", "")
292
+
293
+ if dtype == "earthwork":
294
+ color = (255, 105, 180) # Pink
295
  elif dtype == "culvert":
296
+ color = (0, 128, 128) # Teal
297
  elif dtype == "bridge_pier":
298
+ color = (255, 127, 127) # Coral
299
  else:
300
+ continue
301
 
302
+ cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, 3)
303
+ (text_w, text_h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
304
+ label_background = frame[y_min - text_h - 15:y_min - 5, x_min:x_min + text_w + 10]
305
+ if label_background.size > 0:
306
+ overlay = label_background.copy()
307
+ cv2.rectangle(overlay, (0, 0), (text_w + 10, text_h + 10), (0, 0, 0), -1)
308
+ alpha = 0.5
309
+ cv2.addWeighted(overlay, alpha, label_background, 1 - alpha, 0, label_background)
310
+ cv2.putText(frame, label, (x_min + 5, y_min - 10),
311
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
312
 
 
313
  try:
314
  cv2.imwrite(TEMP_IMAGE_PATH, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
315
  except Exception as e:
 
321
  logging.error(f"Processing error: {str(e)}")
322
  all_detected_items = []
323
 
 
324
  metrics = update_metrics(all_detected_items)
 
 
325
  gps_coord = [17.385044 + random.uniform(-0.001, 0.001), 78.486671 + frame_count * 0.0001]
326
  gps_coordinates.append(gps_coord)
327
 
 
328
  for item in all_detected_items:
329
  item["gps"] = gps_coord
330
 
 
331
  detection_types = {item.get("type") for item in all_detected_items if "type" in item}
332
  if detection_types:
333
  try:
334
  captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count}.jpg")
335
+ success = cv2.imwrite(captured_frame_path, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
336
  if not success:
337
  raise RuntimeError(f"Failed to save captured frame: {captured_frame_path}")
338
  for item in all_detected_items:
339
+ detected_issues.append(captured_frame_path)
340
+ if len(detected_issues) > 100:
341
+ detected_issues.pop(0)
342
  except Exception as e:
343
  log_entries.append(f"Error saving captured frame: {str(e)}")
344
  logging.error(f"Error saving captured frame: {str(e)}")
345
 
 
346
  all_detections = {
347
  "detections": all_detected_items,
348
  "metrics": metrics,
 
353
  "thermal": thermal_flag
354
  }
355
 
 
356
  try:
357
  send_to_salesforce(all_detections)
358
  except Exception as e:
359
  log_entries.append(f"Salesforce Dispatch Error: {str(e)}")
360
  logging.error(f"Salesforce dispatch error: {str(e)}")
361
 
 
362
  try:
363
  frame_path = os.path.join(OUTPUT_DIR, f"frame_{frame_count:04d}.jpg")
364
+ success = cv2.imwrite(frame_path, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
365
  if not success:
366
  raise RuntimeError(f"Failed to save output frame: {frame_path}")
367
  except Exception as e:
368
  log_entries.append(f"Error saving output frame: {str(e)}")
369
  logging.error(f"Error saving output frame: {str(e)}")
370
 
 
371
  frame_count += 1
372
+ last_timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
373
  last_frame = frame.copy()
374
  last_metrics = metrics
375
 
376
+ earthwork_detected = len([item for item in all_detected_items if item.get("type") == "earthwork"])
377
+ culvert_detected = len([item for item in all_detected_items if item.get("type") == "culvert"])
378
+ bridge_pier_detected = len([item for item in all_detected_items if item.get("type") == "bridge_pier"])
379
+ detected_counts.append(earthwork_detected + culvert_detected + bridge_pier_detected)
380
+
381
  processing_time = time.time() - start_time
382
  detection_summary = {
383
  "timestamp": last_timestamp,
384
  "frame": frame_count,
385
+ "earthworks": earthwork_detected,
386
+ "culverts": culvert_detected,
387
+ "bridge_piers": bridge_pier_detected,
 
 
 
 
 
388
  "gps": gps_coord,
389
  "processing_time_ms": processing_time * 1000
390
  }
 
392
  log_entries.append(log_message)
393
  logging.info(log_message)
394
 
 
395
  if len(log_entries) > 100:
396
  log_entries.pop(0)
397
  if len(detected_counts) > 500:
398
  detected_counts.pop(0)
399
 
 
400
  frame = cv2.resize(last_frame, (640, 480))
401
  cv2.putText(frame, f"Frame: {frame_count}", (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
402
  cv2.putText(frame, f"{last_timestamp}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
403
 
404
+ map_items = [item for item in last_metrics.get("items", []) if item.get("type") in ["earthwork", "culvert", "bridge_pier"]]
 
405
  map_path = generate_map(gps_coordinates[-5:], map_items)
406
 
407
  return (
408
+ frame[:, :, ::-1],
409
  json.dumps(last_metrics, indent=2),
410
  "\n".join(log_entries[-10:]),
 
411
  detected_issues,
412
  generate_line_chart(),
413
  map_path
414
  )
415
 
416
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange", secondary_hue="amber")) as app:
 
417
  gr.Markdown(
418
  """
419
+ # 🛠️ Under Construction Inspection Dashboard
420
+ Monitor under construction elements in real-time using drone footage or static images.
421
  """
422
  )
423
 
 
434
 
435
  with gr.Row():
436
  with gr.Column():
437
+ uc_toggle = gr.Checkbox(label="Enable Under Construction Services", value=False)
438
+ uc_status = gr.Textbox(label="Under Construction Status", value="Disabled", interactive=False)
439
 
440
+ status_text = gr.Markdown("**Status:** 🟢 Ready (Upload a media file and enable the service to start)")
441
 
442
  with gr.Row():
443
  with gr.Column(scale=3):
 
454
  with gr.Column(scale=2):
455
  logs_output = gr.Textbox(label="Live Logs", lines=8, interactive=False)
456
  with gr.Column(scale=1):
 
457
  issue_images = gr.Gallery(label="Detected Issues (Last 100+)", columns=4, rows=13, height="auto")
458
 
459
  with gr.Row():
 
467
 
468
  gr.HTML("""
469
  <style>
470
+ body {
471
+ background-color: #FFDAB9 !important;
472
+ }
473
  #live-feed {
474
+ border: 2px solid #FF8C00;
475
  border-radius: 10px;
476
  }
477
  .gr-button-primary {
478
+ background-color: #FF8C00 !important;
479
  }
480
  .gr-button-secondary {
481
  background-color: #FF6347 !important;
 
505
  outputs=[media_status]
506
  )
507
 
508
+ def update_toggles(uc_val: bool) -> Tuple[str, str]:
509
+ active, status_message = set_active_service(uc_val)
510
+ uc_status_val = "Enabled" if uc_val else "Disabled"
511
+ return uc_status_val, status_message
512
 
513
+ uc_toggle.change(update_toggles, inputs=[uc_toggle], outputs=[uc_status, status_text])
514
 
515
  pause_btn.click(toggle_pause, outputs=status_text)
516
  resume_btn.click(toggle_resume, outputs=status_text)
 
519
  def streaming_loop():
520
  while True:
521
  if not media_loaded:
522
+ yield None, json.dumps({"error": "Media not loaded. Please upload a video or image file."}, indent=2), "\n".join(log_entries[-10:]), detected_issues, None, None
523
  else:
524
+ frame, metrics, logs, issues, chart, map_path = monitor_feed()
525
  if frame is None:
526
+ yield None, metrics, logs, issues, chart, map_path
527
  else:
528
+ yield frame, metrics, logs, issues, chart, map_path
529
  if not is_video:
 
530
  break
531
  time.sleep(frame_rate)
532
 
533
+ app.load(streaming_loop, outputs=[media_output, metrics_output, logs_output, issue_images, chart_output, map_output])
534
 
535
  if __name__ == "__main__":
536
+ app.launch(share=True)