nagasurendra committed on
Commit 1379d90 · verified · 1 Parent(s): 71ba206

Update app.py

Files changed (1)
  1. app.py +323 -62
app.py CHANGED
Old version (removed lines begin with -):
@@ -1,16 +1,21 @@
- import os
- import zipfile
- import json
- import logging
  import cv2
  import torch
  import numpy as np
  import matplotlib.pyplot as plt
  from datetime import datetime
  from collections import Counter
  from ultralytics import YOLO
- import piexif
  import time

  # Set YOLO config directory
  os.environ["YOLO_CONFIG_DIR"] = "/tmp/Ultralytics"
@@ -29,19 +34,37 @@ FLIGHT_LOG_DIR = "flight_logs"
  os.makedirs(CAPTURED_FRAMES_DIR, exist_ok=True)
  os.makedirs(OUTPUT_DIR, exist_ok=True)
  os.makedirs(FLIGHT_LOG_DIR, exist_ok=True)

  # Global variables
- detected_counts = []
- detected_issues = []
- gps_coordinates = []
- log_entries = []
- frame_count = 0
  DETECTION_CLASSES = ["Longitudinal", "Pothole", "Transverse"]

  # Load custom YOLO model
  device = "cuda" if torch.cuda.is_available() else "cpu"
  model = YOLO('./data/best.pt').to(device)

  def zip_directory(folder_path: str, zip_path: str) -> str:
      """Zip all files in a directory."""
      try:
@@ -54,10 +77,11 @@ def zip_directory(folder_path: str, zip_path: str) -> str:
          return zip_path
      except Exception as e:
          logging.error(f"Failed to zip {folder_path}: {str(e)}")
          return ""

  def generate_map(gps_coords: List[List[float]], items: List[Dict[str, Any]]) -> str:
-     """Generate and save map of detected issue locations."""
      map_path = os.path.join(OUTPUT_DIR, "map_temp.png")
      plt.figure(figsize=(4, 4))
      plt.scatter([x[1] for x in gps_coords], [x[0] for x in gps_coords], c='blue', label='GPS Points')
@@ -69,11 +93,13 @@ def generate_map(gps_coords: List[List[float]], items: List[Dict[str, Any]]) ->
      plt.close()
      return map_path

  def write_geotag(image_path: str, gps_coord: List[float]) -> bool:
-     """Add GPS coordinates as EXIF data to an image."""
      try:
-         lat, lon = abs(gps_coord[0]), abs(gps_coord[1])
-         lat_ref, lon_ref = ("N" if gps_coord[0] >= 0 else "S"), ("E" if gps_coord[1] >= 0 else "W")
          exif_dict = piexif.load(image_path) if os.path.exists(image_path) else {"GPS": {}}
          exif_dict["GPS"] = {
              piexif.GPSIFD.GPSLatitudeRef: lat_ref,
@@ -85,98 +111,333 @@ def write_geotag(image_path: str, gps_coord: List[float]) -> bool:
          return True
      except Exception as e:
          logging.error(f"Failed to geotag {image_path}: {str(e)}")
          return False

- def process_video(video):
-     global frame_count, detected_counts, detected_issues, gps_coordinates, log_entries
      frame_count = 0
      detected_counts.clear()
      detected_issues.clear()
      gps_coordinates.clear()
      log_entries.clear()

      cap = cv2.VideoCapture(video)
      if not cap.isOpened():
          logging.error("Could not open video file")
-         return None, json.dumps({"error": "Could not open video file"}, indent=2)

      fps = cap.get(cv2.CAP_PROP_FPS)
      total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

-     out_path = os.path.join(OUTPUT_DIR, "processed_output.mp4")
-     out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (4000, 3000))

      all_detections = []
-     data_lake_submission = {"images": [], "flight_logs": [], "metrics": {}}

      while True:
          ret, frame = cap.read()
          if not ret:
              break
          frame_count += 1
-         results = model(frame)
          annotated_frame = results[0].plot()

-         # Simulate GPS coordinates for each frame
          gps_coord = [17.385044 + (frame_count * 0.0001), 78.486671 + (frame_count * 0.0001)]
          gps_coordinates.append(gps_coord)

          frame_detections = []
          for detection in results[0].boxes:
-             label = model.names[int(detection.cls)]
              if label in DETECTION_CLASSES:
-                 frame_detections.append({"label": label, "box": detection.xyxy[0].cpu().numpy().tolist()})
-                 log_entries.append(f"Detected {label} in frame {frame_count}")

          if frame_detections:
-             captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count:06d}.jpg")
-             cv2.imwrite(captured_frame_path, annotated_frame)
-             write_geotag(captured_frame_path, gps_coord)

-             detected_issues.append(captured_frame_path)
-             data_lake_submission["images"].append({"path": captured_frame_path, "frame": frame_count, "gps": gps_coord})

-             log_path = os.path.join(FLIGHT_LOG_DIR, f"flight_log_{frame_count:06d}.csv")
-             with open(log_path, 'w', newline='') as csvfile:
-                 writer = csv.writer(csvfile)
-                 writer.writerow(["Frame", "Latitude", "Longitude", "Timestamp"])
-                 writer.writerow([frame_count, gps_coord[0], gps_coord[1], datetime.now().strftime("%Y-%m-%d %H:%M:%S")])
-
-             data_lake_submission["flight_logs"].append({"path": log_path, "frame": frame_count})

          out.write(annotated_frame)

      cap.release()
      out.release()

-     # Generate the map and trend chart
-     map_path = generate_map(gps_coordinates, all_detections)
-     trend_chart_path = os.path.join(OUTPUT_DIR, "detection_trend.png")
-     plt.plot(detected_counts)
-     plt.savefig(trend_chart_path)
-     plt.close()

-     # Compile everything into a single ZIP file
-     zip_path = os.path.join(OUTPUT_DIR, "final_report.zip")
-     with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
-         zipf.write(out_path, os.path.basename(out_path)) # Add processed video
-         zipf.write(map_path, os.path.basename(map_path)) # Add map
-         zipf.write(trend_chart_path, os.path.basename(trend_chart_path)) # Add trend chart
-         zipf.write("data_lake_submission.json", "data_lake_submission.json") # Add submission JSON
-     zipf = zip_directory(CAPTURED_FRAMES_DIR, zip_path) # Add captured frames
-     zipf = zip_directory(FLIGHT_LOG_DIR, zip_path) # Add flight logs

-     return zip_path


- # Gradio interface (keep unchanged)
- import gradio as gr
- with gr.Blocks() as iface:
-     gr.Markdown("# Drone Analysis Report")
-     video_input = gr.Video(label="Upload Video")
-     process_btn = gr.Button("Generate Report")
-     zip_output = gr.File(label="Download Final Report (ZIP)")

-     process_btn.click(fn=process_video, inputs=[video_input], outputs=[zip_output])

-     iface.launch()

New version (added lines begin with +):
@@ -1,16 +1,21 @@
  import cv2
  import torch
+ import gradio as gr
  import numpy as np
+ import os
+ import json
+ import logging
  import matplotlib.pyplot as plt
+ import csv
  from datetime import datetime
  from collections import Counter
+ from typing import List, Dict, Any, Optional
  from ultralytics import YOLO
+ import ultralytics
  import time
+ import piexif
+ import zipfile
+ import shutil

  # Set YOLO config directory
  os.environ["YOLO_CONFIG_DIR"] = "/tmp/Ultralytics"
@@ -29,19 +34,37 @@ FLIGHT_LOG_DIR = "flight_logs"
  os.makedirs(CAPTURED_FRAMES_DIR, exist_ok=True)
  os.makedirs(OUTPUT_DIR, exist_ok=True)
  os.makedirs(FLIGHT_LOG_DIR, exist_ok=True)
+ os.chmod(CAPTURED_FRAMES_DIR, 0o777)
+ os.chmod(OUTPUT_DIR, 0o777)
+ os.chmod(FLIGHT_LOG_DIR, 0o777)

  # Global variables
+ log_entries: List[str] = []
+ detected_counts: List[int] = []
+ detected_issues: List[str] = []
+ gps_coordinates: List[List[float]] = []
+ last_metrics: Dict[str, Any] = {}
+ frame_count: int = 0
+ SAVE_IMAGE_INTERVAL = 1
+
+ # Detection classes
  DETECTION_CLASSES = ["Longitudinal", "Pothole", "Transverse"]

+ # Debug: Check environment
+ print(f"Torch version: {torch.__version__}")
+ print(f"Gradio version: {gr.__version__}")
+ print(f"Ultralytics version: {ultralytics.__version__}")
+ print(f"CUDA available: {torch.cuda.is_available()}")
+
  # Load custom YOLO model
  device = "cuda" if torch.cuda.is_available() else "cpu"
+ print(f"Using device: {device}")
  model = YOLO('./data/best.pt').to(device)
+ if device == "cuda":
+     model.half()
+ print(f"Model classes: {model.names}")

+ # Function to zip all files in a directory
  def zip_directory(folder_path: str, zip_path: str) -> str:
      """Zip all files in a directory."""
      try:
@@ -54,10 +77,11 @@ def zip_directory(folder_path: str, zip_path: str) -> str:
          return zip_path
      except Exception as e:
          logging.error(f"Failed to zip {folder_path}: {str(e)}")
+         log_entries.append(f"Error: Failed to zip {folder_path}: {str(e)}")
          return ""

+ # Function to generate the map of detected issues' locations
  def generate_map(gps_coords: List[List[float]], items: List[Dict[str, Any]]) -> str:
      map_path = os.path.join(OUTPUT_DIR, "map_temp.png")
      plt.figure(figsize=(4, 4))
      plt.scatter([x[1] for x in gps_coords], [x[0] for x in gps_coords], c='blue', label='GPS Points')
@@ -69,11 +93,13 @@ def generate_map(gps_coords: List[List[float]], items: List[Dict[str, Any]]) ->
      plt.close()
      return map_path

+ # Function to geotag the image with GPS coordinates
  def write_geotag(image_path: str, gps_coord: List[float]) -> bool:
      try:
+         lat = abs(gps_coord[0])
+         lon = abs(gps_coord[1])
+         lat_ref = "N" if gps_coord[0] >= 0 else "S"
+         lon_ref = "E" if gps_coord[1] >= 0 else "W"
          exif_dict = piexif.load(image_path) if os.path.exists(image_path) else {"GPS": {}}
          exif_dict["GPS"] = {
              piexif.GPSIFD.GPSLatitudeRef: lat_ref,
@@ -85,98 +111,333 @@ def write_geotag(image_path: str, gps_coord: List[float]) -> bool:
          return True
      except Exception as e:
          logging.error(f"Failed to geotag {image_path}: {str(e)}")
+         log_entries.append(f"Error: Failed to geotag {image_path}: {str(e)}")
          return False

+ # Function to write flight logs
+ def write_flight_log(frame_count: int, gps_coord: List[float], timestamp: str) -> str:
+     log_path = os.path.join(FLIGHT_LOG_DIR, f"flight_log_{frame_count:06d}.csv")
+     try:
+         with open(log_path, 'w', newline='') as csvfile:
+             writer = csv.writer(csvfile)
+             writer.writerow(["Frame", "Timestamp", "Latitude", "Longitude", "Speed_ms", "Satellites", "Altitude_m"])
+             writer.writerow([frame_count, timestamp, gps_coord[0], gps_coord[1], 5.0, 12, 60])
+         return log_path
+     except Exception as e:
+         logging.error(f"Failed to write flight log {log_path}: {str(e)}")
+         log_entries.append(f"Error: Failed to write flight log {log_path}: {str(e)}")
+         return ""
+
+ # Function to update the metrics for detections
+ def update_metrics(detections: List[Dict[str, Any]]) -> Dict[str, Any]:
+     counts = Counter([det["label"] for det in detections])
+     return {
+         "items": [{"type": k, "count": v} for k, v in counts.items()],
+         "total_detections": len(detections),
+         "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+     }
+
+ # Function to generate detection trend chart
+ def generate_line_chart() -> Optional[str]:
+     if not detected_counts:
+         return None
+     plt.figure(figsize=(4, 2))
+     plt.plot(detected_counts[-50:], marker='o', color='#FF8C00')
+     plt.title("Detections Over Time")
+     plt.xlabel("Frame")
+     plt.ylabel("Count")
+     plt.grid(True)
+     plt.tight_layout()
+     chart_path = os.path.join(OUTPUT_DIR, "chart_temp.png")
+     plt.savefig(chart_path)
+     plt.close()
+     return chart_path
+
+ # Function to generate a single ZIP report containing all results
+ def generate_single_report(output_path, detected_issues, flight_logs, metrics, chart_path, map_path):
+     try:
+         # Create a directory for the report files
+         report_dir = os.path.join(OUTPUT_DIR, "final_report")
+         os.makedirs(report_dir, exist_ok=True)
+
+         # Copy the processed video
+         shutil.copy(output_path, os.path.join(report_dir, "processed_video.mp4"))
+
+         # Save the metrics JSON
+         metrics_json_path = os.path.join(report_dir, "metrics.json")
+         with open(metrics_json_path, 'w') as json_file:
+             json.dump(metrics, json_file, indent=2)
+
+         # Zip all captured frames
+         images_zip_path = zip_directory(CAPTURED_FRAMES_DIR, os.path.join(report_dir, "captured_frames.zip"))
+
+         # Zip the flight logs
+         logs_zip_path = zip_directory(FLIGHT_LOG_DIR, os.path.join(report_dir, "flight_logs.zip"))
+
+         # Save the detection trend chart
+         if chart_path:
+             shutil.copy(chart_path, os.path.join(report_dir, "detection_trend_chart.png"))
+
+         # Save the issue locations map
+         if map_path:
+             shutil.copy(map_path, os.path.join(report_dir, "issue_locations_map.png"))
+
+         # Create a ZIP of the entire report folder
+         zip_path = os.path.join(OUTPUT_DIR, "final_report.zip")
+         shutil.make_archive(zip_path.replace('.zip', ''), 'zip', report_dir)
+
+         # Clean up the report directory after zipping
+         shutil.rmtree(report_dir)
+
+         return zip_path
+     except Exception as e:
+         logging.error(f"Error generating single report: {str(e)}")
+         log_entries.append(f"Error generating single report: {str(e)}")
+         return ""
+
+ # Video processing function
+ def process_video(video, resize_width=4000, resize_height=3000, frame_skip=5):
+     global frame_count, last_metrics, detected_counts, detected_issues, gps_coordinates, log_entries
      frame_count = 0
      detected_counts.clear()
      detected_issues.clear()
      gps_coordinates.clear()
      log_entries.clear()
+     last_metrics = {}
+
+     if video is None:
+         log_entries.append("Error: No video uploaded")
+         logging.error("No video uploaded")
+         return None

+     start_time = time.time()
      cap = cv2.VideoCapture(video)
      if not cap.isOpened():
+         log_entries.append("Error: Could not open video file")
          logging.error("Could not open video file")
+         return None

+     frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     input_resolution = frame_width * frame_height
      fps = cap.get(cv2.CAP_PROP_FPS)
      total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+     expected_duration = total_frames / fps if fps > 0 else 0
+     log_entries.append(f"Input video: {frame_width}x{frame_height} ({input_resolution/1e6:.2f}MP), {fps} FPS, {total_frames} frames, {expected_duration:.2f} seconds, Frame skip: {frame_skip}")
+     logging.info(f"Input video: {frame_width}x{frame_height} ({input_resolution/1e6:.2f}MP), {fps} FPS, {total_frames} frames, {expected_duration:.2f} seconds, Frame skip: {frame_skip}")
+     print(f"Input video: {frame_width}x{frame_height} ({input_resolution/1e6:.2f}MP), {fps} FPS, {total_frames} frames, {expected_duration:.2f} seconds, Frame skip: {frame_skip}")

+     out_width, out_height = resize_width, resize_height
+     output_path = os.path.join(OUTPUT_DIR, "processed_output.mp4")
+     out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (out_width, out_height))
+     if not out.isOpened():
+         log_entries.append("Error: Failed to initialize mp4v codec")
+         logging.error("Failed to initialize mp4v codec")
+         cap.release()
+         return None

+     processed_frames = 0
      all_detections = []
+     frame_times = []
+     inference_times = []
+     resize_times = []
+     io_times = []
+     detection_frame_count = 0
+     output_frame_count = 0
+     last_annotated_frame = None
+     data_lake_submission = {
+         "images": [],
+         "flight_logs": [],
+         "analytics": [],
+         "metrics": {}
+     }

      while True:
          ret, frame = cap.read()
          if not ret:
              break
          frame_count += 1
+         if frame_count % frame_skip != 0:
+             continue
+         processed_frames += 1
+         frame_start = time.time()
+
+         # Resize
+         resize_start = time.time()
+         frame = cv2.resize(frame, (out_width, out_height))
+         resize_times.append((time.time() - resize_start) * 1000)
+
+         if not check_image_quality(frame, input_resolution):
+             log_entries.append(f"Frame {frame_count}: Skipped due to low resolution")
+             continue
+
+         # Inference
+         inference_start = time.time()
+         results = model(frame, verbose=False, conf=0.5, iou=0.7)
          annotated_frame = results[0].plot()
+         inference_times.append((time.time() - inference_start) * 1000)
+
+         frame_timestamp = frame_count / fps if fps > 0 else 0
+         timestamp_str = f"{int(frame_timestamp // 60)}:{int(frame_timestamp % 60):02d}"

          gps_coord = [17.385044 + (frame_count * 0.0001), 78.486671 + (frame_count * 0.0001)]
          gps_coordinates.append(gps_coord)

+         io_start = time.time()
          frame_detections = []
          for detection in results[0].boxes:
+             cls = int(detection.cls)
+             conf = float(detection.conf)
+             box = detection.xyxy[0].cpu().numpy().astype(int).tolist()
+             label = model.names[cls]
              if label in DETECTION_CLASSES:
+                 frame_detections.append({
+                     "label": label,
+                     "box": box,
+                     "conf": conf,
+                     "gps": gps_coord,
+                     "timestamp": timestamp_str
+                 })
+                 log_message = f"Frame {frame_count} at {timestamp_str}: Detected {label} with confidence {conf:.2f}"
+                 log_entries.append(log_message)
+                 logging.info(log_message)

          if frame_detections:
+             detection_frame_count += 1
+             if detection_frame_count % SAVE_IMAGE_INTERVAL == 0:
+                 captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count:06d}.jpg")
+                 if cv2.imwrite(captured_frame_path, annotated_frame):
+                     if write_geotag(captured_frame_path, gps_coord):
+                         detected_issues.append(captured_frame_path)
+                         data_lake_submission["images"].append({
+                             "path": captured_frame_path,
+                             "frame": frame_count,
+                             "gps": gps_coord,
+                             "timestamp": timestamp_str
+                         })
+                         if len(detected_issues) > 100:
+                             detected_issues.pop(0)
+                     else:
+                         log_entries.append(f"Frame {frame_count}: Geotagging failed")
+                 else:
+                     log_entries.append(f"Error: Failed to save {captured_frame_path}")
+                     logging.error(f"Failed to save {captured_frame_path}")

+             flight_log_path = write_flight_log(frame_count, gps_coord, timestamp_str)
+             if flight_log_path:
+                 data_lake_submission["flight_logs"].append({
+                     "path": flight_log_path,
+                     "frame": frame_count
+                 })

+         io_times.append((time.time() - io_start) * 1000)

          out.write(annotated_frame)
+         output_frame_count += 1
+         last_annotated_frame = annotated_frame
+         if frame_skip > 1:
+             for _ in range(frame_skip - 1):
+                 out.write(annotated_frame)
+                 output_frame_count += 1
+
+         detected_counts.append(len(frame_detections))
+         all_detections.extend(frame_detections)
+
+         frame_time = (time.time() - frame_start) * 1000
+         frame_times.append(frame_time)
+         log_entries.append(f"Frame {frame_count}: Processed in {frame_time:.2f} ms (Resize: {resize_times[-1]:.2f} ms, Inference: {inference_times[-1]:.2f} ms, I/O: {io_times[-1]:.2f} ms)")
+         if len(log_entries) > 50:
+             log_entries.pop(0)
+
+         if time.time() - start_time > 600:
+             log_entries.append("Error: Processing timeout after 600 seconds")
+             logging.error("Processing timeout after 600 seconds")
+             break
+
+     while output_frame_count < total_frames and last_annotated_frame is not None:
+         out.write(last_annotated_frame)
+         output_frame_count += 1
+
+     last_metrics = update_metrics(all_detections)
+     data_lake_submission["metrics"] = last_metrics
+     data_lake_submission["frame_count"] = frame_count
+     data_lake_submission["gps_coordinates"] = gps_coordinates[-1] if gps_coordinates else [0, 0]
+
+     submission_json_path = os.path.join(OUTPUT_DIR, "data_lake_submission.json")
+     try:
+         with open(submission_json_path, 'w') as f:
+             json.dump(data_lake_submission, f, indent=2)
+         log_entries.append(f"Submission JSON saved: {submission_json_path}")
+         logging.info(f"Submission JSON saved: {submission_json_path}")
+     except Exception as e:
+         log_entries.append(f"Error: Failed to save submission JSON: {str(e)}")
+         logging.error(f"Failed to save submission JSON: {str(e)}")

      cap.release()
      out.release()

+     cap = cv2.VideoCapture(output_path)
+     output_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+     output_fps = cap.get(cv2.CAP_PROP_FPS)
+     output_duration = output_frames / output_fps if output_fps > 0 else 0
+     cap.release()

+     total_time = time.time() - start_time
+     avg_frame_time = sum(frame_times) / len(frame_times) if frame_times else 0
+     avg_resize_time = sum(resize_times) / len(resize_times) if resize_times else 0
+     avg_inference_time = sum(inference_times) / len(inference_times) if inference_times else 0
+     avg_io_time = sum(io_times) / len(io_times) if io_times else 0
+     log_entries.append(f"Output video: {output_frames} frames, {output_fps:.2f} FPS, {output_duration:.2f} seconds")
+     logging.info(f"Output video: {output_frames} frames, {output_fps:.2f} FPS, {output_duration:.2f} seconds")
+     log_entries.append(f"Total processing time: {total_time:.2f} seconds, Avg frame time: {avg_frame_time:.2f} ms (Avg Resize: {avg_resize_time:.2f} ms, Avg Inference: {avg_inference_time:.2f} ms, Avg I/O: {avg_io_time:.2f} ms), Detection frames: {detection_frame_count}, Output frames: {output_frame_count}")
+     logging.info(f"Total processing time: {total_time:.2f} seconds, Avg frame time: {avg_frame_time:.2f} ms (Avg Resize: {avg_resize_time:.2f} ms, Avg Inference: {avg_inference_time:.2f} ms, Avg I/O: {avg_io_time:.2f} ms), Detection frames: {detection_frame_count}, Output frames: {output_frame_count}")
+     print(f"Output video: {output_frames} frames, {output_fps:.2f} FPS, {output_duration:.2f} seconds")
+     print(f"Total processing time: {total_time:.2f} seconds, Avg frame time: {avg_frame_time:.2f} ms, Detection frames: {detection_frame_count}, Output frames: {output_frame_count}")

+     chart_path = generate_line_chart()
+     map_path = generate_map(gps_coordinates[-5:], all_detections)

+     # Generate the single ZIP report
+     final_report_zip = generate_single_report(
+         output_path,
+         detected_issues,
+         data_lake_submission["flight_logs"],
+         last_metrics,
+         chart_path,
+         map_path
+     )

+     return final_report_zip
+ # Gradio interface
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange")) as iface:
+     gr.Markdown("# NHAI Road Defect Detection Dashboard")
+     with gr.Row():
+         with gr.Column(scale=3):
+             video_input = gr.Video(label="Upload Video (12MP recommended for NHAI compliance)")
+             width_slider = gr.Slider(320, 4000, value=4000, label="Output Width", step=1)
+             height_slider = gr.Slider(240, 3000, value=3000, label="Output Height", step=1)
+             skip_slider = gr.Slider(1, 10, value=5, label="Frame Skip", step=1)
+             process_btn = gr.Button("Process Video", variant="primary")
+         with gr.Column(scale=1):
+             metrics_output = gr.Textbox(label="Detection Metrics", lines=5, interactive=False)
+     with gr.Row():
+         video_output = gr.Video(label="Processed Video")
+         issue_gallery = gr.Gallery(label="Detected Issues", columns=4, height="auto", object_fit="contain")
+     with gr.Row():
+         chart_output = gr.Image(label="Detection Trend")
+         map_output = gr.Image(label="Issue Locations Map")
+     with gr.Row():
+         logs_output = gr.Textbox(label="Logs", lines=5, interactive=False)
+     with gr.Row():
+         gr.Markdown("## Download Results")
+     with gr.Row():
+         zip_download = gr.File(label="Download Report (ZIP)")

+     process_btn.click(
+         fn=process_video,
+         inputs=[video_input, width_slider, height_slider, skip_slider],
+         outputs=[zip_download]
+     )

+ if __name__ == "__main__":
+     iface.launch()
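
Note: the updated process_video() calls check_image_quality(frame, input_resolution), which is not defined in the hunks shown above. A minimal sketch of what such a helper might look like, assuming it only enforces the 12 MP minimum hinted at by the upload label; the constant name and its value are assumptions, not part of the commit:

import numpy as np

# Hypothetical helper, not part of this commit: gate frames on source resolution.
MIN_MEGAPIXELS = 12.0  # assumed minimum; adjust to the actual NHAI requirement

def check_image_quality(frame: np.ndarray, input_resolution: int) -> bool:
    """Return True when the source video resolution meets the megapixel floor."""
    return (input_resolution / 1e6) >= MIN_MEGAPIXELS

Because input_resolution is computed once per video, this gate either keeps or skips every frame of a given input; a per-frame sharpness test (for example, the variance of the Laplacian of the frame) would be a natural extension if individual frames need to be screened.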