Update app.py
app.py CHANGED
@@ -1,3 +1,5 @@
import cv2
import torch
import gradio as gr
@@ -12,13 +14,15 @@ from typing import List, Dict, Any, Optional
from ultralytics import YOLO
import ultralytics
import time

# Set YOLO config directory
os.environ["YOLO_CONFIG_DIR"] = "/tmp/Ultralytics"

# Set up logging
logging.basicConfig(
-     filename="
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s"
)
@@ -26,10 +30,13 @@ logging.basicConfig(
# Directories
CAPTURED_FRAMES_DIR = "captured_frames"
OUTPUT_DIR = "outputs"
os.makedirs(CAPTURED_FRAMES_DIR, exist_ok=True)
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.chmod(CAPTURED_FRAMES_DIR, 0o777)
os.chmod(OUTPUT_DIR, 0o777)

# Global variables
log_entries: List[str] = []
@@ -40,6 +47,24 @@ last_metrics: Dict[str, Any] = {}
frame_count: int = 0
SAVE_IMAGE_INTERVAL = 1  # Save every frame with detections

# Debug: Check environment
print(f"Torch version: {torch.__version__}")
print(f"Gradio version: {gr.__version__}")
@@ -49,14 +74,13 @@ print(f"CUDA available: {torch.cuda.is_available()}")
# Load custom YOLO model
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
- model = YOLO('./data/best.pt').to(device)
if device == "cuda":
    model.half()  # Use half-precision (FP16)
print(f"Model classes: {model.names}")

- # Mock service functions
def generate_map(gps_coords: List[List[float]], items: List[Dict[str, Any]]) -> str:
-     map_path = "map_temp.png"
    plt.figure(figsize=(4, 4))
    plt.scatter([x[1] for x in gps_coords], [x[0] for x in gps_coords], c='blue', label='GPS Points')
    plt.title("Issue Locations Map")
@@ -67,16 +91,56 @@ def generate_map(gps_coords: List[List[float]], items: List[Dict[str, Any]]) -> str:
    plt.close()
    return map_path

- def
-

def update_metrics(detections: List[Dict[str, Any]]) -> Dict[str, Any]:
    counts = Counter([det["label"] for det in detections])
-
        "items": [{"type": k, "count": v} for k, v in counts.items()],
        "total_detections": len(detections),
-         "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    }

def generate_line_chart() -> Optional[str]:
    if not detected_counts:
@@ -88,13 +152,12 @@ def generate_line_chart() -> Optional[str]:
    plt.ylabel("Count")
    plt.grid(True)
    plt.tight_layout()
-     chart_path = "chart_temp.png"
    plt.savefig(chart_path)
    plt.close()
    return chart_path

-
- def process_video(video, resize_width=320, resize_height=240, frame_skip=5):
    global frame_count, last_metrics, detected_counts, detected_issues, gps_coordinates, log_entries
    frame_count = 0
    detected_counts.clear()
@@ -125,14 +188,15 @@ def process_video(video, resize_width=320, resize_height=240, frame_skip=5):
    print(f"Input video: {frame_width}x{frame_height}, {fps} FPS, {total_frames} frames, {expected_duration:.2f} seconds")

    out_width, out_height = resize_width, resize_height
-     output_path = "processed_output.mp4"
    codecs = [('mp4v', '.mp4'), ('MJPG', '.avi'), ('XVID', '.avi')]
    out = None
    for codec, ext in codecs:
        fourcc = cv2.VideoWriter_fourcc(*codec)
-
-         out = cv2.VideoWriter(
        if out.isOpened():
            log_entries.append(f"Using codec: {codec}, output: {output_path}")
            logging.info(f"Using codec: {codec}, output: {output_path}")
            break
@@ -152,6 +216,11 @@ def process_video(video, resize_width=320, resize_height=240, frame_skip=5):
    detection_frame_count = 0
    output_frame_count = 0
    last_annotated_frame = None

    while True:
        ret, frame = cap.read()
@@ -167,19 +236,28 @@ def process_video(video, resize_width=320, resize_height=240, frame_skip=5):
        results = model(frame, verbose=False, conf=0.5, iou=0.7)
        annotated_frame = results[0].plot()

-         # Calculate timestamp for the current frame
        frame_timestamp = frame_count / fps if fps > 0 else 0
        timestamp_str = f"{int(frame_timestamp // 60)}:{int(frame_timestamp % 60):02d}"

        frame_detections = []
        for detection in results[0].boxes:
            cls = int(detection.cls)
            conf = float(detection.conf)
            box = detection.xyxy[0].cpu().numpy().astype(int).tolist()
            label = model.names[cls]
-             if label
-                 frame_detections.append({
-
                log_message = f"Frame {frame_count} at {timestamp_str}: Detected {label} with confidence {conf:.2f}"
                log_entries.append(log_message)
                logging.info(log_message)
@@ -187,16 +265,30 @@ def process_video(video, resize_width=320, resize_height=240, frame_skip=5):
        if frame_detections:
            detection_frame_count += 1
            if detection_frame_count % SAVE_IMAGE_INTERVAL == 0:
-                 captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count}.jpg")
                if not cv2.imwrite(captured_frame_path, annotated_frame):
                    log_entries.append(f"Error: Failed to save {captured_frame_path}")
                    logging.error(f"Failed to save {captured_frame_path}")
                else:
-
-
-

-         # Write frame and duplicates
        out.write(annotated_frame)
        output_frame_count += 1
        last_annotated_frame = annotated_frame
@@ -206,42 +298,34 @@ def process_video(video, resize_width=320, resize_height=240, frame_skip=5):
            output_frame_count += 1

        detected_counts.append(len(frame_detections))
-         gps_coord = [17.385044 + (frame_count * 0.0001), 78.486671 + (frame_count * 0.0001)]
        gps_coordinates.append(gps_coord)
-         for det in frame_detections:
-             det["gps"] = gps_coord
-             det["timestamp"] = timestamp_str  # Add timestamp to detection data
        all_detections.extend(frame_detections)

-         frame_time = (time.time() - frame_start) * 1000
-         frame_times.append(frame_time)
        detection_summary = {
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "video_timestamp": timestamp_str,
            "frame": frame_count,
-             "longitudinal": sum(1 for det in frame_detections if det["label"] == "Longitudinal"),
-             "pothole": sum(1 for det in frame_detections if det["label"] == "Pothole"),
-             "transverse": sum(1 for det in frame_detections if det["label"] == "Transverse"),
            "gps": gps_coord,
-             "processing_time_ms":
        }
        log_entries.append(json.dumps(detection_summary, indent=2))
        if len(log_entries) > 50:
            log_entries.pop(0)

-     # Pad remaining frames
    while output_frame_count < total_frames and last_annotated_frame is not None:
        out.write(last_annotated_frame)
        output_frame_count += 1

    last_metrics = update_metrics(all_detections)
-
-
-
-
-
-
-

    cap.release()
    out.release()
@@ -272,12 +356,13 @@ def process_video(video, resize_width=320, resize_height=240, frame_skip=5):
        chart_path,
        map_path
    )
# Gradio interface
with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange")) as iface:
-     gr.Markdown("#
    with gr.Row():
        with gr.Column(scale=3):
-             video_input = gr.Video(label="Upload Video")
            width_slider = gr.Slider(320, 640, value=320, label="Output Width", step=1)
            height_slider = gr.Slider(240, 480, value=240, label="Output Height", step=1)
            skip_slider = gr.Slider(1, 10, value=5, label="Frame Skip", step=1)
@@ -299,5 +384,8 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange")) as iface:
        outputs=[video_output, metrics_output, logs_output, issue_gallery, chart_output, map_output]
    )

- if
-
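The updated app.py after this commit, with added lines marked "+":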
+ import asyncio
+ import platform
import cv2
import torch
import gradio as gr

from ultralytics import YOLO
import ultralytics
import time
+ import exiftool
+ import csv

# Set YOLO config directory
os.environ["YOLO_CONFIG_DIR"] = "/tmp/Ultralytics"

# Set up logging
logging.basicConfig(
+     filename="drone_app.log",
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s"
)

# Directories
CAPTURED_FRAMES_DIR = "captured_frames"
OUTPUT_DIR = "outputs"
+ FLIGHT_LOG_DIR = "flight_logs"
os.makedirs(CAPTURED_FRAMES_DIR, exist_ok=True)
os.makedirs(OUTPUT_DIR, exist_ok=True)
+ os.makedirs(FLIGHT_LOG_DIR, exist_ok=True)
os.chmod(CAPTURED_FRAMES_DIR, 0o777)
os.chmod(OUTPUT_DIR, 0o777)
+ os.chmod(FLIGHT_LOG_DIR, 0o777)

# Global variables
log_entries: List[str] = []

frame_count: int = 0
SAVE_IMAGE_INTERVAL = 1  # Save every frame with detections

+ # SOP Parameters from Annexure-I
+ DRONE_SPEED_MS = 5  # 5 m/s (18 km/hr)
+ MIN_SATELLITES = 12
+ IMAGE_OVERLAP = 0.85  # 85% front and side overlap
+ MIN_RESOLUTION_MP = 12  # Minimum 12 MP
+ RECORDING_ANGLE = 90  # Nadir (90 degrees)
+ IMAGE_FORMAT = "JPEG"
+
+ # Annexure-III Operations and Maintenance parameters
+ DETECTION_CLASSES = [
+     "Potholes", "Edge Drops", "Crack", "Raveling", "Rain Cut Embankments",
+     "Authorized Median Opening", "Unauthorized Median Opening",
+     "Intersection/Crossroads", "Temporary Encroachments", "Permanent Encroachments",
+     "Missing Lane Markings", "Missing Boundary Wall", "Damaged Boundary Wall",
+     "Open Drain", "Covered Drain", "Blocked Drain", "Unclean Drain",
+     "Missing Dissipation Basin"
+ ]
+
# Debug: Check environment
print(f"Torch version: {torch.__version__}")
print(f"Gradio version: {gr.__version__}")

# Load custom YOLO model
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
+ model = YOLO('./data/best.pt').to(device)  # Assumes model is trained for all DETECTION_CLASSES
if device == "cuda":
    model.half()  # Use half-precision (FP16)
print(f"Model classes: {model.names}")

def generate_map(gps_coords: List[List[float]], items: List[Dict[str, Any]]) -> str:
+     map_path = os.path.join(OUTPUT_DIR, "map_temp.png")
    plt.figure(figsize=(4, 4))
    plt.scatter([x[1] for x in gps_coords], [x[0] for x in gps_coords], c='blue', label='GPS Points')
    plt.title("Issue Locations Map")

    plt.close()
    return map_path

+ def write_geotag(image_path: str, gps_coord: List[float]) -> bool:
+     try:
+         with exiftool.ExifToolHelper() as et:
+             et.set_tags(
+                 [image_path],
+                 {
+                     "EXIF:GPSLatitude": gps_coord[0],
+                     "EXIF:GPSLongitude": gps_coord[1],
+                     "EXIF:GPSLatitudeRef": "N" if gps_coord[0] >= 0 else "S",
+                     "EXIF:GPSLongitudeRef": "E" if gps_coord[1] >= 0 else "W"
+                 }
+             )
+         return True
+     except Exception as e:
+         logging.error(f"Failed to geotag {image_path}: {str(e)}")
+         return False
+
+ def write_flight_log(frame_count: int, gps_coord: List[float], timestamp: str) -> str:
+     log_path = os.path.join(FLIGHT_LOG_DIR, f"flight_log_{frame_count}.csv")
+     with open(log_path, 'w', newline='') as csvfile:
+         writer = csv.writer(csvfile)
+         writer.writerow(["Frame", "Timestamp", "Latitude", "Longitude", "Speed_ms", "Satellites", "Altitude_m"])
+         writer.writerow([frame_count, timestamp, gps_coord[0], gps_coord[1], DRONE_SPEED_MS, MIN_SATELLITES, 60])  # Example altitude
+     return log_path
+
+ def check_sop_compliance(frame: np.ndarray, gps_coord: List[float], frame_count: int) -> bool:
+     height, width, _ = frame.shape
+     if width * height < MIN_RESOLUTION_MP * 1e6:  # Check resolution (12MP)
+         log_entries.append(f"Frame {frame_count}: Resolution below {MIN_RESOLUTION_MP}MP")
+         return False
+     if len(gps_coord) != 2 or not all(isinstance(x, float) for x in gps_coord):
+         log_entries.append(f"Frame {frame_count}: Invalid GPS coordinates")
+         return False
+     return True

def update_metrics(detections: List[Dict[str, Any]]) -> Dict[str, Any]:
    counts = Counter([det["label"] for det in detections])
+     metrics = {
        "items": [{"type": k, "count": v} for k, v in counts.items()],
        "total_detections": len(detections),
+         "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+         "sop_compliance": {
+             "drone_speed_ms": DRONE_SPEED_MS,
+             "image_overlap": IMAGE_OVERLAP,
+             "min_resolution_mp": MIN_RESOLUTION_MP,
+             "recording_angle_degrees": RECORDING_ANGLE,
+             "image_format": IMAGE_FORMAT
+         }
    }
+     return metrics

def generate_line_chart() -> Optional[str]:
    if not detected_counts:

    plt.ylabel("Count")
    plt.grid(True)
    plt.tight_layout()
+     chart_path = os.path.join(OUTPUT_DIR, "chart_temp.png")
    plt.savefig(chart_path)
    plt.close()
    return chart_path

+ async def process_video(video, resize_width=320, resize_height=240, frame_skip=5):
    global frame_count, last_metrics, detected_counts, detected_issues, gps_coordinates, log_entries
    frame_count = 0
    detected_counts.clear()

    print(f"Input video: {frame_width}x{frame_height}, {fps} FPS, {total_frames} frames, {expected_duration:.2f} seconds")

    out_width, out_height = resize_width, resize_height
+     output_path = os.path.join(OUTPUT_DIR, "processed_output.mp4")
    codecs = [('mp4v', '.mp4'), ('MJPG', '.avi'), ('XVID', '.avi')]
    out = None
    for codec, ext in codecs:
        fourcc = cv2.VideoWriter_fourcc(*codec)
+         temp_output_path = os.path.join(OUTPUT_DIR, f"processed_output{ext}")
+         out = cv2.VideoWriter(temp_output_path, fourcc, fps, (out_width, out_height))
        if out.isOpened():
+             output_path = temp_output_path
            log_entries.append(f"Using codec: {codec}, output: {output_path}")
            logging.info(f"Using codec: {codec}, output: {output_path}")
            break

    detection_frame_count = 0
    output_frame_count = 0
    last_annotated_frame = None
+     data_lake_submission = {
+         "images": [],
+         "flight_logs": [],
+         "analytics": []
+     }

    while True:
        ret, frame = cap.read()

        results = model(frame, verbose=False, conf=0.5, iou=0.7)
        annotated_frame = results[0].plot()

        frame_timestamp = frame_count / fps if fps > 0 else 0
        timestamp_str = f"{int(frame_timestamp // 60)}:{int(frame_timestamp % 60):02d}"

+         gps_coord = [17.385044 + (frame_count * 0.0001), 78.486671 + (frame_count * 0.0001)]
+         if not check_sop_compliance(frame, gps_coord, frame_count):
+             log_entries.append(f"Frame {frame_count}: SOP compliance check failed")
+             continue
+
        frame_detections = []
        for detection in results[0].boxes:
            cls = int(detection.cls)
            conf = float(detection.conf)
            box = detection.xyxy[0].cpu().numpy().astype(int).tolist()
            label = model.names[cls]
+             if label in DETECTION_CLASSES:
+                 frame_detections.append({
+                     "label": label,
+                     "box": box,
+                     "conf": conf,
+                     "gps": gps_coord,
+                     "timestamp": timestamp_str
+                 })
                log_message = f"Frame {frame_count} at {timestamp_str}: Detected {label} with confidence {conf:.2f}"
                log_entries.append(log_message)
                logging.info(log_message)

        if frame_detections:
            detection_frame_count += 1
            if detection_frame_count % SAVE_IMAGE_INTERVAL == 0:
+                 captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"detected_{frame_count:06d}.jpg")
                if not cv2.imwrite(captured_frame_path, annotated_frame):
                    log_entries.append(f"Error: Failed to save {captured_frame_path}")
                    logging.error(f"Failed to save {captured_frame_path}")
                else:
+                     if write_geotag(captured_frame_path, gps_coord):
+                         detected_issues.append(captured_frame_path)
+                         data_lake_submission["images"].append({
+                             "path": captured_frame_path,
+                             "frame": frame_count,
+                             "gps": gps_coord,
+                             "timestamp": timestamp_str
+                         })
+                         if len(detected_issues) > 100:
+                             detected_issues.pop(0)
+                     else:
+                         log_entries.append(f"Error: Failed to geotag {captured_frame_path}")
+
+             flight_log_path = write_flight_log(frame_count, gps_coord, timestamp_str)
+             data_lake_submission["flight_logs"].append({
+                 "path": flight_log_path,
+                 "frame": frame_count
+             })

        out.write(annotated_frame)
        output_frame_count += 1
        last_annotated_frame = annotated_frame

            output_frame_count += 1

        detected_counts.append(len(frame_detections))
        gps_coordinates.append(gps_coord)
        all_detections.extend(frame_detections)

        detection_summary = {
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "video_timestamp": timestamp_str,
            "frame": frame_count,
            "gps": gps_coord,
+             "processing_time_ms": (time.time() - frame_start) * 1000,
+             "detections": {label: sum(1 for det in frame_detections if det["label"] == label) for label in DETECTION_CLASSES}
        }
+         data_lake_submission["analytics"].append(detection_summary)
        log_entries.append(json.dumps(detection_summary, indent=2))
        if len(log_entries) > 50:
            log_entries.pop(0)

    while output_frame_count < total_frames and last_annotated_frame is not None:
        out.write(last_annotated_frame)
        output_frame_count += 1

    last_metrics = update_metrics(all_detections)
+     data_lake_submission["metrics"] = last_metrics
+     data_lake_submission["frame_count"] = frame_count
+     data_lake_submission["gps_coordinates"] = gps_coordinates[-1] if gps_coordinates else [0, 0]
+
+     submission_json_path = os.path.join(OUTPUT_DIR, "data_lake_submission.json")
+     with open(submission_json_path, 'w') as f:
+         json.dump(data_lake_submission, f, indent=2)

    cap.release()
    out.release()

        chart_path,
        map_path
    )
+
# Gradio interface
with gr.Blocks(theme=gr.themes.Soft(primary_hue="orange")) as iface:
+     gr.Markdown("# NHAI Drone Analytics Dashboard")
    with gr.Row():
        with gr.Column(scale=3):
+             video_input = gr.Video(label="Upload Drone Video")
            width_slider = gr.Slider(320, 640, value=320, label="Output Width", step=1)
            height_slider = gr.Slider(240, 480, value=240, label="Output Height", step=1)
            skip_slider = gr.Slider(1, 10, value=5, label="Frame Skip", step=1)

        outputs=[video_output, metrics_output, logs_output, issue_gallery, chart_output, map_output]
    )

+ if platform.system() == "Emscripten":
+     asyncio.ensure_future(process_video())
+ else:
+     if __name__ == "__main__":
+         iface.launch()
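For reference, a minimal sketch of how the geotags written by the new write_geotag helper could be checked after a run. It assumes pyexiftool and the exiftool binary are installed (as the updated app.py already requires) and uses a purely illustrative frame path; any image actually saved under captured_frames/ would do.

import exiftool

# Illustrative path only; substitute a frame the app actually wrote.
image_path = "captured_frames/detected_000001.jpg"

with exiftool.ExifToolHelper() as et:
    # get_tags returns one dict per file containing the requested tags.
    tags = et.get_tags([image_path], ["EXIF:GPSLatitude", "EXIF:GPSLongitude"])[0]
    print(tags.get("EXIF:GPSLatitude"), tags.get("EXIF:GPSLongitude"))

If the tags come back empty, a corresponding "Failed to geotag" entry should appear in drone_app.log, since write_geotag logs the ExifTool error and returns False.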