Update app.py
app.py CHANGED
@@ -26,7 +26,7 @@ class DroneDetectionPipeline:
         self.tracker = None
         self.tracking_active = False
         self.last_detection_bbox = None
-        self.confidence_threshold = 0.
+        self.confidence_threshold = 0.55

         # Load model
         self._load_model()
@@ -190,43 +190,58 @@ class DroneDetectionPipeline:
                 break

             frame_processed = frame.copy()
-            current_detections = self._detect_drones(frame)

-            …
+            # Get all detections (including low confidence ones for tracking)
+            all_detections = self.model(frame, verbose=False, conf=0.1)  # Low threshold to get all detections
+            high_conf_detections = []
+            low_conf_detections = []
+
+            for result in all_detections:
+                if result.boxes is not None:
+                    for box in result.boxes:
+                        x1, y1, x2, y2 = box.xyxy[0].cpu().numpy().astype(int)
+                        confidence = float(box.conf[0].cpu().numpy())
+
+                        if confidence >= 0.55:
+                            high_conf_detections.append((x1, y1, x2, y2, confidence))
+                        elif confidence >= 0.1:  # Lower threshold for tracking
+                            low_conf_detections.append((x1, y1, x2, y2, confidence))
+
+            if high_conf_detections:
+                # Use high confidence detection - normal detection display
+                largest_detection = max(high_conf_detections, key=lambda d: (d[2]-d[0]) * (d[3]-d[1]))
                 x1, y1, x2, y2, conf = largest_detection
-                detection_bbox = (x1, y1, x2-x1, y2-y1)
-
-                # Draw detection
-                frame_processed = self._draw_detection(frame_processed,
-                                                       (x1, y1, x2, y2), conf, False)
+                detection_bbox = (x1, y1, x2-x1, y2-y1)

-
+                frame_processed = self._draw_detection(frame_processed, (x1, y1, x2, y2), None, False)
                 self._initialize_tracker(frame, detection_bbox)
                 detection_count += 1

+            elif low_conf_detections and self.last_detection_bbox is not None:
+                # Use low confidence detection for tracking only
+                largest_low_conf = max(low_conf_detections, key=lambda d: (d[2]-d[0]) * (d[3]-d[1]))
+                x1, y1, x2, y2, conf = largest_low_conf
+                tracking_bbox = (x1, y1, x2-x1, y2-y1)
+
+                # Update last known position and show as tracking
+                self.last_detection_bbox = tracking_bbox
+                frame_processed = self._draw_detection(frame_processed, (x1, y1, x2, y2), None, True)
+                tracking_count += 1
+
             elif self.tracking_active and self.tracker is not None:
-                #
+                # Use CSRT tracker when no detections
                 success, tracking_bbox = self.tracker.update(frame)

                 if success:
                     x, y, w, h = [int(v) for v in tracking_bbox]
                     self.last_detection_bbox = tracking_bbox
-
-                    # Draw tracking box
-                    frame_processed = self._draw_detection(frame_processed,
-                                                           (x, y, x+w, y+h), None, True)
+                    frame_processed = self._draw_detection(frame_processed, (x, y, x+w, y+h), None, True)
                     tracking_count += 1
                 else:
-                    # Tracking failed
+                    # Tracking failed - use last known position
                     if self.last_detection_bbox is not None:
-                        # Use last known position
                         x, y, w, h = [int(v) for v in self.last_detection_bbox]
-                        frame_processed = self._draw_detection(frame_processed,
-                                                               (x, y, x+w, y+h), None, True)
+                        frame_processed = self._draw_detection(frame_processed, (x, y, x+w, y+h), None, True)
                     self.tracking_active = False

             # Write processed frame
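Note: the fallback branches above call two helpers that sit outside this hunk, _initialize_tracker and _draw_detection. A minimal sketch of what they plausibly look like, assuming OpenCV's CSRT tracker from opencv-contrib-python and a simple rectangle-plus-label overlay; the method bodies below are illustrative assumptions, not code from this commit:

    import cv2

    class _TrackerHelpersSketch:
        """Hypothetical stand-ins for the pipeline's _initialize_tracker / _draw_detection."""

        def _initialize_tracker(self, frame, bbox):
            # CSRT tracker; TrackerCSRT_create lives under cv2.legacy in some OpenCV 4.x builds.
            create = getattr(cv2, "TrackerCSRT_create", None) or cv2.legacy.TrackerCSRT_create
            self.tracker = create()
            self.tracker.init(frame, tuple(int(v) for v in bbox))  # bbox is (x, y, w, h)
            self.tracking_active = True
            self.last_detection_bbox = bbox

        def _draw_detection(self, frame, box, conf, is_tracking):
            # box is (x1, y1, x2, y2); green for fresh detections, orange while tracking.
            x1, y1, x2, y2 = [int(v) for v in box]
            color = (0, 165, 255) if is_tracking else (0, 255, 0)  # BGR
            label = "Tracking" if is_tracking else ("Drone" if conf is None else f"Drone {conf:.2f}")
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
            cv2.putText(frame, label, (x1, max(y1 - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
            return frame

CSRT is generally slower but more robust than lighter trackers such as KCF or MOSSE, which fits a single small, intermittently detected target.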
@@ -253,7 +268,7 @@ class DroneDetectionPipeline:
 # Initialize pipeline
 pipeline = DroneDetectionPipeline()

-def process_video_gradio(input_video, confidence_threshold):
+def process_video_gradio(input_video):
     """Gradio interface function with enhanced error handling"""
     if input_video is None:
         return None, "❌ Please upload a video file"
@@ -264,7 +279,7 @@ def process_video_gradio(input_video, confidence_threshold):
         return None, "❌ Model not loaded. Please check that 'best.pt' is uploaded to the space."

     # Update confidence threshold
-    pipeline.confidence_threshold = confidence_threshold
+    # pipeline.confidence_threshold = confidence_threshold

     # Add file size check
     file_size = os.path.getsize(input_video)
@@ -332,13 +347,13 @@ def create_interface():
                 label="📹 Upload Video",
                 format="mp4"
             )
-            confidence_slider = gr.Slider(
-                minimum=0.1,
-                maximum=0.9,
-                value=0.5,
-                step=0.1,
-                label="🎯 Detection Confidence Threshold"
-            )
+            # confidence_slider = gr.Slider(
+            #     minimum=0.1,
+            #     maximum=0.9,
+            #     value=0.5,
+            #     step=0.1,
+            #     label="🎯 Detection Confidence Threshold"
+            # )
             process_btn = gr.Button(
                 "🚀 Process Video",
                 variant="primary",
@@ -357,7 +372,7 @@ def create_interface():

         process_btn.click(
             fn=process_video_gradio,
-            inputs=[input_video, confidence_slider],
+            inputs=[input_video],  # Only input_video
             outputs=[output_video, status_text],
             show_progress=True
         )
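For reference, the simplified wiring now passes a single component to the callback, which returns the processed video and a status string. The sketch below is a self-contained approximation assuming Gradio Blocks; the component labels and the placeholder callback body are illustrative, and only the .click(...) call mirrors the diff:

    import gradio as gr

    def process_video_gradio(input_video):
        # Placeholder for the real pipeline call; returns (video_path, status_message).
        if input_video is None:
            return None, "❌ Please upload a video file"
        return input_video, "✅ Done"

    with gr.Blocks() as demo:
        input_video = gr.Video(label="📹 Upload Video", format="mp4")
        process_btn = gr.Button("🚀 Process Video", variant="primary")
        output_video = gr.Video(label="Processed Video")
        status_text = gr.Textbox(label="Status")

        # One input only: with the confidence slider removed, the callback takes a single argument.
        process_btn.click(
            fn=process_video_gradio,
            inputs=[input_video],
            outputs=[output_video, status_text],
            show_progress=True
        )

    if __name__ == "__main__":
        demo.launch()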