Update app.py
app.py
CHANGED
@@ -2,7 +2,6 @@ import cv2
 import torch
 import gradio as gr
 import numpy as np
-import matplotlib.pyplot as plt
 from ultralytics import YOLO
 
 # Load YOLOv8 model
@@ -30,7 +29,6 @@ def process_video(video):
 
     # Track detected objects by their bounding box coordinates
     detected_boxes = set()
-    total_detections = 0
 
     while True:
         # Read a frame from the video
@@ -58,7 +56,6 @@ def process_video(video):
             if detection_box not in detected_boxes:
                 # Add the box to the set to avoid repeating the detection
                 detected_boxes.add(detection_box)
-                total_detections += 1
 
         # Annotate the frame with bounding boxes
         annotated_frame = results[0].plot()  # Plot the frame with bounding boxes
@@ -68,26 +65,24 @@ def process_video(video):
 
         # Add this frame to the list of frames with detections
         frames_with_detections.append(annotated_frame_rgb)
-
+
+        # Yield the latest frame immediately for Gradio's real-time display
+        yield annotated_frame_rgb
 
     # Release resources
    input_video.release()
 
-    # Return the frames with detections for display
-    return frames_with_detections
-
 # Create a Gradio Blocks interface
 with gr.Blocks() as demo:
     # Define a file input for video upload
     video_input = gr.Video(label="Upload Video")
 
-    # Define the output area to show processed frames
+    # Define the output area to show processed frames (gallery for continuous update)
     gallery_output = gr.Gallery(label="Detection Album", show_label=True, columns=3)  # Display images in a row (album)
 
     # Define the function to update frames in the album
     def update_gallery(video):
-
-        return detected_frames  # Return all frames with detections
+        return process_video(video)  # Return frames one by one as they are detected
 
     # Connect the video input to the gallery update
     video_input.change(update_gallery, inputs=video_input, outputs=gallery_output)
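The shape of the change, in brief: process_video now yields each annotated frame instead of returning the full list at the end, and update_gallery returns that generator, so Gradio can refresh the gallery while the video is still being processed. Below is a minimal sketch of that streaming pattern. It is a reconstruction under assumptions, not the Space's exact code: the model file name and the stream_detections helper are hypothetical, and because gr.Gallery expects a list of images, the sketch yields the accumulated list on each iteration rather than a single frame.

import cv2
import gradio as gr
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # hypothetical checkpoint; the Space may load a different model file

def stream_detections(video_path):
    """Generator: run YOLO on each frame and yield the gallery contents so far."""
    if not video_path:  # input cleared, nothing to stream
        return
    cap = cv2.VideoCapture(video_path)
    frames = []
    while True:
        ok, frame = cap.read()
        if not ok:
            break  # end of video
        results = model(frame)
        annotated = results[0].plot()  # BGR frame with boxes drawn
        frames.append(cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB))
        yield frames  # each yield triggers a gallery refresh in Gradio
    cap.release()

with gr.Blocks() as demo:
    video_input = gr.Video(label="Upload Video")
    gallery_output = gr.Gallery(label="Detection Album", show_label=True, columns=3)
    video_input.change(stream_detections, inputs=video_input, outputs=gallery_output)

demo.launch()

Yielding the accumulated list keeps the already-shown thumbnails in place between updates; on long videos, yielding only every Nth frame is a simple way to keep the page responsive.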