Update app.py
app.py CHANGED
@@ -5,6 +5,7 @@ import numpy as np
 import os
 import matplotlib.pyplot as plt
 from ultralytics import YOLO, __version__ as ultralytics_version
+import uuid
 
 # Debug: Check environment
 print(f"Torch version: {torch.__version__}")
@@ -19,7 +20,8 @@ model = YOLO('./data/best.pt').to(device)
 
 def process_video(video, output_folder="detected_frames", plot_graphs=False):
     if video is None:
-
+        yield "Error: No video uploaded", []
+        return
 
     # Create output folder if it doesn't exist
     if not os.path.exists(output_folder):
@@ -27,13 +29,15 @@ def process_video(video, output_folder="detected_frames", plot_graphs=False):
 
     cap = cv2.VideoCapture(video)
     if not cap.isOpened():
-
+        yield "Error: Could not open video file", []
+        return
 
     frame_width, frame_height = 320, 240 # Smaller resolution
     frame_count = 0
     frame_skip = 5 # Process every 5th frame
     max_frames = 100 # Limit for testing
     confidence_scores = [] # Store confidence scores for plotting
+    detected_frame_paths = [] # Store paths of frames with detections
 
     while True:
         ret, frame = cap.read()
@@ -49,21 +53,24 @@ def process_video(video, output_folder="detected_frames", plot_graphs=False):
 
         # Run YOLOv8 inference
         results = model(frame)
-        annotated_frame = results[0].plot()
 
-        # Save
-
-
-
-
-
+        # Save and yield frame if objects are detected
+        if results[0].boxes is not None and len(results[0].boxes) > 0:
+            annotated_frame = results[0].plot()
+            frame_filename = os.path.join(output_folder, f"frame_{frame_count:04d}.jpg")
+            cv2.imwrite(frame_filename, annotated_frame)
+            detected_frame_paths.append(frame_filename)
+
+        # Collect confidence scores for plotting
         confs = results[0].boxes.conf.cpu().numpy()
         confidence_scores.extend(confs)
+
+        # Yield current status and gallery
+        yield f"Processed frame {frame_count} with detections", detected_frame_paths[:]
 
     cap.release()
 
     # Generate confidence score plot if requested
-    graph_path = None
     if plot_graphs and confidence_scores:
        plt.figure(figsize=(10, 5))
        plt.hist(confidence_scores, bins=20, color='blue', alpha=0.7)
@@ -73,21 +80,33 @@ def process_video(video, output_folder="detected_frames", plot_graphs=False):
        graph_path = os.path.join(output_folder, "confidence_histogram.png")
        plt.savefig(graph_path)
        plt.close()
+       detected_frame_paths.append(graph_path)
 
-
+    # Final yield with all results
+    status = f"Saved {len(detected_frame_paths)} frames with detections in {output_folder}. {f'Graph saved as {graph_path}' if plot_graphs and confidence_scores else ''}"
+    yield status, detected_frame_paths
 
 # Gradio interface
-
-
-
-
-
-gr.
-
-
-
-
-
+with gr.Blocks() as iface:
+    gr.Markdown("# YOLOv8 Object Detection - Real-time Frame Output")
+    gr.Markdown("Upload a short video to view frames with detections immediately in a gallery. Optionally generate a confidence score graph.")
+
+    with gr.Row():
+        video_input = gr.Video(label="Upload Video")
+        output_folder = gr.Textbox(label="Output Folder", value="detected_frames")
+        plot_graphs = gr.Checkbox(label="Generate Confidence Score Graph", value=False)
+
+    submit_button = gr.Button("Process Video")
+
+    status_output = gr.Text(label="Status")
+    gallery_output = gr.Gallery(label="Detected Frames and Graph", preview=True, columns=3)
+
+    submit_button.click(
+        fn=process_video,
+        inputs=[video_input, output_folder, plot_graphs],
+        outputs=[status_output, gallery_output],
+        concurrency_limit=1
+    )
 
 if __name__ == "__main__":
     iface.launch()
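Note: the rewritten process_video is a generator, so Gradio streams each yield straight into the Status textbox and the Gallery as frames are processed. The snippet below is a minimal standalone sketch of that streaming pattern only, not this Space's actual app.py; fake_process, its random placeholder images, and the component names are illustrative assumptions.

# Minimal sketch of the generator-to-Gallery streaming pattern (assumed demo, not the Space's app.py).
import numpy as np
import gradio as gr

def fake_process(n_frames):
    frames = []
    for i in range(int(n_frames)):
        # Stand-in for an annotated detection frame written by the real app.
        frames.append(np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8))
        # Each yield immediately updates the Status textbox and the Gallery.
        yield f"Processed frame {i}", frames[:]
    yield f"Done: {len(frames)} frames", frames

with gr.Blocks() as demo:
    n_input = gr.Number(label="Frames", value=5)
    run_button = gr.Button("Run")
    status_box = gr.Text(label="Status")
    frame_gallery = gr.Gallery(label="Frames", columns=3)
    run_button.click(fn=fake_process, inputs=[n_input], outputs=[status_box, frame_gallery], concurrency_limit=1)

if __name__ == "__main__":
    demo.launch()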
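Because process_video now yields (status, frame_paths) pairs, it can also be exercised without the UI. A hypothetical smoke test, assuming app.py is importable as a module and a local clip named sample.mp4 exists (both are assumptions, not files in this Space):

# Hypothetical smoke test; "sample.mp4" is an assumed local file.
from app import process_video

for status, gallery_items in process_video("sample.mp4", output_folder="detected_frames", plot_graphs=True):
    print(status, f"-> {len(gallery_items)} gallery item(s)")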