Pedro Henrique Conrado committed
Commit · 23b399d
1 Parent(s): a9d3fa2
updating
app.py CHANGED
@@ -1,7 +1,4 @@
-import ultralytics
-import onemetric
 import supervision
-import typing
 import tqdm
 import os
 from ultralytics import YOLO
@@ -14,7 +11,6 @@ from supervision import Color
 from supervision import VideoInfo
 from supervision import get_video_frames_generator
 from supervision import VideoSink
-import torch
 os.system("pip install git+https://github.com/ifzhang/ByteTrack")
 from typing import List
 import numpy as np
@@ -90,7 +86,13 @@ def ObjectDetection(video_path):
     video_info = VideoInfo.from_video_path(video_path)
     generator = get_video_frames_generator(video_path)
     box_annotator = BoxAnnotator(thickness=5, text_thickness=5, text_scale=1)
-
+    #polygon
+    polygon = np.array([[200,300], [200,1420], [880, 1420], [880, 300]])
+    #zone
+    zone = supervision.PolygonZone(polygon=polygon, frame_resolution_wh=video_info.resolution_wh)
+    #zone annotator
+    zone_annotator = supervision.PolygonZoneAnnotator(zone=zone, color=Color.white(), thickness=4)
+    # open target video file
     with VideoSink(TARGET_VIDEO_PATH, video_info) as sink:
         # loop over video frames
         for frame in tqdm(generator, total=video_info.total_frames):
@@ -119,10 +121,14 @@ def ObjectDetection(video_path):
                 in detections
             ]
             t = np.unique(detections.class_id, return_counts =True)
-            for x in zip(t[0], t[1]):
-                frame = draw_text(background_color=Color.white(), scene=frame, text=' '.join((str(classes[x[0]]), ':', str(x[1]))), text_anchor=Point(x=50, y=300 + (50 * x[0])), text_scale = 2, text_thickness = 4, )
             # annotate and display frame
-
+            mask = zone.trigger(detections=detections)
+            detections_filtered = detections[mask]
+            t = np.unique(detections_filtered.class_id, return_counts =True)
+            for x in zip(t[0], t[1]):
+                frame = draw_text(background_color=Color.white(), scene=frame, text=' '.join((str(classes[x[0]]), ':', str(x[1]))), text_anchor=Point(x=500, y=1550 + (50 * x[0])), text_scale = 2, text_thickness = 4)
+            frame = box_annotator.annotate(scene=frame, detections=detections_filtered, labels=labels)
+            frame = zone_annotator.annotate(scene=frame)
             sink.write_frame(frame)

     return TARGET_VIDEO_PATH
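For context, the heart of this commit is supervision's polygon-zone filtering: zone.trigger returns one boolean per detection marking whether it falls inside the polygon, and indexing the Detections object with that mask keeps only in-zone detections before counting and annotation. Below is a minimal standalone sketch of that pattern; the boxes are hypothetical, and the portrait frame size (1080, 1920) is an assumption standing in for the Space's video_info.resolution_wh.

import numpy as np
import supervision

# Two hypothetical detections: one inside the commit's polygon, one outside.
# In app.py these come from the YOLO model, not hand-written boxes.
detections = supervision.Detections(
    xyxy=np.array([[250.0, 400.0, 400.0, 600.0],    # bottom-center (325, 600): inside
                   [900.0, 100.0, 1050.0, 250.0]]), # bottom-center (975, 250): outside
    confidence=np.array([0.9, 0.8]),
    class_id=np.array([0, 0]),
)

# Same polygon as the commit; (1080, 1920) is an assumed (width, height).
polygon = np.array([[200, 300], [200, 1420], [880, 1420], [880, 300]])
zone = supervision.PolygonZone(polygon=polygon, frame_resolution_wh=(1080, 1920))

# trigger() tests each detection's anchor point (bottom center by default)
# against the polygon; indexing Detections with the mask keeps the True rows.
mask = zone.trigger(detections=detections)
detections_filtered = detections[mask]

# Per-class counts inside the zone, the same pairs app.py renders with draw_text.
class_ids, counts = np.unique(detections_filtered.class_id, return_counts=True)
print(dict(zip(class_ids.tolist(), counts.tolist())))  # -> {0: 1}

In the diff above, these counts are drawn onto the frame with draw_text, and the zone outline is rendered by PolygonZoneAnnotator before each frame is written to the sink.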