Commit a8274b9 · Parent: 338e517
video function

Files changed:
- app.py (+83 -7)
- detector/utils.py (+2 -48)
app.py CHANGED
@@ -3,17 +3,93 @@ import cv2
 from detector.utils import detect_plates, detect_chars
 
 
-def
-    plates = detect_plates(
+def fn_image(foto):
+    plates = detect_plates(foto)
+    plates_text = []
     if len(plates) > 0:
         for plate in plates:
             p1, p2, crop = plate
             if len(crop) > 0:
-                cv2.rectangle(
+                cv2.rectangle(foto, p1, p2, (0, 0, 255), 2)
                 text, crop = detect_chars(crop)
-                cv2.putText(
-
+                cv2.putText(foto, text, p1, cv2.FONT_HERSHEY_SIMPLEX, 4, (0, 255, 0), 5)
+                plates_text.append(text)
+    return foto, plates_text
 
 
-
-
+def fn_video(video, initial_time, duration):
+    plates_text = []
+    cap = cv2.VideoCapture(video)
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    image_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
+    final_video = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(*'VP90'), fps, image_size)
+    num_frames = 0
+    min_frame = int(initial_time * fps)
+    max_frame = int((initial_time + duration) * fps)
+    while cap.isOpened():
+        try:
+            ret, frame = cap.read()
+            if not ret:
+                break
+        except Exception as e:
+            print(e)
+            continue
+        if num_frames < min_frame:
+            num_frames += 1
+            continue
+        plates = detect_plates(frame)
+        for plate in plates:
+            p1, p2, crop = plate
+            if len(crop) > 0:
+                cv2.rectangle(frame, p1, p2, (0, 0, 255), 2)
+                text, crop = detect_chars(crop)
+                cv2.putText(frame, text, p1, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 5)
+                plates_text.append(text)
+        final_video.write(frame)
+        num_frames += 1
+        if num_frames == max_frame:
+            break
+    cap.release()
+    final_video.release()
+    return 'output.mp4', plates_text
+
+
+image_interface = gr.Interface(
+    fn=fn_image,
+    inputs="image",
+    outputs=["image", "text"],
+    title="Buscar números de placa en una imagen",
+    allow_flagging=False,
+    allow_screenshot=False,
+)
+
+video_interface = gr.Interface(
+    fn=fn_video,
+    inputs=[
+        gr.Video(type="file", label="Video"),
+        gr.Slider(0, 600, value=0, label="Tiempo inicial en segundos", step=1),
+        gr.Slider(0, 10, value=4, label="Duración en segundos", step=1),
+    ],
+    outputs=["video", "text"],
+    title="Buscar números de placa en un video",
+    allow_flagging=False,
+    allow_screenshot=False,
+)
+
+webcam_interface = gr.Interface(
+    fn_image,
+    inputs=[
+        gr.Image(source='webcam', streaming=True),
+    ],
+    outputs=gr.Image(type="file"),
+    live=True,
+    title="Buscar placa con la cámara",
+    allow_flagging=False,
+    allow_screenshot=False,
+)
+
+if __name__ == "__main__":
+    gr.TabbedInterface(
+        [image_interface, video_interface],
+        ["Fotos", "Videos"],
+    ).launch()
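The new fn_video endpoint can be smoke-tested locally, outside Gradio. min_frame and max_frame bound the processed window in frames: at 30 fps, initial_time=2 and duration=4 cover frames 60 up to (but not including) 180, and the annotated frames are written to output.mp4 via the VP90 (VP9) fourcc. A minimal sketch, assuming the Space's environment (gradio installed, the torch.hub models reachable) and a clip named sample.mp4 next to app.py; both the filename and the test itself are hypothetical, and importing app only builds the interfaces since launch() is guarded by __name__ == "__main__":

# Hypothetical local smoke test for the new video path.
from app import fn_video

out_path, plates = fn_video('sample.mp4', initial_time=0, duration=4)
print(out_path)  # 'output.mp4', the annotated clip from cv2.VideoWriter
print(plates)    # plate strings read inside the processed frame window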
detector/utils.py CHANGED
@@ -7,13 +7,6 @@ BASE_DIR = os.path.abspath(os.getcwd())
 
 model_plates = torch.hub.load('ultralytics/yolov5', 'custom',
                               path=os.path.join(BASE_DIR, 'detector', 'static', 'plates.pt'))
-# model.conf = 0.60  # NMS confidence threshold
-# model.iou = 0.60  # NMS IoU threshold
-# model.agnostic = False  # NMS class-agnostic
-# model.multi_label = False  # NMS multiple labels per box
-# model.classes = None  # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
-# model.max_det = 1  # maximum number of detections per image
-# model.amp = False  # Automatic Mixed Precision (AMP) inference
 
 model_chars = torch.hub.load('ultralytics/yolov5', 'custom',
                              path=os.path.join(BASE_DIR, 'detector', 'static', 'chars.pt'))
@@ -49,7 +42,7 @@ def detect_plates(img):
 
 
 def detect_chars(img):
-    img = cv2.resize(img, (640,
+    img = cv2.resize(img, (640, 320))
     detect = model_chars(img)
     records = detect.pandas().xyxy[0].to_dict(orient='records')
     yolo = np.squeeze(detect.render())
@@ -57,48 +50,9 @@ def detect_chars(img):
     if records:
         records = sorted(records, key=lambda d: d['xmin'])
         text = ''.join([i.get('name') for i in records])
-    return text,
+    return text, yolo
 
 
 def save_plates(img):
     detect = model_plates(img)
     detect.crop(save=True)
-
-
-# def yolo_detections_to_norfair_detections(yolo_detections, track_points="centroid"):
-#     """convert detections_as_xywh to norfair detections"""
-#     norfair_detections = []
-#
-#     if track_points == "centroid":
-#         detections_as_xywh = yolo_detections.xywh[0]
-#         for detection_as_xywh in detections_as_xywh:
-#             centroid = np.array(
-#                 [detection_as_xywh[0].item(), detection_as_xywh[1].item()]
-#             )
-#             scores = np.array([detection_as_xywh[4].item()])
-#             norfair_detections.append(
-#                 Detection(
-#                     points=centroid,
-#                     scores=scores,
-#                     label=int(detection_as_xywh[-1].item()),
-#                 )
-#             )
-#     elif track_points == "bbox":
-#         detections_as_xyxy = yolo_detections.xyxy[0]
-#         for detection_as_xyxy in detections_as_xyxy:
-#             bbox = np.array(
-#                 [
-#                     [detection_as_xyxy[0].item(), detection_as_xyxy[1].item()],
-#                     [detection_as_xyxy[2].item(), detection_as_xyxy[3].item()],
-#                 ]
-#             )
-#             scores = np.array(
-#                 [detection_as_xyxy[4].item(), detection_as_xyxy[4].item()]
-#             )
-#             norfair_detections.append(
-#                 Detection(
-#                     points=bbox, scores=scores, label=int(detection_as_xyxy[-1].item())
-#                 )
-#             )
-#
-#     return norfair_detections
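The detector/utils.py change is what the new app code relies on: detect_chars now resizes the crop to a fixed 640x320 before inference and returns both the plate string and the rendered detection image, so the text, crop = detect_chars(crop) unpacking in app.py works. The string itself is built by sorting the per-character boxes left to right on xmin; a standalone sketch of that step, with made-up records:

# Hypothetical YOLO records: sorting by each box's left edge (xmin)
# makes the recognized characters concatenate left to right.
records = [
    {'name': 'B', 'xmin': 120.0},
    {'name': 'A', 'xmin': 15.5},
    {'name': 'C', 'xmin': 240.2},
]
records = sorted(records, key=lambda d: d['xmin'])
text = ''.join([i.get('name') for i in records])
print(text)  # ABC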