import torch
import numpy as np
import cv2
import os

BASE_DIR = os.path.abspath(os.getcwd())
# YOLOv5 model that localises licence plates in a frame
model_plates = torch.hub.load('ultralytics/yolov5', 'custom',
                              path=os.path.join(BASE_DIR, 'detector', 'static', 'plates.pt'))
# model_plates.conf = 0.60          # NMS confidence threshold
# model_plates.iou = 0.60           # NMS IoU threshold
# model_plates.agnostic = False     # NMS class-agnostic
# model_plates.multi_label = False  # NMS multiple labels per box
# model_plates.classes = None       # (optional list) filter by class, e.g. [0, 15, 16] for COCO persons, cats and dogs
# model_plates.max_det = 1          # maximum number of detections per image
# model_plates.amp = False          # Automatic Mixed Precision (AMP) inference

# YOLOv5 model that recognises the individual characters on a cropped plate
model_chars = torch.hub.load('ultralytics/yolov5', 'custom',
                             path=os.path.join(BASE_DIR, 'detector', 'static', 'chars.pt'))


def pad_img_to_fit_bbox(img, x1, x2, y1, y2):
    """Pad img with black borders so the bbox (x1, y1, x2, y2) fits entirely
    inside it, and shift the bbox coordinates into the padded image."""
    pad_top = np.abs(np.minimum(0, y1))
    pad_bottom = np.maximum(y2 - img.shape[0], 0)
    pad_left = np.abs(np.minimum(0, x1))
    pad_right = np.maximum(x2 - img.shape[1], 0)
    img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
    # shift both ends of each axis by the same pad offset (the original code
    # shifted y2/x2 by the already-updated y1/x1, losing the offset)
    y1 += pad_top
    y2 += pad_top
    x1 += pad_left
    x2 += pad_left
    return img, x1, x2, y1, y2
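
# Illustrative check (comment only): for a 100x100x3 image and bbox
# (x1, x2, y1, y2) = (-10, 50, -20, 60), the function pads the image to
# shape (120, 110, 3) and returns the shifted bbox (0, 60, 0, 80).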


def imcrop(img, bbox):
    """Crop img to bbox, padding the image first if the bbox runs past its borders."""
    x1, y1, x2, y2 = bbox
    if x1 < 0 or y1 < 0 or x2 > img.shape[1] or y2 > img.shape[0]:
        img, x1, x2, y1, y2 = pad_img_to_fit_bbox(img, x1, x2, y1, y2)
    return img[y1:y2, x1:x2, :]


def detect_plates(img):
    """Detect licence plates in img; return a list of
    ((xmin, ymin), (xmax, ymax), cropped_plate) tuples."""
    detect = model_plates(img)
    records = detect.pandas().xyxy[0].to_dict(orient='records')
    plates = []
    for plate in records:
        xi, yi, xf, yf = int(plate['xmin']), int(plate['ymin']), int(plate['xmax']), int(plate['ymax'])
        crop = imcrop(img, (xi, yi, xf, yf))
        plates.append(((xi, yi), (xf, yf), crop))
    return plates


def detect_chars(img):
    """Run character recognition on a cropped plate; return the plate text and
    the image annotated with the character detections."""
    img = cv2.resize(img, (640, 480))
    detect = model_chars(img)
    records = detect.pandas().xyxy[0].to_dict(orient='records')
    annotated = np.squeeze(detect.render())  # render() draws the detection boxes on the image
    text = ''
    if records:
        # sort characters left to right before joining them into the plate string
        records = sorted(records, key=lambda d: d['xmin'])
        text = ''.join([r.get('name') for r in records])
    return text, annotated


def save_plates(img):
    """Run plate detection and save the cropped detections to disk
    (YOLOv5 writes them under runs/detect/ by default)."""
    detect = model_plates(img)
    detect.crop(save=True)
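

# --- Usage sketch (illustrative, not part of the original module) -------------
# A minimal example of chaining the helpers above, assuming a local test image
# 'car.jpg' (hypothetical path) showing a vehicle with a visible plate.
# Note: cv2.imread returns BGR; converting to RGB may improve YOLOv5 accuracy.
if __name__ == "__main__":
    frame = cv2.imread('car.jpg')  # hypothetical test image
    if frame is None:
        print("Could not read 'car.jpg'")
    else:
        for (top_left, bottom_right, crop) in detect_plates(frame):
            plate_text, annotated_crop = detect_chars(crop)
            print(f"Plate at {top_left}-{bottom_right}: {plate_text}")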
# def yolo_detections_to_norfair_detections(yolo_detections, track_points="centroid"): | |
# """convert detections_as_xywh to norfair detections""" | |
# norfair_detections = [] | |
# | |
# if track_points == "centroid": | |
# detections_as_xywh = yolo_detections.xywh[0] | |
# for detection_as_xywh in detections_as_xywh: | |
# centroid = np.array( | |
# [detection_as_xywh[0].item(), detection_as_xywh[1].item()] | |
# ) | |
# scores = np.array([detection_as_xywh[4].item()]) | |
# norfair_detections.append( | |
# Detection( | |
# points=centroid, | |
# scores=scores, | |
# label=int(detection_as_xywh[-1].item()), | |
# ) | |
# ) | |
# elif track_points == "bbox": | |
# detections_as_xyxy = yolo_detections.xyxy[0] | |
# for detection_as_xyxy in detections_as_xyxy: | |
# bbox = np.array( | |
# [ | |
# [detection_as_xyxy[0].item(), detection_as_xyxy[1].item()], | |
# [detection_as_xyxy[2].item(), detection_as_xyxy[3].item()], | |
# ] | |
# ) | |
# scores = np.array( | |
# [detection_as_xyxy[4].item(), detection_as_xyxy[4].item()] | |
# ) | |
# norfair_detections.append( | |
# Detection( | |
# points=bbox, scores=scores, label=int(detection_as_xyxy[-1].item()) | |
# ) | |
# ) | |
# | |
# return norfair_detections | |
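
# --- Tracking sketch (illustrative, not part of the original module) ----------
# If the converter above is re-enabled, it could feed a norfair tracker roughly
# like this (kept commented, like the converter itself). Assumes norfair >= 2.x,
# where Tracker accepts the built-in "euclidean" distance; frame_source is a
# hypothetical iterable of video frames.
#
# from norfair import Detection, Tracker
#
# tracker = Tracker(distance_function="euclidean", distance_threshold=30)
# for frame in frame_source:
#     yolo_detections = model_plates(frame)
#     norfair_detections = yolo_detections_to_norfair_detections(
#         yolo_detections, track_points="centroid")
#     tracked_objects = tracker.update(detections=norfair_detections)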