Noah Vriese committed
Commit 033cb94 · 0 Parent(s)

Initial commit with Git LFS tracking
.gitattributes ADDED
@@ -0,0 +1,3 @@
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ models/*.onnx filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,3 @@
+ .DS_Store
+ examples/*.png
+ models/*.onnx
__init__.py ADDED
@@ -0,0 +1,5 @@
+ from data_objects import (
+     YOLOXDetector,
+     Detection,
+     ObjectDetectionConfig,
+ )
app.py ADDED
@@ -0,0 +1,73 @@
+ import os
+ import gradio as gr
+ import onnxruntime as ort
+
+ from data_objects import (
+     YOLOXDetector,
+     Detection,
+     ObjectDetectionConfig,
+ )
+
+ # Model configs
+ object_detection_config = ObjectDetectionConfig()
+
+ # Load object detector
+ sess_options = ort.SessionOptions()
+ sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
+ object_detector = YOLOXDetector(
+     model_path=object_detection_config.object_detection_model_path,
+     input_shape=object_detection_config.input_shape,
+     confidence_threshold=object_detection_config.confidence_threshold,
+     providers=["CoreMLExecutionProvider", "CPUExecutionProvider"],
+     sess_options=sess_options,
+ )
+
+ def predict(input_img):
+     # Run the detector, then wrap each (box, score, class) triple in a Detection
+     final_boxes, final_scores, final_cls = object_detector.predict(input_img)
+
+     detected_objects = [
+         Detection(
+             points=bbox,
+             score=score,
+             class_id=class_id,
+             color=object_detection_config.color_map.get(class_id),
+             display_name=object_detection_config.display_map.get(class_id),
+             centroid_thickness=-1,
+             centroid_radius=5
+         )
+         for class_id in list(object_detection_config.class_map.keys())
+         for bbox, score in zip(final_boxes[final_cls == class_id], final_scores[final_cls == class_id])
+     ]
+
+     for obj in detected_objects:
+         input_img = obj.draw(
+             image=input_img,
+             draw_boxes=True,
+             draw_centroids=True,
+             draw_text=True,
+             draw_projections=False,
+             box_display_type="minimal",
+             fill_text_background=False,
+             box_line_thickness=4,
+             box_corner_length=15,
+             text_scale=0.6,
+             obfuscate_classes=[],
+         )
+
+     return input_img, {obj.display_name: obj.score for obj in detected_objects}
+
+ example_images = [
+     os.path.join("./examples", img) for img in os.listdir("./examples") if img.lower().endswith(('png', 'jpg', 'jpeg'))
+ ]
+
+ gradio_app = gr.Interface(
+     predict,
+     inputs=gr.Image(label="Select image to process", sources=['upload', 'webcam'], type="numpy"),
+     outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
+     title="License Plate Detection",
+     examples=example_images,
+ )
+
+ if __name__ == "__main__":
+     gradio_app.launch()
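
For local testing outside the Gradio UI, predict can be called directly on an RGB numpy array. A minimal sketch, not part of the commit: it assumes you run from the repo root with the LFS files pulled, and that importing app (which loads the ONNX model at import time) succeeds on your machine.

# Sketch only: exercise predict() without launching the Gradio interface.
import cv2
from app import predict

# Gradio hands predict() RGB arrays, so convert OpenCV's BGR first.
img = cv2.cvtColor(cv2.imread("examples/Example_1.png"), cv2.COLOR_BGR2RGB)
annotated, labels = predict(img)
print(labels)  # {display_name: score, ...}
cv2.imwrite("annotated.png", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))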
data_objects.py ADDED
@@ -0,0 +1,423 @@
+ import numpy as np
+ import cv2
+ import onnxruntime as ort
+ from typing import List, Tuple, Union, Literal, Dict
+ from pydantic import BaseModel
+
+ # Configuration for the YOLOX model; set the model path and class-name mappings here.
+ class ObjectDetectionConfig(BaseModel):
+     """Configuration for trained YOLOX object detection model."""
+
+     # Model path & hyperparameters
+     object_detection_model_path: str = "./models/yolox_custom-plates-2cls-0.1.onnx"
+     confidence_threshold: float = 0.50
+     nms_threshold: float = 0.65
+     input_shape: Tuple[int, int] = (640, 640)
+
+     # Class-specific inputs
+     class_map: Dict = {0: 'license-plates', 1: 'License_Plate'}
+     display_map: Dict = {0: 'license-plates', 1: 'License_Plate'}
+     color_map: Dict = {0: (186, 223, 255), 1: (100, 255, 255)}
+
+ class Detection:
+     def __init__(
+         self,
+         points: np.ndarray,
+         class_id: Union[int, None] = None,
+         score: Union[float, None] = 0.0,
+         color: Tuple[int, int, int] = (100, 255, 255),
+         display_name: str = "Box",
+         centroid_radius: int = 5,
+         centroid_thickness: int = -1
+     ):
+         """
+         Represents an object detection in the scene.
+         Stores bounding box, class_id, and other attributes for tracking and visualization.
+         """
+         self.points_xyxy = points
+         self.class_id = class_id
+         self.score = score
+         self.color_bbox = color
+         self.color_centroid = color
+         self.radius_centroid = centroid_radius
+         self.thickness_centroid = centroid_thickness
+         self.centroid_location: str = "center"
+         self.display_name: str = display_name
+         self.track_id: Union[int, None] = None
+         self.id: Union[int, None] = None
+         self.active: bool = False
+         self.status: str = ""
+         # Defaults for attributes read by draw(); without them, draw_projections=True
+         # or status == "Waiting" would raise AttributeError.
+         self.velocity: Union[np.ndarray, None] = None
+         self.queue_time_duration: float = 0.0
+
+     def __repr__(self) -> str:
+         return f"Detection({str(self.display_name)})"
+
+     @property
+     def bbox_xyxy(self) -> np.ndarray:
+         return self.points_xyxy
+
+     @property
+     def size(self) -> float:
+         """Return the bounding box area in pixels."""
+         x1, y1, x2, y2 = self.points_xyxy
+         return (x2 - x1) * (y2 - y1)
+
+     def bbox_image(self, image: np.ndarray, buffer: int = 0) -> np.ndarray:
+         """Extract the image patch corresponding to this detection's bounding box."""
+         x1, y1, x2, y2 = self.points_xyxy
+         height, width = image.shape[:2]
+         x1 = max(0, int(x1 - buffer))
+         y1 = max(0, int(y1 - buffer))
+         x2 = min(width, int(x2 + buffer))
+         y2 = min(height, int(y2 + buffer))
+         return image[y1:y2, x1:x2]
+
+     def centroid(self, location: str = None) -> np.ndarray:
+         """Get the centroid of the bounding box based on the chosen centroid location."""
+         if location is None:
+             location = self.centroid_location
+         x1, y1, x2, y2 = self.points_xyxy
+         if location == "center":
+             centroid_loc = [(x1 + x2) / 2, (y1 + y2) / 2]
+         elif location == "top":
+             centroid_loc = [(x1 + x2) / 2, y1]
+         elif location == "bottom":
+             centroid_loc = [(x1 + x2) / 2, y2]
+         elif location == "left":
+             centroid_loc = [x1, (y1 + y2) / 2]
+         elif location == "right":
+             centroid_loc = [x2, (y1 + y2) / 2]
+         elif location == "upper-left":
+             centroid_loc = [x1, y1]
+         elif location == "upper-right":
+             centroid_loc = [x2, y1]
+         elif location == "bottom-left":
+             centroid_loc = [x1, y2]
+         elif location == "bottom-right":
+             centroid_loc = [x2, y2]
+         else:
+             raise ValueError("Unsupported location type.")
+         return np.array([centroid_loc], dtype=np.float32)
+
+     def draw(
+         self,
+         image: np.ndarray,
+         draw_boxes: bool = True,
+         draw_centroids: bool = True,
+         draw_text: bool = True,
+         draw_projections: bool = False,
+         fill_text_background: bool = False,
+         box_display_type: Literal["minimal", "standard"] = "standard",
+         box_line_thickness: int = 2,
+         box_corner_length: int = 20,
+         obfuscate_classes: List[int] = [],
+         centroid_color: Union[Tuple[int, int, int], None] = None,
+         centroid_radius: Union[int, None] = None,
+         centroid_thickness: Union[int, None] = None,
+         text_position_xy: Tuple[int, int] = (25, 25),
+         text_scale: float = 0.8,
+         text_thickness: int = 2,
+     ) -> np.ndarray:
+         """Draw bounding boxes and centroids for the detection.
+
+         If fill_text_background is True, the text placed near the centroid is drawn over a blurred
+         background extracted from the image. Extra padding is added so the background box is taller.
+         """
+         image_processed = image.copy()
+
+         if draw_boxes:
+             object_bbox: np.ndarray = self.bbox_xyxy
+             bbox_color: Tuple[int, int, int] = self.color_bbox if self.color_bbox is not None else (100, 255, 255)
+             if object_bbox is not None:
+                 x0 = int(object_bbox[0])
+                 y0 = int(object_bbox[1])
+                 x1 = int(object_bbox[2])
+                 y1 = int(object_bbox[3])
+
+                 if self.class_id in obfuscate_classes:
+                     roi = image_processed[y0:y1, x0:x1]
+                     if roi.size > 0:
+                         image_processed[y0:y1, x0:x1] = cv2.GaussianBlur(roi, (61, 61), 0)
+
+                 if box_display_type.strip().lower() == "minimal":
+                     # Draw only the four corners of the box
+                     box_corner_length = int(
+                         min(box_corner_length, (x1 - x0) / 2, (y1 - y0) / 2)
+                     )
+                     cv2.line(image_processed, (x0, y0), (x0 + box_corner_length, y0), color=bbox_color, thickness=box_line_thickness)
+                     cv2.line(image_processed, (x0, y0), (x0, y0 + box_corner_length), color=bbox_color, thickness=box_line_thickness)
+                     cv2.line(image_processed, (x1, y0), (x1 - box_corner_length, y0), color=bbox_color, thickness=box_line_thickness)
+                     cv2.line(image_processed, (x1, y0), (x1, y0 + box_corner_length), color=bbox_color, thickness=box_line_thickness)
+                     cv2.line(image_processed, (x0, y1), (x0 + box_corner_length, y1), color=bbox_color, thickness=box_line_thickness)
+                     cv2.line(image_processed, (x0, y1), (x0, y1 - box_corner_length), color=bbox_color, thickness=box_line_thickness)
+                     cv2.line(image_processed, (x1, y1), (x1 - box_corner_length, y1), color=bbox_color, thickness=box_line_thickness)
+                     cv2.line(image_processed, (x1, y1), (x1, y1 - box_corner_length), color=bbox_color, thickness=box_line_thickness)
+
+                 elif box_display_type.strip().lower() == "standard":
+                     cv2.rectangle(
+                         image_processed,
+                         (x0, y0),
+                         (x1, y1),
+                         color=bbox_color,
+                         thickness=box_line_thickness
+                     )
+
+         if draw_projections:
+             projection_start_centroid: np.ndarray = self.centroid(location="bottom")[0]
+             if self.velocity is not None:
+                 projection_end_centroid: np.ndarray = np.array([self.centroid(location="bottom")[0] + self.velocity])[0]
+             else:
+                 projection_end_centroid = projection_start_centroid
+             projection_start_coords: Tuple[int, int] = (int(projection_start_centroid[0]), int(projection_start_centroid[1]))
+             projection_end_coords: Tuple[int, int] = (int(projection_end_centroid[0]), int(projection_end_centroid[1]))
+
+             cv2.arrowedLine(
+                 image_processed,
+                 projection_start_coords,
+                 projection_end_coords,
+                 color=(100, 255, 255),
+                 thickness=3,
+                 tipLength=0.2
+             )
+
+         if draw_centroids:
+             centroid: np.ndarray = self.centroid()[0]
+             centroid_coords: Tuple[int, int] = (int(centroid[0]), int(centroid[1]))
+
+             if centroid_color is None:
+                 centroid_color = self.color_centroid
+             if centroid_radius is None:
+                 centroid_radius = self.radius_centroid
+             if centroid_thickness is None:
+                 centroid_thickness = self.thickness_centroid
+
+             cv2.circle(
+                 image_processed,
+                 centroid_coords,
+                 centroid_radius,
+                 centroid_color,
+                 centroid_thickness,
+                 lineType=cv2.LINE_AA
+             )
+
+             # Text placement depends on the centroid, so it is drawn in the same branch.
+             if draw_text:
+                 display_text: str = str(self.display_name).capitalize()
+                 text_position: Tuple[int, int] = (
+                     centroid_coords[0] + text_position_xy[0],
+                     centroid_coords[1] + text_position_xy[1]
+                 )
+
+                 if hasattr(self, "status") and self.status:
+                     display_text += f" ({self.status})"
+                     if self.status == "Waiting":
+                         display_text += f" ({int(self.queue_time_duration)}s)"
+
+                 if fill_text_background:
+                     font = cv2.FONT_HERSHEY_SIMPLEX
+                     (text_width, text_height), baseline = cv2.getTextSize(display_text, font, text_scale, text_thickness)
+                     pad_x = 0
+                     pad_y = 10
+                     # Calculate rectangle coordinates
+                     rect_x1 = text_position[0] - pad_x
+                     rect_y1 = text_position[1] - text_height - pad_y
+                     rect_x2 = text_position[0] + text_width + pad_x
+                     rect_y2 = text_position[1] + baseline + pad_y
+                     # Ensure coordinates are within image boundaries
+                     rect_x1 = max(0, rect_x1)
+                     rect_y1 = max(0, rect_y1)
+                     rect_x2 = min(image_processed.shape[1], rect_x2)
+                     rect_y2 = min(image_processed.shape[0], rect_y2)
+                     # Extract the region of interest and apply a Gaussian blur
+                     roi = image_processed[rect_y1:rect_y2, rect_x1:rect_x2]
+                     if roi.size > 0:
+                         image_processed[rect_y1:rect_y2, rect_x1:rect_x2] = cv2.GaussianBlur(roi, (31, 31), 0)
+
+                 cv2.putText(
+                     image_processed,
+                     display_text,
+                     text_position,
+                     fontFace=cv2.FONT_HERSHEY_SIMPLEX,
+                     fontScale=text_scale,
+                     color=centroid_color,
+                     thickness=text_thickness,
+                     lineType=cv2.LINE_AA
+                 )
+
+         return image_processed
+
+ class YOLOXDetector:
+     def __init__(
+         self,
+         model_path: str,
+         input_shape: Tuple[int, int] = (640, 640),
+         confidence_threshold: float = 0.6,
+         nms_threshold: float = 0.65,
+         providers: List[str] = ["CoreMLExecutionProvider", "CUDAExecutionProvider", "CPUExecutionProvider"],
+         sess_options=ort.SessionOptions(),
+     ):
+         self.model_path: str = model_path
+         self.dims: Tuple[int, int] = input_shape
+         self.ratio: float = 1.0
+         self.confidence_threshold: float = confidence_threshold
+         self.nms_threshold: float = nms_threshold
+         self.classes: List[str] = ["license-plates", "License_Plate"]
+         self.categories: List[str] = ["DEFAULT" for _ in range(len(self.classes))]
+         self.providers: List[str] = providers
+         self.session = ort.InferenceSession(
+             self.model_path,
+             providers=self.providers,
+             sess_options=sess_options,
+         )
+
+     def nms(self, boxes, scores, nms_thr):
+         """Single-class NMS implemented in Numpy."""
+         x1 = boxes[:, 0]
+         y1 = boxes[:, 1]
+         x2 = boxes[:, 2]
+         y2 = boxes[:, 3]
+
+         areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+         order = scores.argsort()[::-1]
+
+         keep = []
+         while order.size > 0:
+             i = order[0]
+             keep.append(i)
+             # IoU of the current highest-scoring box against the remainder
+             xx1 = np.maximum(x1[i], x1[order[1:]])
+             yy1 = np.maximum(y1[i], y1[order[1:]])
+             xx2 = np.minimum(x2[i], x2[order[1:]])
+             yy2 = np.minimum(y2[i], y2[order[1:]])
+
+             w = np.maximum(0.0, xx2 - xx1 + 1)
+             h = np.maximum(0.0, yy2 - yy1 + 1)
+             inter = w * h
+             ovr = inter / (areas[i] + areas[order[1:]] - inter)
+
+             inds = np.where(ovr <= nms_thr)[0]
+             order = order[inds + 1]
+
+         return keep
+
+     def multiclass_nms_class_aware(self, boxes, scores, nms_thr, score_thr):
+         """Multiclass NMS implemented in Numpy. Class-aware version."""
+         final_dets = []
+         num_classes = scores.shape[1]
+         for cls_ind in range(num_classes):
+             cls_scores = scores[:, cls_ind]
+             valid_score_mask = cls_scores > score_thr
+             if valid_score_mask.sum() == 0:
+                 continue
+             valid_scores = cls_scores[valid_score_mask]
+             valid_boxes = boxes[valid_score_mask]
+             keep = self.nms(valid_boxes, valid_scores, nms_thr)
+             if len(keep) > 0:
+                 cls_inds = np.ones((len(keep), 1)) * cls_ind
+                 dets = np.concatenate(
+                     [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1
+                 )
+                 final_dets.append(dets)
+         if len(final_dets) == 0:
+             return None
+         return np.concatenate(final_dets, 0)
+
+     def multiclass_nms_class_agnostic(self, boxes, scores, nms_thr, score_thr):
+         """Multiclass NMS implemented in Numpy. Class-agnostic version."""
+         cls_inds = scores.argmax(1)
+         cls_scores = scores[np.arange(len(cls_inds)), cls_inds]
+
+         valid_score_mask = cls_scores > score_thr
+         if valid_score_mask.sum() == 0:
+             return None
+         valid_scores = cls_scores[valid_score_mask]
+         valid_boxes = boxes[valid_score_mask]
+         valid_cls_inds = cls_inds[valid_score_mask]
+         keep = self.nms(valid_boxes, valid_scores, nms_thr)
+         if keep:
+             dets = np.concatenate(
+                 [valid_boxes[keep], valid_scores[keep, None], valid_cls_inds[keep, None]], 1
+             )
+             return dets
+         return None
+
+     def multiclass_nms(self, boxes, scores, nms_thr, score_thr, class_agnostic=False):
+         """Multiclass NMS implemented in Numpy."""
+         if class_agnostic:
+             return self.multiclass_nms_class_agnostic(boxes, scores, nms_thr, score_thr)
+         return self.multiclass_nms_class_aware(boxes, scores, nms_thr, score_thr)
+
+     def preprocess(self, image: np.ndarray, bgr2rgb: bool = False):
+         """Letterbox the image onto a gray (114) canvas and convert to CHW float32."""
+         if bgr2rgb:
+             # Convert the input image (not the blank canvas) between BGR and RGB.
+             image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+         if len(image.shape) == 3:
+             padded_image = np.ones((self.dims[0], self.dims[1], 3), dtype=np.uint8) * 114
+         else:
+             padded_image = np.ones(self.dims, dtype=np.uint8) * 114
+
+         self.ratio = min(self.dims[0] / image.shape[0], self.dims[1] / image.shape[1])
+         resized_image = cv2.resize(
+             image,
+             (int(image.shape[1] * self.ratio), int(image.shape[0] * self.ratio)),
+             interpolation=cv2.INTER_LINEAR,
+         ).astype(np.uint8)
+         padded_image[: int(image.shape[0] * self.ratio), : int(image.shape[1] * self.ratio)] = resized_image
+
+         padded_image = padded_image.transpose((2, 0, 1))
+         padded_image = np.ascontiguousarray(padded_image, dtype=np.float32)
+         return padded_image
+
+     def postprocess(self, outputs, p64=False):
+         """Post-process YOLOX model outputs into usable bounding boxes and scores."""
+         grids = []
+         expanded_strides = []
+         strides = [8, 16, 32] if not p64 else [8, 16, 32, 64]
+
+         hsizes = [self.dims[0] // stride for stride in strides]
+         wsizes = [self.dims[1] // stride for stride in strides]
+
+         for hsize, wsize, stride in zip(hsizes, wsizes, strides):
+             xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
+             grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
+             grids.append(grid)
+             shape = grid.shape[:2]
+             expanded_strides.append(np.full((*shape, 1), stride))
+
+         grids = np.concatenate(grids, 1)
+         expanded_strides = np.concatenate(expanded_strides, 1)
+         # Decode center offsets and log-encoded sizes back to input-image pixels
+         outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
+         outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides
+
+         outputs = outputs[0]
+
+         boxes = outputs[:, :4]
+         scores = outputs[:, 4:5] * outputs[:, 5:]
+
+         # cxcywh -> xyxy, then undo the letterbox scaling
+         boxes_xyxy = np.ones_like(boxes)
+         boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
+         boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
+         boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
+         boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
+         boxes_xyxy /= self.ratio
+         return boxes_xyxy, scores
+
+     def predict(self, image: np.ndarray):
+         """Run YOLOX detector on an image and return detected bounding boxes and scores."""
+         image = self.preprocess(image=image)
+         onnx_pred = self.session.run(None, {self.session.get_inputs()[0].name: np.expand_dims(image, axis=0)})[0]
+         boxes_xyxy, scores = self.postprocess(onnx_pred)
+         detections = self.multiclass_nms(
+             boxes=boxes_xyxy,
+             scores=scores,
+             nms_thr=self.nms_threshold,
+             score_thr=self.confidence_threshold,
+             class_agnostic=len(self.classes) == 1
+         )
+         if detections is not None and len(detections) > 0:
+             final_boxes, final_scores, final_cls_inds = detections[:, :4], detections[:, 4], detections[:, 5]
+         else:
+             final_boxes, final_scores, final_cls_inds = np.empty((0, 4)), np.empty((0,)), np.empty((0,))
+         return final_boxes, final_scores, final_cls_inds
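
YOLOXDetector.nms above is classic greedy IoU suppression: keep the highest-scoring box, drop everything overlapping it beyond nms_thr, repeat on the survivors. A toy check with invented boxes and scores (illustrative only; nms never touches self, so it can be called through the class):

import numpy as np
from data_objects import YOLOXDetector

boxes = np.array([
    [10, 10, 100, 100],    # A: highest score, kept first
    [12, 12, 102, 102],    # B: IoU with A is ~0.92 > 0.65, suppressed
    [200, 200, 300, 300],  # C: disjoint from A, kept
], dtype=np.float32)
scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)

keep = YOLOXDetector.nms(None, boxes, scores, 0.65)
print(keep)  # indices of surviving boxes: 0 and 2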
examples/Example_1.png ADDED

Git LFS Details

  • SHA256: ff7ba017483e3a3a5ec482108a75e4768ad9c39f447c8986f36e556687b9129a
  • Pointer size: 131 Bytes
  • Size of remote file: 460 kB
examples/Example_2.png ADDED

Git LFS Details

  • SHA256: 41576a9e4d92d96b10a99a0e06af51b2ab75d4f483916d851c6111de45d97f4f
  • Pointer size: 131 Bytes
  • Size of remote file: 395 kB
examples/Example_3.png ADDED

Git LFS Details

  • SHA256: 597d304c6d9d1065f4550383737d2b8861fbd660cc92913b51cf755b09d53e43
  • Pointer size: 131 Bytes
  • Size of remote file: 630 kB
examples/Example_4.png ADDED

Git LFS Details

  • SHA256: 52c778a4f6228586246dfa0b5c51bddc652afa8f844eb861240bc83744dde9e9
  • Pointer size: 131 Bytes
  • Size of remote file: 527 kB
models/yolox_custom-plates-2cls-0.1.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3dcc44a86db71226aff4553663a38bf03170a2b1b59ce170788544f180409e2b
+ size 102790596
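
The three lines above are the Git LFS pointer that Git actually stores in place of the ~103 MB model; the weights themselves live in LFS storage, keyed by the oid. As a sketch (assuming a checkout where LFS smudging did not run, so the pointer text is what sits on disk), the v1 pointer format is simple enough to parse by hand:

# Sketch: parse a Git LFS pointer file (spec v1) into a dict.
def parse_lfs_pointer(path: str) -> dict:
    with open(path, "r") as f:
        return dict(line.strip().split(" ", 1) for line in f if line.strip())

ptr = parse_lfs_pointer("models/yolox_custom-plates-2cls-0.1.onnx")
print(ptr["oid"])   # sha256:3dcc44a8...
print(ptr["size"])  # 102790596 (bytes)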