ytfeng committed
Commit 6eef315 · 1 Parent(s): e5b568e

Add LPD_YuNet for License Plate Detection (#56)


* add lpd_yunet

* add quantization and quantized model

* quantize with inc instead

* update benchmark results

README.md CHANGED
@@ -18,6 +18,7 @@ Guidelines:
 |-------|------------|----------------|--------------|-----------------|--------------|-------------|
 | [YuNet](./models/face_detection_yunet) | 160x120 | 1.45 | 6.22 | 12.18 | 4.04 | 86.69 |
 | [SFace](./models/face_recognition_sface) | 112x112 | 8.65 | 99.20 | 24.88 | 46.25 | --- |
+| [LPD-YuNet](./models/license_plate_detection_yunet/) | 320x240 | --- | 168.03 | 56.12 | 154.20\* | --- |
 | [DB-IC15](./models/text_detection_db) | 640x480 | 142.91 | 2835.91 | 208.41 | --- | --- |
 | [DB-TD500](./models/text_detection_db) | 640x480 | 142.91 | 2841.71 | 210.51 | --- | --- |
 | [CRNN-EN](./models/text_recognition_crnn) | 100x32 | 50.21 | 234.32 | 196.15 | 125.30 | --- |
benchmark/config/license_plate_detection_yunet.yaml ADDED
@@ -0,0 +1,22 @@
+Benchmark:
+  name: "License Plate Detection Benchmark"
+  type: "Detection"
+  data:
+    path: "benchmark/data/license_plate_detection"
+    files: ["1.jpg", "2.jpg", "3.jpg", "4.jpg"]
+    sizes: # [[w1, h1], ...], omit to run at original scale
+      - [320, 240]
+  metric:
+    warmup: 30
+    repeat: 10
+    reduction: "median"
+  backend: "default"
+  target: "cpu"
+
+Model:
+  name: "LPD_YuNet"
+  modelPath: "models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2022may.onnx"
+  confThreshold: 0.8
+  nmsThreshold: 0.3
+  topK: 5000
+  keepTopK: 750
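
Assuming the benchmark runner keeps its existing interface, this config should be runnable from the `benchmark` directory with `python benchmark.py --cfg ./config/license_plate_detection_yunet.yaml`, the same way the other model configs are invoked.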
benchmark/download_data.py CHANGED
@@ -201,6 +201,10 @@ data_downloaders = dict(
         url='https://drive.google.com/u/0/uc?id=1qScOzehV8OIzJJLuD_LMvZq15YcWd_VV&export=download',
         sha='c0d4f811d38c6f833364b9196a719307598213a1',
         filename='palm_detection.zip'),
+    license_plate_detection=Downloader(name='license_plate_detection',
+        url='https://drive.google.com/u/0/uc?id=1cf9MEyUqMMy8lLeDGd1any6tM_SsSmny&export=download',
+        sha='997acb143ddc4531e6e41365fb7ad4722064564c',
+        filename='license_plate_detection.zip'),
 )
 
 if __name__ == '__main__':
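
With this entry registered, the benchmark images should be fetchable like the other datasets, presumably via `python download_data.py license_plate_detection` (or with no arguments to download everything), following the script's existing `__main__` handling.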
models/__init__.py CHANGED
@@ -10,6 +10,7 @@ from .person_reid_youtureid.youtureid import YoutuReID
 from .image_classification_mobilenet.mobilenet_v1 import MobileNetV1
 from .image_classification_mobilenet.mobilenet_v2 import MobileNetV2
 from .palm_detection_mediapipe.mp_palmdet import MPPalmDet
+from .license_plate_detection_yunet.lpd_yunet import LPD_YuNet
 
 class Registery:
     def __init__(self, name):
@@ -35,4 +36,4 @@ MODELS.register(YoutuReID)
 MODELS.register(MobileNetV1)
 MODELS.register(MobileNetV2)
 MODELS.register(MPPalmDet)
-
+MODELS.register(LPD_YuNet)
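
For reference, a minimal sketch of what this registration enables, assuming the repo root is on `PYTHONPATH`; the weights path mirrors the benchmark config above:

```python
# Hypothetical usage sketch (not part of the commit): the added import line
# makes LPD_YuNet reachable through the models package.
from models.license_plate_detection_yunet.lpd_yunet import LPD_YuNet

# Same weights file the benchmark config and demo point at.
model = LPD_YuNet(modelPath='models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2022may.onnx')
```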
models/license_plate_detection_yunet/LICENSE ADDED
@@ -0,0 +1,203 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright (c) 2022 WATRIX
+   Author: Dong Xu
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
models/license_plate_detection_yunet/README.md ADDED
@@ -0,0 +1,22 @@
+# License Plate Detection with YuNet
+
+This model is contributed by Dong Xu (徐栋) from [watrix.ai](https://watrix.ai) (银河水滴).
+
+Please note that the model is trained on Chinese license plates, so detection accuracy on other kinds of license plates may be limited.
+
+## Demo
+
+Run the following command to try the demo:
+```shell
+# detect on camera input
+python demo.py
+# detect on an image
+python demo.py --input /path/to/image
+```
+
+## License
+All files in this directory are licensed under the [Apache 2.0 License](./LICENSE).
+
+## Reference
+
+- https://github.com/ShiqiYu/libfacedetection.train
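
For programmatic use outside the demo, a minimal sketch mirroring `demo.py` below; the image path is a placeholder:

```python
import cv2 as cv
from lpd_yunet import LPD_YuNet

# Minimal usage sketch based on demo.py; 'plate.jpg' is a placeholder path.
model = LPD_YuNet(modelPath='license_plate_detection_lpd_yunet_2022may.onnx')
image = cv.imread('plate.jpg')
h, w, _ = image.shape
model.setInputSize([w, h])  # regenerate priors for this resolution
dets = model.infer(image)   # one row per plate: 4 corner points + score
```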
models/license_plate_detection_yunet/demo.py ADDED
@@ -0,0 +1,120 @@
+import argparse
+
+import numpy as np
+import cv2 as cv
+
+from lpd_yunet import LPD_YuNet
+
+def str2bool(v):
+    if v.lower() in ['on', 'yes', 'true', 'y', 't']:
+        return True
+    elif v.lower() in ['off', 'no', 'false', 'n', 'f']:
+        return False
+    else:
+        raise NotImplementedError
+
+backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
+targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]
+help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"
+help_msg_targets = "Choose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"
+try:
+    backends += [cv.dnn.DNN_BACKEND_TIMVX]
+    targets += [cv.dnn.DNN_TARGET_NPU]
+    help_msg_backends += "; {:d}: TIMVX"
+    help_msg_targets += "; {:d}: NPU"
+except:
+    print('This version of OpenCV does not support TIM-VX and NPU. Visit https://gist.github.com/fengyuentau/5a7a5ba36328f2b763aea026c43fa45f for more information.')
+
+parser = argparse.ArgumentParser(description='LPD-YuNet for License Plate Detection')
+parser.add_argument('--input', '-i', type=str, help='Path to the input image. Omit for using default camera.')
+parser.add_argument('--model', '-m', type=str, default='license_plate_detection_lpd_yunet_2022may.onnx', help='Path to the model.')
+parser.add_argument('--backend', '-b', type=int, default=backends[0], help=help_msg_backends.format(*backends))
+parser.add_argument('--target', '-t', type=int, default=targets[0], help=help_msg_targets.format(*targets))
+parser.add_argument('--conf_threshold', type=float, default=0.9, help='Filter out detections of confidence < conf_threshold.')
+parser.add_argument('--nms_threshold', type=float, default=0.3, help='Suppress bounding boxes of iou >= nms_threshold.')
+parser.add_argument('--top_k', type=int, default=5000, help='Keep top_k bounding boxes before NMS.')
+parser.add_argument('--keep_top_k', type=int, default=750, help='Keep keep_top_k bounding boxes after NMS.')
+parser.add_argument('--save', '-s', type=str2bool, default=False, help='Set true to save results. This flag is invalid when using camera.')
+parser.add_argument('--vis', '-v', type=str2bool, default=True, help='Set true to open a window for result visualization. This flag is invalid when using camera.')
+args = parser.parse_args()
+
+def visualize(image, dets, line_color=(0, 255, 0), text_color=(0, 0, 255), fps=None):
+    output = image.copy()
+
+    if fps is not None:
+        cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
+
+    for det in dets:
+        bbox = det[:-1].astype(np.int32)
+        x1, y1, x2, y2, x3, y3, x4, y4 = bbox
+
+        # Draw the border of the license plate
+        cv.line(output, (x1, y1), (x2, y2), line_color, 2)
+        cv.line(output, (x2, y2), (x3, y3), line_color, 2)
+        cv.line(output, (x3, y3), (x4, y4), line_color, 2)
+        cv.line(output, (x4, y4), (x1, y1), line_color, 2)
+
+    return output
+
+if __name__ == '__main__':
+    # Instantiate LPD-YuNet
+    model = LPD_YuNet(modelPath=args.model,
+                      confThreshold=args.conf_threshold,
+                      nmsThreshold=args.nms_threshold,
+                      topK=args.top_k,
+                      keepTopK=args.keep_top_k,
+                      backendId=args.backend,
+                      targetId=args.target)
+
+    # If input is an image
+    if args.input is not None:
+        image = cv.imread(args.input)
+        h, w, _ = image.shape
+
+        # Inference
+        model.setInputSize([w, h])
+        results = model.infer(image)
+
+        # Print results
+        print('{} license plates detected.'.format(results.shape[0]))
+
+        # Draw results on the input image
+        image = visualize(image, results)
+
+        # Save results if save is true
+        if args.save:
+            print('Results saved to result.jpg')
+            cv.imwrite('result.jpg', image)
+
+        # Visualize results in a new window
+        if args.vis:
+            cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE)
+            cv.imshow(args.input, image)
+            cv.waitKey(0)
+    else: # Omit input to call default camera
+        deviceId = 0
+        cap = cv.VideoCapture(deviceId)
+        w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
+        h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
+        model.setInputSize([w, h])
+
+        tm = cv.TickMeter()
+        while cv.waitKey(1) < 0:
+            hasFrame, frame = cap.read()
+            if not hasFrame:
+                print('No frames grabbed!')
+                break
+
+            # Inference
+            tm.start()
+            results = model.infer(frame) # results is an np.array of detections
+            tm.stop()
+
+            # Draw results on the input image
+            frame = visualize(frame, results, fps=tm.getFPS())
+
+            # Visualize results in a new window
+            cv.imshow('LPD-YuNet Demo', frame)
+
+            tm.reset()
+
models/license_plate_detection_yunet/lpd_yunet.py ADDED
@@ -0,0 +1,135 @@
+from itertools import product
+
+import numpy as np
+import cv2 as cv
+
+class LPD_YuNet:
+    def __init__(self, modelPath, inputSize=[320, 240], confThreshold=0.8, nmsThreshold=0.3, topK=5000, keepTopK=750, backendId=0, targetId=0):
+        self.model_path = modelPath
+        self.input_size = np.array(inputSize)
+        self.confidence_threshold = confThreshold
+        self.nms_threshold = nmsThreshold
+        self.top_k = topK
+        self.keep_top_k = keepTopK
+        self.backend_id = backendId
+        self.target_id = targetId
+
+        self.output_names = ['loc', 'conf', 'iou']
+        self.min_sizes = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]
+        self.steps = [8, 16, 32, 64]
+        self.variance = [0.1, 0.2]
+
+        # load model
+        self.model = cv.dnn.readNet(self.model_path)
+        # generate anchors/priorboxes
+        self._priorGen()
+
+    @property
+    def name(self):
+        return self.__class__.__name__
+
+    def setBackend(self, backendId):
+        self.backend_id = backendId
+        self.model.setPreferableBackend(self.backend_id)
+
+    def setTarget(self, targetId):
+        self.target_id = targetId
+        self.model.setPreferableTarget(self.target_id)
+
+    def setInputSize(self, inputSize):
+        self.input_size = inputSize
+        # re-generate anchors/priorboxes
+        self._priorGen()
+
+    def _preprocess(self, image):
+        return cv.dnn.blobFromImage(image)
+
+    def infer(self, image):
+        assert image.shape[0] == self.input_size[1], '{} (height of input image) != {} (preset height)'.format(image.shape[0], self.input_size[1])
+        assert image.shape[1] == self.input_size[0], '{} (width of input image) != {} (preset width)'.format(image.shape[1], self.input_size[0])
+
+        # Preprocess
+        inputBlob = self._preprocess(image)
+
+        # Forward
+        self.model.setInput(inputBlob)
+        outputBlob = self.model.forward(self.output_names)
+
+        # Postprocess
+        results = self._postprocess(outputBlob)
+
+        return results
+
+    def _postprocess(self, blob):
+        # Decode
+        dets = self._decode(blob)
+
+        # NMS
+        keepIdx = cv.dnn.NMSBoxes(
+            bboxes=dets[:, 0:4].tolist(),
+            scores=dets[:, -1].tolist(),
+            score_threshold=self.confidence_threshold,
+            nms_threshold=self.nms_threshold,
+            top_k=self.top_k
+        ) # box_num x class_num
+        if len(keepIdx) > 0:
+            dets = dets[keepIdx]
+            return dets[:self.keep_top_k]
+        else:
+            return np.empty(shape=(0, 9))
+
+    def _priorGen(self):
+        w, h = self.input_size
+        feature_map_2th = [int(int((h + 1) / 2) / 2),
+                           int(int((w + 1) / 2) / 2)]
+        feature_map_3th = [int(feature_map_2th[0] / 2),
+                           int(feature_map_2th[1] / 2)]
+        feature_map_4th = [int(feature_map_3th[0] / 2),
+                           int(feature_map_3th[1] / 2)]
+        feature_map_5th = [int(feature_map_4th[0] / 2),
+                           int(feature_map_4th[1] / 2)]
+        feature_map_6th = [int(feature_map_5th[0] / 2),
+                           int(feature_map_5th[1] / 2)]
+
+        feature_maps = [feature_map_3th, feature_map_4th,
+                        feature_map_5th, feature_map_6th]
+
+        priors = []
+        for k, f in enumerate(feature_maps):
+            min_sizes = self.min_sizes[k]
+            for i, j in product(range(f[0]), range(f[1])): # i->h, j->w
+                for min_size in min_sizes:
+                    s_kx = min_size / w
+                    s_ky = min_size / h
+
+                    cx = (j + 0.5) * self.steps[k] / w
+                    cy = (i + 0.5) * self.steps[k] / h
+
+                    priors.append([cx, cy, s_kx, s_ky])
+        self.priors = np.array(priors, dtype=np.float32)
+
+    def _decode(self, blob):
+        loc, conf, iou = blob
+        # get score
+        cls_scores = conf[:, 1]
+        iou_scores = iou[:, 0]
+        # clamp
+        _idx = np.where(iou_scores < 0.)
+        iou_scores[_idx] = 0.
+        _idx = np.where(iou_scores > 1.)
+        iou_scores[_idx] = 1.
+        scores = np.sqrt(cls_scores * iou_scores)
+        scores = scores[:, np.newaxis]
+
+        scale = self.input_size
+
+        # get four corner points of the bounding box
+        bboxes = np.hstack((
+            (self.priors[:, 0:2] + loc[:, 4: 6] * self.variance[0] * self.priors[:, 2:4]) * scale,
+            (self.priors[:, 0:2] + loc[:, 6: 8] * self.variance[0] * self.priors[:, 2:4]) * scale,
+            (self.priors[:, 0:2] + loc[:, 10:12] * self.variance[0] * self.priors[:, 2:4]) * scale,
+            (self.priors[:, 0:2] + loc[:, 12:14] * self.variance[0] * self.priors[:, 2:4]) * scale
+        ))
+
+        dets = np.hstack((bboxes, scores))
+        return dets
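
As a sanity check on `_priorGen` (not part of the commit), reproducing its feature-map arithmetic gives the anchor count that the model's `loc`, `conf` and `iou` outputs must provide rows for:

```python
# Reproduce _priorGen's downsampling to count anchors at a given input size.
def num_priors(w, h, min_sizes=([10, 16, 24], [32, 48], [64, 96], [128, 192, 256])):
    fm = [int(int((h + 1) / 2) / 2), int(int((w + 1) / 2) / 2)]  # 2th feature map
    total = 0
    for sizes in min_sizes:  # 3th..6th maps, each half the previous
        fm = [int(fm[0] / 2), int(fm[1] / 2)]
        total += fm[0] * fm[1] * len(sizes)
    return total

print(num_priors(320, 240))  # 30*40*3 + 15*20*2 + 7*10*2 + 3*5*3 = 4385
```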
tools/quantize/inc_configs/lpd_yunet.yaml ADDED
@@ -0,0 +1,40 @@
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: 1.0
+
+model:                                   # mandatory. used to specify model specific information.
+  name: lpd_yunet
+  framework: onnxrt_qlinearops           # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension.
+
+quantization:                            # optional. tuning constraints on model-wise for advance user to reduce tuning space.
+  approach: post_training_static_quant   # optional. default value is post_training_static_quant.
+  calibration:
+    dataloader:
+      batch_size: 1
+      dataset:
+        dummy:
+          shape: [1, 3, 240, 320]
+          low: 0.0
+          high: 127.0
+          dtype: float32
+          label: True
+
+tuning:
+  accuracy_criterion:
+    relative: 0.02                       # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 2%.
+  exit_policy:
+    timeout: 0                           # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit.
+  random_seed: 9527                      # optional. random seed for deterministic tuning.
tools/quantize/quantize-inc.py CHANGED
@@ -1,10 +1,10 @@
 import os
 import sys
-import numpy as ny
+import numpy as np
 import cv2 as cv
 
 import onnx
-from neural_compressor.experimental import Quantization, common as nc_Quantization, nc_common
+from neural_compressor.experimental import Quantization, common
 
 class Quantize:
     def __init__(self, model_path, config_path, custom_dataset=None):
@@ -20,7 +20,7 @@ class Quantize:
         output_name = '{}-int8-quantized.onnx'.format(self.model_path[:-5])
 
         model = onnx.load(self.model_path)
-        quantizer = nc_Quantization(self.config_path)
+        quantizer = Quantization(self.config_path)
         if self.custom_dataset is not None:
             quantizer.calib_dataloader = common.DataLoader(self.custom_dataset)
         quantizer.model = common.Model(model)
@@ -28,8 +28,11 @@ class Quantize:
         q_model.save(output_name)
 
 class Dataset:
-    def __init__(self, root):
+    def __init__(self, root, size=None, toTensor=False):
         self.root = root
+        self.size = size
+        self.toTensor = toTensor
+
         self.image_list = self.load_image_list(self.root)
 
     def load_image_list(self, path):
@@ -37,11 +40,16 @@ class Dataset:
         for f in os.listdir(path):
             if not f.endswith('.jpg'):
                 continue
-            image_list.append(f)
+            image_list.append(os.path.join(path, f))
         return image_list
 
     def __getitem__(self, idx):
         img = cv.imread(self.image_list[idx])
+        if self.size:
+            img = cv.resize(img, dsize=self.size)
+        if self.toTensor:
+            img = img.transpose(2, 0, 1) # hwc -> chw
+            img = img.astype(np.float32)
         return img, 1
 
     def __len__(self):
@@ -54,7 +62,10 @@ models=dict(
                      config_path='./inc_configs/mobilenet.yaml'),
     mppalm_det=Quantize(model_path='../../models/palm_detection_mediapipe/palm_detection_mediapipe_2022may.onnx',
                         config_path='./inc_configs/mppalmdet.yaml',
-                        custom_dataset=Dataset(root='../../benchmark/data/palm_detection'))
+                        custom_dataset=Dataset(root='../../benchmark/data/palm_detection')),
+    lpd_yunet=Quantize(model_path='../../models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2022may.onnx',
+                       config_path='./inc_configs/lpd_yunet.yaml',
+                       custom_dataset=Dataset(root='../../benchmark/data/license_plate_detection', size=(320, 240), toTensor=True)),
 )
 
 if __name__ == '__main__':
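
Once the config and calibration images are in place, the new entry should be runnable like the existing ones, presumably `python quantize-inc.py lpd_yunet` from `tools/quantize`. Note that `size=(320, 240)` and `toTensor=True` make each calibration sample a float32 CHW tensor, matching the `[1, 3, 240, 320]` shape declared in the INC config above.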