fengyuentau committed
Commit ce28923
0 Parent(s)

Benchmark framework implementation and 3 models added:


* benchmark framework: benchmarks based on configs

* added impl and benchmark for YuNet (face detection)

* added impl and benchmark for DB (text detection)

* added impl and benchmark for CRNN (text recognition)

Files changed (4)
  1. LICENSE +21 -0
  2. README.md +22 -0
  3. demo.py +122 -0
  4. yunet.py +149 -0
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2020 Shiqi Yu <[email protected]>
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,22 @@
+ # YuNet
+
+ YuNet is a lightweight, fast, and accurate face detection model that achieves 0.834 (AP_easy), 0.824 (AP_medium), and 0.708 (AP_hard) on the WIDER Face validation set.
+
+ ## Demo
+
+ Run the following commands to try the demo:
+ ```shell
+ # detect on camera input
+ python demo.py
+ # detect on an image
+ python demo.py --input /path/to/image
+ ```
+
+ ## License
+
+ All files in this directory are licensed under the [MIT License](./LICENSE).
+
+ ## Reference
+
+ - https://github.com/ShiqiYu/libfacedetection
+ - https://github.com/ShiqiYu/libfacedetection.train
demo.py ADDED
@@ -0,0 +1,122 @@
+ # This file is part of OpenCV Zoo project.
+ # It is subject to the license terms in the LICENSE file found in the same directory.
+ #
+ # Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
+ # Third party copyrights are property of their respective owners.
+
+ import argparse
+
+ import numpy as np
+ import cv2 as cv
+
+ from yunet import YuNet
+
+ def str2bool(v):
+     if v.lower() in ['on', 'yes', 'true', 'y', 't']:
+         return True
+     elif v.lower() in ['off', 'no', 'false', 'n', 'f']:
+         return False
+     else:
+         raise NotImplementedError
+
+ parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
+ parser.add_argument('--input', '-i', type=str, help='Path to the input image. Omit to use the default camera.')
+ parser.add_argument('--model', '-m', type=str, default='face_detection_yunet.onnx', help='Path to the model.')
+ parser.add_argument('--conf_threshold', type=float, default=0.9, help='Filter out faces with confidence < conf_threshold.')
+ parser.add_argument('--nms_threshold', type=float, default=0.3, help='Suppress bounding boxes with IoU >= nms_threshold.')
+ parser.add_argument('--top_k', type=int, default=5000, help='Keep top_k bounding boxes before NMS.')
+ parser.add_argument('--keep_top_k', type=int, default=750, help='Keep keep_top_k bounding boxes after NMS.')
+ parser.add_argument('--save', '-s', type=str2bool, default=False, help='Set true to save results. This flag is invalid when using camera.')
+ parser.add_argument('--vis', '-v', type=str2bool, default=True, help='Set true to open a window for result visualization. This flag is invalid when using camera.')
+ args = parser.parse_args()
+
+ def visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), fps=None):
+     output = image.copy()
+     landmark_color = [
+         (255,   0,   0),  # right eye
+         (  0,   0, 255),  # left eye
+         (  0, 255,   0),  # nose tip
+         (255,   0, 255),  # right mouth corner
+         (  0, 255, 255)   # left mouth corner
+     ]
+
+     if fps is not None:
+         cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
+
+     for det in results:
+         bbox = det[0:4].astype(np.int32)
+         cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), box_color, 2)
+
+         conf = det[-1]
+         cv.putText(output, '{:.4f}'.format(conf), (bbox[0], bbox[1]+12), cv.FONT_HERSHEY_DUPLEX, 0.5, text_color)
+
+         landmarks = det[4:14].astype(np.int32).reshape((5, 2))
+         for idx, landmark in enumerate(landmarks):
+             cv.circle(output, landmark, 2, landmark_color[idx], 2)
+
+     return output
+
+ if __name__ == '__main__':
+     # Instantiate YuNet
+     model = YuNet(modelPath=args.model,
+                   inputSize=[320, 320],
+                   confThreshold=args.conf_threshold,
+                   nmsThreshold=args.nms_threshold,
+                   topK=args.top_k,
+                   keepTopK=args.keep_top_k)
+
+     # If input is an image
+     if args.input is not None:
+         image = cv.imread(args.input)
+         h, w, _ = image.shape
+
+         # Inference
+         model.setInputSize([w, h])
+         results = model.infer(image)
+
+         # Print results
+         print('{} faces detected.'.format(results.shape[0]))
+         for idx, det in enumerate(results):
+             print('{}: [{:.0f}, {:.0f}] [{:.0f}, {:.0f}], {:.2f}'.format(
+                 idx, det[0], det[1], det[2], det[3], det[-1])
+             )
+
+         # Draw results on the input image
+         image = visualize(image, results)
+
+         # Save results if save is true
+         if args.save:
+             print('Results saved to result.jpg\n')
+             cv.imwrite('result.jpg', image)
+
+         # Visualize results in a new window
+         if args.vis:
+             cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE)
+             cv.imshow(args.input, image)
+             cv.waitKey(0)
+     else:  # Omit input to call the default camera
+         deviceId = 0
+         cap = cv.VideoCapture(deviceId)
+         w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
+         h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
+         model.setInputSize([w, h])
+
+         tm = cv.TickMeter()
+         while cv.waitKey(1) < 0:
+             hasFrame, frame = cap.read()
+             if not hasFrame:
+                 print('No frames grabbed!')
+                 break
+
+             # Inference
+             tm.start()
+             results = model.infer(frame)  # results is an N x 15 array
+             tm.stop()
+
+             # Draw results on the input frame
+             frame = visualize(frame, results, fps=tm.getFPS())
+
+             # Visualize results in a new window
+             cv.imshow('YuNet Demo', frame)
+
+             tm.reset()
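For reference, each row that `model.infer` returns packs 15 values, which is exactly what `visualize` above unpacks. A minimal sketch of that layout (the zero-filled row is a placeholder for illustration only):

```python
import numpy as np

# Layout of one detection row from the N x 15 output, as consumed by visualize():
det = np.zeros(15, dtype=np.float32)  # placeholder row for illustration
x, y, w, h = det[0:4]                 # top-left corner plus width/height
landmarks = det[4:14].reshape(5, 2)   # right eye, left eye, nose tip, mouth corners
score = det[-1]                       # sqrt(cls_score * iou_score), see yunet.py
```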
yunet.py ADDED
@@ -0,0 +1,149 @@
+ # This file is part of OpenCV Zoo project.
+ # It is subject to the license terms in the LICENSE file found in the same directory.
+ #
+ # Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
+ # Third party copyrights are property of their respective owners.
+
+ from itertools import product
+
+ import numpy as np
+ import cv2 as cv
+
+ class YuNet:
+     def __init__(self, modelPath, inputSize=[320, 320], confThreshold=0.6, nmsThreshold=0.3, topK=5000, keepTopK=750):
+         self._modelPath = modelPath
+         self._model = cv.dnn.readNet(self._modelPath)
+
+         self._inputNames = ''
+         self._outputNames = ['loc', 'conf', 'iou']
+         self._inputSize = inputSize  # [w, h]
+         self._confThreshold = confThreshold
+         self._nmsThreshold = nmsThreshold
+         self._topK = topK
+         self._keepTopK = keepTopK
+
+         self._min_sizes = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]
+         self._steps = [8, 16, 32, 64]
+         self._variance = [0.1, 0.2]
+
+         # Generate priors
+         self._priorGen()
+
+     @property
+     def name(self):
+         return self.__class__.__name__
+
+     def setBackend(self, backend):
+         self._model.setPreferableBackend(backend)
+
+     def setTarget(self, target):
+         self._model.setPreferableTarget(target)
+
+     def setInputSize(self, input_size):
+         self._inputSize = input_size  # [w, h]
+
+         # Regenerate priors
+         self._priorGen()
+
+     def _preprocess(self, image):
+         return cv.dnn.blobFromImage(image)
+
+     def infer(self, image):
+         assert image.shape[0] == self._inputSize[1], '{} (height of input image) != {} (preset height)'.format(image.shape[0], self._inputSize[1])
+         assert image.shape[1] == self._inputSize[0], '{} (width of input image) != {} (preset width)'.format(image.shape[1], self._inputSize[0])
+
+         # Preprocess
+         inputBlob = self._preprocess(image)
+
+         # Forward
+         self._model.setInput(inputBlob, self._inputNames)
+         outputBlob = self._model.forward(self._outputNames)
+
+         # Postprocess
+         results = self._postprocess(outputBlob)
+
+         return results
+
+     def _postprocess(self, outputBlob):
+         # Decode
+         dets = self._decode(outputBlob)
+
+         # NMS
+         keepIdx = cv.dnn.NMSBoxes(
+             bboxes=dets[:, 0:4].tolist(),
+             scores=dets[:, -1].tolist(),
+             score_threshold=self._confThreshold,
+             nms_threshold=self._nmsThreshold,
+             top_k=self._topK
+         )
+         if len(keepIdx) > 0:
+             # NMSBoxes may return indices shaped (N, 1) or (N,) depending on
+             # the OpenCV version; flatten to handle both.
+             dets = dets[np.asarray(keepIdx).flatten()]
+             return dets[:self._keepTopK]
+         else:
+             return np.empty(shape=(0, 15))
+
+     def _priorGen(self):
+         w, h = self._inputSize
+         feature_map_2th = [int(int((h + 1) / 2) / 2),
+                            int(int((w + 1) / 2) / 2)]
+         feature_map_3th = [int(feature_map_2th[0] / 2),
+                            int(feature_map_2th[1] / 2)]
+         feature_map_4th = [int(feature_map_3th[0] / 2),
+                            int(feature_map_3th[1] / 2)]
+         feature_map_5th = [int(feature_map_4th[0] / 2),
+                            int(feature_map_4th[1] / 2)]
+         feature_map_6th = [int(feature_map_5th[0] / 2),
+                            int(feature_map_5th[1] / 2)]
+
+         feature_maps = [feature_map_3th, feature_map_4th,
+                         feature_map_5th, feature_map_6th]
+
+         priors = []
+         for k, f in enumerate(feature_maps):
+             min_sizes = self._min_sizes[k]
+             for i, j in product(range(f[0]), range(f[1])):  # i->h, j->w
+                 for min_size in min_sizes:
+                     s_kx = min_size / w
+                     s_ky = min_size / h
+
+                     cx = (j + 0.5) * self._steps[k] / w
+                     cy = (i + 0.5) * self._steps[k] / h
+
+                     priors.append([cx, cy, s_kx, s_ky])
+         self.priors = np.array(priors, dtype=np.float32)
+
+     def _decode(self, outputBlob):
+         loc, conf, iou = outputBlob
+         # Get scores: geometric mean of the classification and IoU scores
+         cls_scores = conf[:, 1]
+         iou_scores = iou[:, 0]
+         # Clamp IoU scores to [0, 1]
+         _idx = np.where(iou_scores < 0.)
+         iou_scores[_idx] = 0.
+         _idx = np.where(iou_scores > 1.)
+         iou_scores[_idx] = 1.
+         scores = np.sqrt(cls_scores * iou_scores)
+         scores = scores[:, np.newaxis]
+
+         scale = np.array(self._inputSize)
+
+         # Get bboxes: SSD-style decoding against the priors,
+         # center = prior_center + loc * variance * prior_size,
+         # size = prior_size * exp(loc * variance)
+         bboxes = np.hstack((
+             (self.priors[:, 0:2] + loc[:, 0:2] * self._variance[0] * self.priors[:, 2:4]) * scale,
+             (self.priors[:, 2:4] * np.exp(loc[:, 2:4] * self._variance)) * scale
+         ))
+         # (x_c, y_c, w, h) -> (x1, y1, w, h)
+         bboxes[:, 0:2] -= bboxes[:, 2:4] / 2
+
+         # Get landmarks, decoded the same way as the box centers
+         landmarks = np.hstack((
+             (self.priors[:, 0:2] + loc[:, 4: 6] * self._variance[0] * self.priors[:, 2:4]) * scale,
+             (self.priors[:, 0:2] + loc[:, 6: 8] * self._variance[0] * self.priors[:, 2:4]) * scale,
+             (self.priors[:, 0:2] + loc[:, 8:10] * self._variance[0] * self.priors[:, 2:4]) * scale,
+             (self.priors[:, 0:2] + loc[:, 10:12] * self._variance[0] * self.priors[:, 2:4]) * scale,
+             (self.priors[:, 0:2] + loc[:, 12:14] * self._variance[0] * self.priors[:, 2:4]) * scale
+         ))
+
+         # Each row: [x, y, w, h, 10 landmark coordinates, score] -> 15 values
+         dets = np.hstack((bboxes, landmarks, scores))
+         return dets
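Putting the pieces together, here is a minimal standalone sketch of using the `YuNet` class above. The model filename follows the demo's default, and `/path/to/image.jpg` is a placeholder; both are assumed to exist locally:

```python
import cv2 as cv

from yunet import YuNet

# A minimal sketch, assuming face_detection_yunet.onnx is available locally.
model = YuNet(modelPath='face_detection_yunet.onnx',
              confThreshold=0.9,
              nmsThreshold=0.3)

image = cv.imread('/path/to/image.jpg')
h, w, _ = image.shape
model.setInputSize([w, h])  # regenerates priors for the new input size

dets = model.infer(image)   # N x 15 array: bbox, 5 landmarks, score
for det in dets:
    print('bbox: {}, score: {:.2f}'.format(det[0:4].astype(int), det[-1]))
```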