Yuantao Feng committed
Commit 39e569f · 1 Parent(s): 5ac585d

Update to OpenCV APIs (YuNet -> FaceDetectorYN, SFace -> FaceRecognizerSF) (#6)

Files changed (2)
  1. demo.py +1 -3
  2. yunet.py +39 -122
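Note that the two diffs below cover only the YuNet (detection) side of the commit title; the SFace recognition code is not part of these files. For orientation, here is a rough sketch of how the two new OpenCV classes are typically paired (model and image file names are placeholders, not files from this repo; both classes ship with OpenCV 4.5.4 and later):

```python
import cv2 as cv

# Placeholder model/image paths; FaceDetectorYN and FaceRecognizerSF require OpenCV >= 4.5.4.
detector = cv.FaceDetectorYN.create('face_detection_yunet.onnx', '', (320, 320))
recognizer = cv.FaceRecognizerSF.create('face_recognition_sface.onnx', '')

def embed(path):
    # Detect faces in an image and return the SFace embedding of the first detection.
    img = cv.imread(path)
    detector.setInputSize((img.shape[1], img.shape[0]))
    _, faces = detector.detect(img)                # faces: N x 15 array (or None if no face)
    aligned = recognizer.alignCrop(img, faces[0])  # assumes at least one face was found
    return recognizer.feature(aligned)

# Cosine similarity between the two embeddings; higher means more likely the same identity.
score = recognizer.match(embed('person1.jpg'), embed('person2.jpg'))
print(score)
```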
demo.py CHANGED
@@ -25,7 +25,6 @@ parser.add_argument('--model', '-m', type=str, default='face_detection_yunet.onn
 parser.add_argument('--conf_threshold', type=float, default=0.9, help='Filter out faces of confidence < conf_threshold.')
 parser.add_argument('--nms_threshold', type=float, default=0.3, help='Suppress bounding boxes of iou >= nms_threshold.')
 parser.add_argument('--top_k', type=int, default=5000, help='Keep top_k bounding boxes before NMS.')
-parser.add_argument('--keep_top_k', type=int, default=750, help='Keep keep_top_k bounding boxes after NMS.')
 parser.add_argument('--save', '-s', type=str, default=False, help='Set true to save results. This flag is invalid when using camera.')
 parser.add_argument('--vis', '-v', type=str2bool, default=True, help='Set true to open a window for result visualization. This flag is invalid when using camera.')
 args = parser.parse_args()
@@ -62,8 +61,7 @@ if __name__ == '__main__':
                   inputSize=[320, 320],
                   confThreshold=args.conf_threshold,
                   nmsThreshold=args.nms_threshold,
-                  topK=args.top_k,
-                  keepTopK=args.keep_top_k)
+                  topK=args.top_k)
 
     # If input is an image
     if args.input is not None:
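With keep_top_k gone, the demo builds the wrapper from just the remaining flags; cv.FaceDetectorYN performs NMS internally, so there is no separate post-NMS cap. A minimal sketch using the argparse defaults above (the model filename is a placeholder, since the --model default is truncated in the hunk header):

```python
from yunet import YuNet  # the wrapper rewritten in yunet.py below

# Values mirror the argparse defaults above; the model path is a placeholder.
model = YuNet(modelPath='face_detection_yunet.onnx',
              inputSize=[320, 320],
              confThreshold=0.9,   # --conf_threshold
              nmsThreshold=0.3,    # --nms_threshold
              topK=5000)           # --top_k; keepTopK no longer exists
```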
yunet.py CHANGED
@@ -10,140 +10,57 @@ import numpy as np
 import cv2 as cv
 
 class YuNet:
-    def __init__(self, modelPath, inputSize=[320, 320], confThreshold=0.6, nmsThreshold=0.3, topK=5000, keepTopK=750):
+    def __init__(self, modelPath, inputSize=[320, 320], confThreshold=0.6, nmsThreshold=0.3, topK=5000, backendId=0, targetId=0):
         self._modelPath = modelPath
-        self._model = cv.dnn.readNet(self._modelPath)
-
-        self._inputNames = ''
-        self._outputNames = ['loc', 'conf', 'iou']
-        self._inputSize = inputSize # [w, h]
+        self._inputSize = tuple(inputSize) # [w, h]
         self._confThreshold = confThreshold
         self._nmsThreshold = nmsThreshold
         self._topK = topK
-        self._keepTopK = keepTopK
-
-        self._min_sizes = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]
-        self._steps = [8, 16, 32, 64]
-        self._variance = [0.1, 0.2]
+        self._backendId = backendId
+        self._targetId = targetId
 
-        # Generate priors
-        self._priorGen()
+        self._model = cv.FaceDetectorYN.create(
+            model=self._modelPath,
+            config="",
+            input_size=self._inputSize,
+            score_threshold=self._confThreshold,
+            nms_threshold=self._nmsThreshold,
+            top_k=self._topK,
+            backend_id=self._backendId,
+            target_id=self._targetId)
 
     @property
     def name(self):
         return self.__class__.__name__
 
-    def setBackend(self, backend):
-        self._model.setPreferableBackend(backend)
-
-    def setTarget(self, target):
-        self._model.setPreferableTarget(target)
+    def setBackend(self, backendId):
+        self._backendId = backendId
+        self._model = cv.FaceDetectorYN.create(
+            model=self._modelPath,
+            config="",
+            input_size=self._inputSize,
+            score_threshold=self._confThreshold,
+            nms_threshold=self._nmsThreshold,
+            top_k=self._topK,
+            backend_id=self._backendId,
+            target_id=self._targetId)
+
+    def setTarget(self, targetId):
+        self._targetId = targetId
+        self._model = cv.FaceDetectorYN.create(
+            model=self._modelPath,
+            config="",
+            input_size=self._inputSize,
+            score_threshold=self._confThreshold,
+            nms_threshold=self._nmsThreshold,
+            top_k=self._topK,
+            backend_id=self._backendId,
+            target_id=self._targetId)
 
     def setInputSize(self, input_size):
-        self._inputSize = input_size # [w, h]
-
-        # Regenerate priors
-        self._priorGen()
-
-    def _preprocess(self, image):
-        return cv.dnn.blobFromImage(image)
+        self._model.setInputSize(tuple(input_size))
 
     def infer(self, image):
-        assert image.shape[0] == self._inputSize[1], '{} (height of input image) != {} (preset height)'.format(image.shape[0], self._inputSize[1])
-        assert image.shape[1] == self._inputSize[0], '{} (width of input image) != {} (preset width)'.format(image.shape[1], self._inputSize[0])
-
-        # Preprocess
-        inputBlob = self._preprocess(image)
-
         # Forward
-        self._model.setInput(inputBlob, self._inputNames)
-        outputBlob = self._model.forward(self._outputNames)
-
-        # Postprocess
-        results = self._postprocess(outputBlob)
-
-        return results
-
-    def _postprocess(self, outputBlob):
-        # Decode
-        dets = self._decode(outputBlob)
-
-        # NMS
-        keepIdx = cv.dnn.NMSBoxes(
-            bboxes=dets[:, 0:4].tolist(),
-            scores=dets[:, -1].tolist(),
-            score_threshold=self._confThreshold,
-            nms_threshold=self._nmsThreshold,
-            top_k=self._topK
-        ) # box_num x class_num
-        if len(keepIdx) > 0:
-            dets = dets[keepIdx]
-            dets = np.squeeze(dets, axis=1)
-            return dets[:self._keepTopK]
-        else:
-            return np.empty(shape=(0, 15))
-
-    def _priorGen(self):
-        w, h = self._inputSize
-        feature_map_2th = [int(int((h + 1) / 2) / 2),
-                           int(int((w + 1) / 2) / 2)]
-        feature_map_3th = [int(feature_map_2th[0] / 2),
-                           int(feature_map_2th[1] / 2)]
-        feature_map_4th = [int(feature_map_3th[0] / 2),
-                           int(feature_map_3th[1] / 2)]
-        feature_map_5th = [int(feature_map_4th[0] / 2),
-                           int(feature_map_4th[1] / 2)]
-        feature_map_6th = [int(feature_map_5th[0] / 2),
-                           int(feature_map_5th[1] / 2)]
-
-        feature_maps = [feature_map_3th, feature_map_4th,
-                        feature_map_5th, feature_map_6th]
-
-        priors = []
-        for k, f in enumerate(feature_maps):
-            min_sizes = self._min_sizes[k]
-            for i, j in product(range(f[0]), range(f[1])): # i->h, j->w
-                for min_size in min_sizes:
-                    s_kx = min_size / w
-                    s_ky = min_size / h
-
-                    cx = (j + 0.5) * self._steps[k] / w
-                    cy = (i + 0.5) * self._steps[k] / h
-
-                    priors.append([cx, cy, s_kx, s_ky])
-        self.priors = np.array(priors, dtype=np.float32)
-
-    def _decode(self, outputBlob):
-        loc, conf, iou = outputBlob
-        # get score
-        cls_scores = conf[:, 1]
-        iou_scores = iou[:, 0]
-        # clamp
-        _idx = np.where(iou_scores < 0.)
-        iou_scores[_idx] = 0.
-        _idx = np.where(iou_scores > 1.)
-        iou_scores[_idx] = 1.
-        scores = np.sqrt(cls_scores * iou_scores)
-        scores = scores[:, np.newaxis]
-
-        scale = np.array(self._inputSize)
-
-        # get bboxes
-        bboxes = np.hstack((
-            (self.priors[:, 0:2] + loc[:, 0:2] * self._variance[0] * self.priors[:, 2:4]) * scale,
-            (self.priors[:, 2:4] * np.exp(loc[:, 2:4] * self._variance)) * scale
-        ))
-        # (x_c, y_c, w, h) -> (x1, y1, w, h)
-        bboxes[:, 0:2] -= bboxes[:, 2:4] / 2
-
-        # get landmarks
-        landmarks = np.hstack((
-            (self.priors[:, 0:2] + loc[:, 4: 6] * self._variance[0] * self.priors[:, 2:4]) * scale,
-            (self.priors[:, 0:2] + loc[:, 6: 8] * self._variance[0] * self.priors[:, 2:4]) * scale,
-            (self.priors[:, 0:2] + loc[:, 8:10] * self._variance[0] * self.priors[:, 2:4]) * scale,
-            (self.priors[:, 0:2] + loc[:, 10:12] * self._variance[0] * self.priors[:, 2:4]) * scale,
-            (self.priors[:, 0:2] + loc[:, 12:14] * self._variance[0] * self.priors[:, 2:4]) * scale
-        ))
-
-        dets = np.hstack((bboxes, landmarks, scores))
-        return dets
+        faces = self._model.detect(image)
+        return faces[1]
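The rewritten wrapper delegates preprocessing, prior generation, decoding, and NMS to OpenCV: detect() returns a tuple whose second element is an N x 15 array (x, y, w, h, ten landmark coordinates, score), which infer() now passes through unchanged. For reference, a minimal standalone sketch of the same API with placeholder file names, assuming OpenCV 4.5.4 or later:

```python
import cv2 as cv

# Placeholder model/image paths; FaceDetectorYN requires OpenCV >= 4.5.4.
detector = cv.FaceDetectorYN.create(
    model='face_detection_yunet.onnx',
    config='',
    input_size=(320, 320),
    score_threshold=0.9,
    nms_threshold=0.3,
    top_k=5000)

image = cv.imread('input.jpg')
detector.setInputSize((image.shape[1], image.shape[0]))  # (w, h) must match the image
_, faces = detector.detect(image)  # faces: N x 15 array, or None when nothing is detected

if faces is not None:
    for face in faces:
        x, y, w, h = map(int, face[:4])       # bounding box
        landmarks = face[4:14].reshape(5, 2)  # 5 landmarks: eyes, nose tip, mouth corners
        score = face[-1]                      # detection confidence
        cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
```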