Use YuNet of fixed input shape to avoid 'parseShape' error (#45)
* replace with yunet of fixed input shape
* update quantized yunet
* update yunet filename used in scripts
* add a note message for https://github.com/opencv/opencv_zoo/issues/44
benchmark/config/face_detection_yunet.yaml
CHANGED
@@ -16,7 +16,8 @@ Benchmark:
|
|
16 |
|
17 |
Model:
|
18 |
name: "YuNet"
|
19 |
-
modelPath: "models/face_detection_yunet/face_detection_yunet.onnx"
|
20 |
confThreshold: 0.6
|
21 |
nmsThreshold: 0.3
|
22 |
-
topK: 5000
|
|
|
|
16 |
|
17 |
Model:
|
18 |
name: "YuNet"
|
19 |
+
modelPath: "models/face_detection_yunet/face_detection_yunet_2022mar.onnx"
|
20 |
confThreshold: 0.6
|
21 |
nmsThreshold: 0.3
|
22 |
+
topK: 5000
|
23 |
+
|
models/face_detection_yunet/README.md
CHANGED
@@ -5,6 +5,7 @@ YuNet is a light-weight, fast and accurate face detection model, which achieves
|
|
5 |
Notes:
|
6 |
- Model source: [here](https://github.com/ShiqiYu/libfacedetection.train/blob/a61a428929148171b488f024b5d6774f93cdbc13/tasks/task1/onnx/yunet.onnx).
|
7 |
- For details on training this model, please visit https://github.com/ShiqiYu/libfacedetection.train.
|
|
|
8 |
|
9 |
## Demo
|
10 |
|
|
|
5 |
Notes:
|
6 |
- Model source: [here](https://github.com/ShiqiYu/libfacedetection.train/blob/a61a428929148171b488f024b5d6774f93cdbc13/tasks/task1/onnx/yunet.onnx).
|
7 |
- For details on training this model, please visit https://github.com/ShiqiYu/libfacedetection.train.
|
8 |
+
- This ONNX model has fixed input shape, but OpenCV DNN infers on the exact shape of input image. See https://github.com/opencv/opencv_zoo/issues/44 for more information.
|
9 |
|
10 |
## Demo
|
11 |
|
models/face_detection_yunet/demo.py
CHANGED
@@ -33,7 +33,7 @@ except:
|
|
33 |
|
34 |
parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
|
35 |
parser.add_argument('--input', '-i', type=str, help='Path to the input image. Omit for using default camera.')
|
36 |
-
parser.add_argument('--model', '-m', type=str, default='face_detection_yunet.onnx', help='Path to the model.')
|
37 |
parser.add_argument('--backend', '-b', type=int, default=backends[0], help=help_msg_backends.format(*backends))
|
38 |
parser.add_argument('--target', '-t', type=int, default=targets[0], help=help_msg_targets.format(*targets))
|
39 |
parser.add_argument('--conf_threshold', type=float, default=0.9, help='Filter out faces of confidence < conf_threshold.')
|
|
|
33 |
|
34 |
parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
|
35 |
parser.add_argument('--input', '-i', type=str, help='Path to the input image. Omit for using default camera.')
|
36 |
+
parser.add_argument('--model', '-m', type=str, default='face_detection_yunet_2022mar.onnx', help='Path to the model.')
|
37 |
parser.add_argument('--backend', '-b', type=int, default=backends[0], help=help_msg_backends.format(*backends))
|
38 |
parser.add_argument('--target', '-t', type=int, default=targets[0], help=help_msg_targets.format(*targets))
|
39 |
parser.add_argument('--conf_threshold', type=float, default=0.9, help='Filter out faces of confidence < conf_threshold.')
|
tools/quantize/quantize.py
CHANGED
@@ -78,8 +78,9 @@ class Quantize:
|
|
78 |
|
79 |
|
80 |
models=dict(
|
81 |
-
yunet=Quantize(model_path='../../models/face_detection_yunet/face_detection_yunet.onnx',
|
82 |
-
calibration_image_dir='../../benchmark/data/face_detection'),
|
|
|
83 |
sface=Quantize(model_path='../../models/face_recognition_sface/face_recognition_sface_2021dec.onnx',
|
84 |
calibration_image_dir='../../benchmark/data/face_recognition',
|
85 |
transforms=Compose([Resize(size=(112, 112))])),
|
|
|
78 |
|
79 |
|
80 |
models=dict(
|
81 |
+
yunet=Quantize(model_path='../../models/face_detection_yunet/face_detection_yunet_2022mar.onnx',
|
82 |
+
calibration_image_dir='../../benchmark/data/face_detection',
|
83 |
+
transforms=Compose([Resize(size=(160, 120))])),
|
84 |
sface=Quantize(model_path='../../models/face_recognition_sface/face_recognition_sface_2021dec.onnx',
|
85 |
calibration_image_dir='../../benchmark/data/face_recognition',
|
86 |
transforms=Compose([Resize(size=(112, 112))])),
|