Wwupup
committed on
Commit
·
41c69c8
1
Parent(s):
af3dd88
update yunet to v2 (#151)
Browse files
models/face_detection_yunet/README.md
CHANGED
@@ -13,8 +13,8 @@ Results of accuracy evaluation with [tools/eval](../../tools/eval).
|
|
13 |
|
14 |
| Models | Easy AP | Medium AP | Hard AP |
|
15 |
| ----------- | ------- | --------- | ------- |
|
16 |
-
| YuNet | 0.
|
17 |
-
| YuNet quant | 0.
|
18 |
|
19 |
\*: 'quant' stands for 'quantized'.
|
20 |
|
|
|
13 |
|
14 |
| Models | Easy AP | Medium AP | Hard AP |
|
15 |
| ----------- | ------- | --------- | ------- |
|
16 |
+
| YuNet | 0.8871 | 0.8710 | 0.7681 |
|
17 |
+
| YuNet quant | 0.8838 | 0.8683 | 0.7676 |
|
18 |
|
19 |
\*: 'quant' stands for 'quantized'.
|
20 |
|
models/face_detection_yunet/demo.cpp
CHANGED
@@ -112,7 +112,7 @@ int main(int argc, char** argv)
|
|
112 |
cv::CommandLineParser parser(argc, argv,
|
113 |
"{help h | | Print this message}"
|
114 |
"{input i | | Set input to a certain image, omit if using camera}"
|
115 |
-
"{model m |
|
116 |
"{backend b | opencv | Set DNN backend}"
|
117 |
"{target t | cpu | Set DNN target}"
|
118 |
"{save s | false | Whether to save result image or not}"
|
|
|
112 |
cv::CommandLineParser parser(argc, argv,
|
113 |
"{help h | | Print this message}"
|
114 |
"{input i | | Set input to a certain image, omit if using camera}"
|
115 |
+
"{model m | face_detection_yunet_2023mar.onnx | Set path to the model}"
|
116 |
"{backend b | opencv | Set DNN backend}"
|
117 |
"{target t | cpu | Set DNN target}"
|
118 |
"{save s | false | Whether to save result image or not}"
|
models/face_detection_yunet/demo.py
CHANGED
@@ -27,8 +27,8 @@ backend_target_pairs = [
|
|
27 |
parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
|
28 |
parser.add_argument('--input', '-i', type=str,
|
29 |
help='Usage: Set input to a certain image, omit if using camera.')
|
30 |
-
parser.add_argument('--model', '-m', type=str, default='
|
31 |
-
help="Usage: Set model type, defaults to '
|
32 |
parser.add_argument('--backend_target', '-bt', type=int, default=0,
|
33 |
help='''Choose one of the backend-target pair to run this demo:
|
34 |
{:d}: (default) OpenCV implementation + CPU,
|
|
|
27 |
parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
|
28 |
parser.add_argument('--input', '-i', type=str,
|
29 |
help='Usage: Set input to a certain image, omit if using camera.')
|
30 |
+
parser.add_argument('--model', '-m', type=str, default='face_detection_yunet_2023mar.onnx',
|
31 |
+
help="Usage: Set model type, defaults to 'face_detection_yunet_2023mar.onnx'.")
|
32 |
parser.add_argument('--backend_target', '-bt', type=int, default=0,
|
33 |
help='''Choose one of the backend-target pair to run this demo:
|
34 |
{:d}: (default) OpenCV implementation + CPU,
|
models/face_recognition_sface/demo.py
CHANGED
@@ -57,7 +57,7 @@ if __name__ == '__main__':
|
|
57 |
backendId=backend_id,
|
58 |
targetId=target_id)
|
59 |
# Instantiate YuNet for face detection
|
60 |
-
detector = YuNet(modelPath='../face_detection_yunet/
|
61 |
inputSize=[320, 320],
|
62 |
confThreshold=0.9,
|
63 |
nmsThreshold=0.3,
|
|
|
57 |
backendId=backend_id,
|
58 |
targetId=target_id)
|
59 |
# Instantiate YuNet for face detection
|
60 |
+
detector = YuNet(modelPath='../face_detection_yunet/face_detection_yunet_2023mar.onnx',
|
61 |
inputSize=[320, 320],
|
62 |
confThreshold=0.9,
|
63 |
nmsThreshold=0.3,
|
models/facial_expression_recognition/demo.py
CHANGED
@@ -86,7 +86,7 @@ if __name__ == '__main__':
|
|
86 |
backend_id = backend_target_pairs[args.backend_target][0]
|
87 |
target_id = backend_target_pairs[args.backend_target][1]
|
88 |
|
89 |
-
detect_model = YuNet(modelPath='../face_detection_yunet/
|
90 |
|
91 |
fer_model = FacialExpressionRecog(modelPath=args.model,
|
92 |
backendId=backend_id,
|
|
|
86 |
backend_id = backend_target_pairs[args.backend_target][0]
|
87 |
target_id = backend_target_pairs[args.backend_target][1]
|
88 |
|
89 |
+
detect_model = YuNet(modelPath='../face_detection_yunet/face_detection_yunet_2023mar.onnx')
|
90 |
|
91 |
fer_model = FacialExpressionRecog(modelPath=args.model,
|
92 |
backendId=backend_id,
|
tools/eval/eval.py
CHANGED
@@ -54,14 +54,14 @@ models = dict(
|
|
54 |
yunet=dict(
|
55 |
name="YuNet",
|
56 |
topic="face_detection",
|
57 |
-
modelPath=os.path.join(root_dir, "models/face_detection_yunet/
|
58 |
topK=5000,
|
59 |
confThreshold=0.3,
|
60 |
nmsThreshold=0.45),
|
61 |
yunet_q=dict(
|
62 |
name="YuNet",
|
63 |
topic="face_detection",
|
64 |
-
modelPath=os.path.join(root_dir, "models/face_detection_yunet/
|
65 |
topK=5000,
|
66 |
confThreshold=0.3,
|
67 |
nmsThreshold=0.45),
|
|
|
54 |
yunet=dict(
|
55 |
name="YuNet",
|
56 |
topic="face_detection",
|
57 |
+
modelPath=os.path.join(root_dir, "models/face_detection_yunet/face_detection_yunet_2023mar.onnx"),
|
58 |
topK=5000,
|
59 |
confThreshold=0.3,
|
60 |
nmsThreshold=0.45),
|
61 |
yunet_q=dict(
|
62 |
name="YuNet",
|
63 |
topic="face_detection",
|
64 |
+
modelPath=os.path.join(root_dir, "models/face_detection_yunet/face_detection_yunet_2023mar_int8.onnx"),
|
65 |
topK=5000,
|
66 |
confThreshold=0.3,
|
67 |
nmsThreshold=0.45),
|