replace pphumanseg with a simplified one (#143)
Summary of changes:
* replace pphumanseg with a simplified one
* update demo default
models/human_segmentation_pphumanseg/demo.py
CHANGED
@@ -33,7 +33,7 @@ except:

 parser = argparse.ArgumentParser(description='PPHumanSeg (https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.2/contrib/PP-HumanSeg)')
 parser.add_argument('--input', '-i', type=str, help='Usage: Set input path to a certain image, omit if using camera.')
-parser.add_argument('--model', '-m', type=str, default='…  [removed line truncated in page capture — old default model filename not recoverable from this source]
+parser.add_argument('--model', '-m', type=str, default='human_segmentation_pphumanseg_2023mar.onnx', help='Usage: Set model path, defaults to human_segmentation_pphumanseg_2023mar.onnx.')
 parser.add_argument('--backend', '-b', type=int, default=backends[0], help=help_msg_backends.format(*backends))
 parser.add_argument('--target', '-t', type=int, default=targets[0], help=help_msg_targets.format(*targets))
 parser.add_argument('--save', '-s', type=str, default=False, help='Usage: Set “True” to save a file with results. Invalid in case of camera input. Default will be set to “False”.')
@@ -153,4 +153,3 @@ if __name__ == '__main__':
         cv.imshow('PPHumanSeg Demo', frame)

         tm.reset()
-
tools/quantize/quantize-ort.py
CHANGED
@@ -91,7 +91,7 @@ models=dict(
     sface=Quantize(model_path='../../models/face_recognition_sface/face_recognition_sface_2021dec.onnx',
                    calibration_image_dir='../../benchmark/data/face_recognition',
                    transforms=Compose([Resize(size=(112, 112))])),
-…  [removed line truncated in page capture — presumably the old pphumanseg=Quantize(model_path=…) entry; not recoverable from this source]
+    pphumanseg=Quantize(model_path='../../models/human_segmentation_pphumanseg/human_segmentation_pphumanseg_2023mar.onnx',
                    calibration_image_dir='../../benchmark/data/human_segmentation',
                    transforms=Compose([Resize(size=(192, 192))])),
     ppresnet50=Quantize(model_path='../../models/image_classification_ppresnet/image_classification_ppresnet50_2022jan.onnx',