bump lpd_yunet opset version to 13 (#142)
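Moves the lpd_yunet quantization entry from the Neural Compressor script (tools/quantize/quantize-inc.py) to the ONNX Runtime script (tools/quantize/quantize-ort.py), switching from the 2022may model to license_plate_detection_lpd_yunet_2023mar.onnx, exported at opset 13.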
tools/quantize/quantize-inc.py
@@ -130,9 +130,6 @@ models = dict(
     mp_handpose=Quantize(model_path='../../models/handpose_estimation_mediapipe/handpose_estimation_mediapipe_2022may.onnx',
                          config_path='./inc_configs/mp_handpose.yaml',
                          custom_dataset=Dataset(root='../../benchmark/data/palm_detection', dim='hwc', swapRB=True, mean=127.5, std=127.5, toFP32=True)),
-    lpd_yunet=Quantize(model_path='../../models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2022may.onnx',
-                       config_path='./inc_configs/lpd_yunet.yaml',
-                       custom_dataset=Dataset(root='../../benchmark/data/license_plate_detection', size=(320, 240), dim='chw', toFP32=True)),
     fer=Quantize(model_path='../../models/facial_expression_recognition/facial_expression_recognition_mobilefacenet_2022july.onnx',
                  config_path='./inc_configs/fer.yaml',
                  custom_dataset=FerDataset(root='../../benchmark/data/facial_expression_recognition/fer_calibration', size=(112, 112), toFP32=True, swapRB=True, scale=1./255, mean=0.5, std=0.5),
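For context, bumping an ONNX model to opset 13 usually means re-exporting it from the source framework. A minimal sketch assuming a PyTorch checkpoint; the checkpoint name, input tensor name, and 240x320 input shape are illustrative (the shape mirrors the (320, 240) calibration size used below), not taken from this PR:

```python
import torch

# Assumption: the LPD-YuNet network is available as a whole saved PyTorch
# module; 'lpd_yunet.pth' is a placeholder name, not part of this PR.
model = torch.load('lpd_yunet.pth', map_location='cpu')
model.eval()

# NCHW dummy input; 240x320 mirrors the Resize(size=(320, 240)) calibration
# transform added to quantize-ort.py below.
dummy = torch.randn(1, 3, 240, 320)

torch.onnx.export(
    model,
    dummy,
    'license_plate_detection_lpd_yunet_2023mar.onnx',
    opset_version=13,       # the opset bump this PR targets
    input_names=['input'],  # illustrative input name
)
```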
tools/quantize/quantize-ort.py
@@ -116,6 +116,9 @@ models=dict(
                          calibration_image_dir='path/to/dataset',
                          transforms=Compose([HandAlign("mp_handpose"), Resize(size=(224, 224)), Normalize(std=[255, 255, 255]),
                                              ColorConvert(ctype=cv.COLOR_BGR2RGB)]), data_dim='hwc'),
+    lpd_yunet=Quantize(model_path='../../models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar.onnx',
+                       calibration_image_dir='../../benchmark/data/license_plate_detection',
+                       transforms=Compose([Resize(size=(320, 240))])),
 )
 
 if __name__ == '__main__':
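The quantize-ort.py entries drive ONNX Runtime's static post-training quantization. A minimal sketch of the equivalent direct call, assuming a custom CalibrationDataReader; the LPDDataReader class and the 'input' tensor name are illustrative, not this repo's code (the CHW layout follows the dim='chw' setting the removed inc entry used):

```python
import os
import cv2 as cv
import numpy as np
from onnxruntime.quantization import CalibrationDataReader, quantize_static

class LPDDataReader(CalibrationDataReader):
    """Illustrative reader: yields 320x240 float32 NCHW batches, matching
    the Resize(size=(320, 240)) transform in the entry above."""
    def __init__(self, image_dir, input_name='input'):
        self.input_name = input_name
        self.files = iter(
            os.path.join(image_dir, f) for f in os.listdir(image_dir))

    def get_next(self):
        path = next(self.files, None)
        if path is None:
            return None  # signals end of calibration data
        img = cv.resize(cv.imread(path), (320, 240)).astype(np.float32)
        blob = np.transpose(img, (2, 0, 1))[np.newaxis, ...]  # HWC -> NCHW
        return {self.input_name: blob}

quantize_static(
    '../../models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2023mar.onnx',
    'license_plate_detection_lpd_yunet_2023mar_int8.onnx',
    LPDDataReader('../../benchmark/data/license_plate_detection'),
)
```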