ytfeng commited on
Commit
83c563e
·
1 Parent(s): 237ca2e

Add the missing yaml config for quantizing MP-PalmDet and improve quantized MP-PalmDet (#60)

Browse files
models/palm_detection_mediapipe/README.md CHANGED
@@ -17,8 +17,6 @@ python demo.py
17
  python demo.py -i /path/to/image
18
  ```
19
 
20
- NOTE: For the quantized model, you will need to install OpenCV 4.6.0 to have asymmetric paddings support for quantized convolution layer in OpenCV. Score threshold needs to be adjusted as well for the quantized model, which is empirically 0.49.
21
-
22
  ### Example outputs
23
 
24
  ![webcam demo](./examples/mppalmdet_demo.gif)
 
17
  python demo.py -i /path/to/image
18
  ```
19
 
 
 
20
  ### Example outputs
21
 
22
  ![webcam demo](./examples/mppalmdet_demo.gif)
tools/quantize/inc_configs/mp_palmdet.yaml ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2021 Intel Corporation
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ version: 1.0
17
+
18
+ model: # mandatory. used to specify model specific information.
19
+ name: mp_palmdet
20
+ framework: onnxrt_qlinearops # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integerops, onnxrt_qlinearops or mxnet; allow new framework backend extension.
21
+
22
+ quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space.
23
+ approach: post_training_static_quant # optional. default value is post_training_static_quant.
24
+ calibration:
25
+ dataloader:
26
+ batch_size: 1
27
+ dataset:
28
+ dummy:
29
+ shape: [1, 256, 256, 3]
30
+ low: -1.0
31
+ high: 1.0
32
+ dtype: float32
33
+ label: True
34
+
35
+ tuning:
36
+ accuracy_criterion:
37
+ relative: 0.02 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 2%.
38
+ exit_policy:
39
+ timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit.
40
+ random_seed: 9527 # optional. random seed for deterministic tuning.
tools/quantize/quantize-inc.py CHANGED
@@ -28,10 +28,14 @@ class Quantize:
28
  q_model.save(output_name)
29
 
30
  class Dataset:
31
- def __init__(self, root, size=None, toTensor=False):
32
  self.root = root
33
  self.size = size
34
- self.toTensor = toTensor
 
 
 
 
35
 
36
  self.image_list = self.load_image_list(self.root)
37
 
@@ -45,11 +49,22 @@ class Dataset:
45
 
46
  def __getitem__(self, idx):
47
  img = cv.imread(self.image_list[idx])
 
 
 
 
48
  if self.size:
49
  img = cv.resize(img, dsize=self.size)
50
- if self.toTensor:
51
- img = img.transpose(2, 0, 1) # hwc -> chw
52
  img = img.astype(np.float32)
 
 
 
 
 
 
 
53
  return img, 1
54
 
55
  def __len__(self):
@@ -57,15 +72,15 @@ class Dataset:
57
 
58
  models=dict(
59
  mobilenetv1=Quantize(model_path='../../models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr.onnx',
60
- config_path='./inc_configs/mobilenet.yaml'),
61
  mobilenetv2=Quantize(model_path='../../models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr.onnx',
62
- config_path='./inc_configs/mobilenet.yaml'),
63
- mppalm_det=Quantize(model_path='../../models/palm_detection_mediapipe/palm_detection_mediapipe_2022may.onnx',
64
- config_path='./inc_configs/mppalmdet.yaml',
65
- custom_dataset=Dataset(root='../../benchmark/data/palm_detection')),
66
  lpd_yunet=Quantize(model_path='../../models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2022may.onnx',
67
  config_path='./inc_configs/lpd_yunet.yaml',
68
- custom_dataset=Dataset(root='../../benchmark/data/license_plate_detection', size=(320, 240), toTensor=True)),
69
  )
70
 
71
  if __name__ == '__main__':
 
28
  q_model.save(output_name)
29
 
30
  class Dataset:
31
+ def __init__(self, root, size=None, dim='chw', mean=0.0, std=1.0, swapRB=False, toFP32=False):
32
  self.root = root
33
  self.size = size
34
+ self.dim = dim
35
+ self.mean = mean
36
+ self.std = std
37
+ self.swapRB = swapRB
38
+ self.toFP32 = toFP32
39
 
40
  self.image_list = self.load_image_list(self.root)
41
 
 
49
 
50
  def __getitem__(self, idx):
51
  img = cv.imread(self.image_list[idx])
52
+
53
+ if self.swapRB:
54
+ img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
55
+
56
  if self.size:
57
  img = cv.resize(img, dsize=self.size)
58
+
59
+ if self.toFP32:
60
  img = img.astype(np.float32)
61
+
62
+ img = img - self.mean
63
+ img = img / self.std
64
+
65
+ if self.dim == 'chw':
66
+ img = img.transpose(2, 0, 1) # hwc -> chw
67
+
68
  return img, 1
69
 
70
  def __len__(self):
 
72
 
73
  models=dict(
74
  mobilenetv1=Quantize(model_path='../../models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr.onnx',
75
+ config_path='./inc_configs/mobilenet.yaml'),
76
  mobilenetv2=Quantize(model_path='../../models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr.onnx',
77
+ config_path='./inc_configs/mobilenet.yaml'),
78
+ mp_palmdet=Quantize(model_path='../../models/palm_detection_mediapipe/palm_detection_mediapipe_2022may.onnx',
79
+ config_path='./inc_configs/mp_palmdet.yaml',
80
+ custom_dataset=Dataset(root='../../benchmark/data/palm_detection', dim='hwc', swapRB=True, mean=127.5, std=127.5, toFP32=True)),
81
  lpd_yunet=Quantize(model_path='../../models/license_plate_detection_yunet/license_plate_detection_lpd_yunet_2022may.onnx',
82
  config_path='./inc_configs/lpd_yunet.yaml',
83
+ custom_dataset=Dataset(root='../../benchmark/data/license_plate_detection', size=(320, 240), dim='chw', toFP32=True)),
84
  )
85
 
86
  if __name__ == '__main__':