add evaluation framework and imagenet evaluation (#69)

Files changed:

- benchmark/config/image_classification_mobilenetv1.yaml +1 -1
- benchmark/config/image_classification_mobilenetv2.yaml +1 -1
- benchmark/config/image_classification_ppresnet.yaml +1 -1
- models/image_classification_mobilenet/README.md +12 -0
- models/image_classification_mobilenet/mobilenet_v1.py +20 -8
- models/image_classification_mobilenet/mobilenet_v2.py +20 -8
- models/image_classification_ppresnet/README.md +11 -1
- models/image_classification_ppresnet/ppresnet.py +22 -7
- tools/eval/README.md +55 -0
- tools/eval/datasets/__init__.py +15 -0
- tools/eval/datasets/imagenet.py +64 -0
- tools/eval/eval.py +89 -0
benchmark/config/image_classification_mobilenetv1.yaml    CHANGED

```diff
@@ -17,4 +17,4 @@ Benchmark:
 Model:
   name: "MobileNetV1"
   modelPath: "models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr.onnx"
-
+
```
benchmark/config/image_classification_mobilenetv2.yaml    CHANGED

```diff
@@ -17,4 +17,4 @@ Benchmark:
 Model:
   name: "MobileNetV2"
   modelPath: "models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr.onnx"
-
+
```
benchmark/config/image_classification_ppresnet.yaml    CHANGED

```diff
@@ -17,4 +17,4 @@ Benchmark:
 Model:
   name: "PPResNet"
   modelPath: "models/image_classification_ppresnet/image_classification_ppresnet50_2022jan.onnx"
-
+
```
models/image_classification_mobilenet/README.md    CHANGED

```diff
@@ -4,6 +4,17 @@ MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications
 
 MobileNetV2: Inverted Residuals and Linear Bottlenecks
 
+Results of accuracy evaluation with [tools/eval](../../tools/eval).
+
+| Models | Top-1 Accuracy | Top-5 Accuracy |
+| ------ | -------------- | -------------- |
+| MobileNet V1 | 67.64 | 87.97 |
+| MobileNet V1 quant | 55.53 | 78.74 |
+| MobileNet V2 | 69.44 | 89.23 |
+| MobileNet V2 quant | 68.37 | 88.56 |
+
+\*: 'quant' stands for 'quantized'.
+
 ## Demo
 
 Run the following command to try the demo:
@@ -24,3 +35,4 @@ All files in this directory are licensed under [Apache 2.0 License](./LICENSE).
 - MobileNet V2: https://arxiv.org/abs/1801.04381
 - MobileNet V1 weight and scripts for training: https://github.com/wjc852456/pytorch-mobilenet-v1
 - MobileNet V2 weight: https://github.com/onnx/models/tree/main/vision/classification/mobilenet
+
```
models/image_classification_mobilenet/mobilenet_v1.py    CHANGED

```diff
@@ -2,9 +2,11 @@ import numpy as np
 import cv2 as cv
 
 class MobileNetV1:
-    def __init__(self, modelPath, labelPath, backendId=0, targetId=0):
+    def __init__(self, modelPath, labelPath=None, topK=1, backendId=0, targetId=0):
         self.model_path = modelPath
         self.label_path = labelPath
+        assert topK >= 1
+        self.top_k = topK
         self.backend_id = backendId
         self.target_id = targetId
 
@@ -23,9 +25,10 @@ class MobileNetV1:
 
     def _load_labels(self):
         labels = []
-        with open(self.label_path, 'r') as f:
-            for line in f:
-                labels.append(line.strip())
+        if self.label_path is not None:
+            with open(self.label_path, 'r') as f:
+                for line in f:
+                    labels.append(line.strip())
         return labels
 
     @property
@@ -61,9 +64,18 @@ class MobileNetV1:
         return results
 
     def _postprocess(self, output_blob):
-        predicted_labels = []
+        batched_class_id_list = []
         for o in output_blob:
-            class_id = np.argmax(o)
-            predicted_labels.append(self._labels[class_id])
-        return predicted_labels
+            class_id_list = o.argsort()[::-1][:self.top_k]
+            batched_class_id_list.append(class_id_list)
+        if len(self.labels) > 0:
+            batched_predicted_labels = []
+            for class_id_list in batched_class_id_list:
+                predicted_labels = []
+                for class_id in class_id_list:
+                    predicted_labels.append(self._labels[class_id])
+                batched_predicted_labels.append(predicted_labels)
+            return batched_predicted_labels
+        else:
+            return batched_class_id_list
 
```
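The heart of the new `_postprocess` is the descending top-K selection `o.argsort()[::-1][:self.top_k]`. A minimal numpy sketch of that expression, with made-up scores, shows how it picks class ids:

```python
import numpy as np

# Made-up score vector for one image; five classes for illustration.
scores = np.array([0.10, 0.70, 0.05, 0.90, 0.15])

top_k = 3
# argsort() gives indices in ascending score order, [::-1] flips to
# descending, and [:top_k] keeps the K highest-scoring class ids.
class_id_list = scores.argsort()[::-1][:top_k]
print(class_id_list)  # -> [3 1 4]
```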
models/image_classification_mobilenet/mobilenet_v2.py    CHANGED

```diff
@@ -2,9 +2,11 @@ import numpy as np
 import cv2 as cv
 
 class MobileNetV2:
-    def __init__(self, modelPath, labelPath, backendId=0, targetId=0):
+    def __init__(self, modelPath, labelPath=None, topK=1, backendId=0, targetId=0):
         self.model_path = modelPath
         self.label_path = labelPath
+        assert topK >= 1
+        self.top_k = topK
         self.backend_id = backendId
         self.target_id = targetId
 
@@ -23,9 +25,10 @@ class MobileNetV2:
 
     def _load_labels(self):
         labels = []
-        with open(self.label_path, 'r') as f:
-            for line in f:
-                labels.append(line.strip())
+        if self.label_path is not None:
+            with open(self.label_path, 'r') as f:
+                for line in f:
+                    labels.append(line.strip())
         return labels
 
     @property
@@ -61,9 +64,18 @@ class MobileNetV2:
         return results
 
     def _postprocess(self, output_blob):
-        predicted_labels = []
+        batched_class_id_list = []
         for o in output_blob:
-            class_id = np.argmax(o)
-            predicted_labels.append(self._labels[class_id])
-        return predicted_labels
+            class_id_list = o.argsort()[::-1][:self.top_k]
+            batched_class_id_list.append(class_id_list)
+        if len(self.labels) > 0:
+            batched_predicted_labels = []
+            for class_id_list in batched_class_id_list:
+                predicted_labels = []
+                for class_id in class_id_list:
+                    predicted_labels.append(self._labels[class_id])
+                batched_predicted_labels.append(predicted_labels)
+            return batched_predicted_labels
+        else:
+            return batched_class_id_list
 
```
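MobileNetV2 gets the identical interface changes, so both wrappers can be driven the same way by the evaluator. A hypothetical usage sketch (it assumes the ONNX file has been downloaded next to the script; the random array stands in for a real 224x224 RGB image):

```python
import numpy as np
from mobilenet_v2 import MobileNetV2

# labelPath=None means _load_labels() returns [], so _postprocess
# falls back to returning class ids rather than label strings.
model = MobileNetV2(
    modelPath="image_classification_mobilenetv2_2022apr.onnx",
    labelPath=None,
    topK=5)

img = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # stand-in image
top5_class_ids = model.infer(img)[0]  # ids of the 5 highest-scoring classes
```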
models/image_classification_ppresnet/README.md    CHANGED

```diff
@@ -4,6 +4,15 @@ Deep Residual Learning for Image Recognition
 
 This model is ported from [PaddleHub](https://github.com/PaddlePaddle/PaddleHub) using [this script from OpenCV](https://github.com/opencv/opencv/blob/master/samples/dnn/dnn_model_runner/dnn_conversion/paddlepaddle/paddle_resnet50.py).
 
+Results of accuracy evaluation with [tools/eval](../../tools/eval).
+
+| Models | Top-1 Accuracy | Top-5 Accuracy |
+| ------ | -------------- | -------------- |
+| PP-ResNet | 82.28 | 96.15 |
+| PP-ResNet quant | 0.22 | 0.96 |
+
+\*: 'quant' stands for 'quantized'.
+
 ## Demo
 
 Run the following command to try the demo:
@@ -19,4 +28,5 @@ All files in this directory are licensed under [Apache 2.0 License](./LICENSE).
 
 - https://arxiv.org/abs/1512.03385
 - https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/paddlepaddle
-- https://github.com/PaddlePaddle/PaddleHub
+- https://github.com/PaddlePaddle/PaddleHub
+
```
models/image_classification_ppresnet/ppresnet.py    CHANGED

```diff
@@ -9,9 +9,11 @@ import numpy as np
 import cv2 as cv
 
 class PPResNet:
-    def __init__(self, modelPath, labelPath, backendId=0, targetId=0):
+    def __init__(self, modelPath, labelPath=None, topK=1, backendId=0, targetId=0):
         self._modelPath = modelPath
         self._labelPath = labelPath
+        assert topK >= 1
+        self._topK = topK
         self._backendId = backendId
         self._targetId = targetId
 
@@ -30,9 +32,10 @@ class PPResNet:
 
     def _load_labels(self):
         labels = []
-        with open(self._labelPath, 'r') as f:
-            for line in f:
-                labels.append(line.strip())
+        if self._labelPath is not None:
+            with open(self._labelPath, 'r') as f:
+                for line in f:
+                    labels.append(line.strip())
         return labels
 
     @property
@@ -65,11 +68,23 @@ class PPResNet:
         outputBlob = self._model.forward(self._outputNames)
 
         # Postprocess
-        results = self._postprocess(outputBlob)
+        results = self._postprocess(outputBlob[0])
 
         return results
 
     def _postprocess(self, outputBlob):
-        class_id = np.argmax(outputBlob[0])
-        return self._labels[class_id]
+        batched_class_id_list = []
+        for ob in outputBlob:
+            class_id_list = ob.argsort()[::-1][:self._topK]
+            batched_class_id_list.append(class_id_list)
+        if len(self._labels) > 0:
+            batched_predicted_labels = []
+            for class_id_list in batched_class_id_list:
+                predicted_labels = []
+                for class_id in class_id_list:
+                    predicted_labels.append(self._labels[class_id])
+                batched_predicted_labels.append(predicted_labels)
+            return batched_predicted_labels
+        else:
+            return batched_class_id_list
 
```
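One subtle fix rides along here: `cv.dnn.Net.forward()` called with a list of output names returns a list of blobs, one per name, so `infer` now unwraps `outputBlob[0]` before handing `_postprocess` the `(batch, num_classes)` score matrix it iterates over. A small sketch with dummy data (shapes and values are illustrative):

```python
import numpy as np

# Stand-in for what net.forward(outputNames) returns: a list with one
# output blob, here a (1, 1000) score matrix of made-up values.
outputBlob = [np.random.rand(1, 1000)]

scores = outputBlob[0]      # unwrap the single requested output
top_k = 5
for ob in scores:           # one row of class scores per image
    print(ob.argsort()[::-1][:top_k])
```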
tools/eval/README.md    ADDED

````markdown
# Accuracy evaluation of models in OpenCV Zoo

Make sure you have the following packages installed:

```shell
pip install tqdm
```

Generally speaking, evaluation can be done with the following command:

```shell
python eval.py -m model_name -d dataset_name -dr dataset_root_dir
```

Supported datasets:
- [ImageNet](./datasets/imagenet.py)

## ImageNet

### Prepare data

Please visit https://image-net.org/ to download the ImageNet dataset and [the labels from caffe](http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz). Organize files as follows:

```shell
$ tree -L 2 /path/to/imagenet
.
├── caffe_ilsvrc12
│   ├── det_synset_words.txt
│   ├── imagenet.bet.pickle
│   ├── imagenet_mean.binaryproto
│   ├── synsets.txt
│   ├── synset_words.txt
│   ├── test.txt
│   ├── train.txt
│   └── val.txt
├── caffe_ilsvrc12.tar.gz
├── ILSVRC
│   ├── Annotations
│   ├── Data
│   └── ImageSets
├── imagenet_object_localization_patched2019.tar.gz
├── LOC_sample_submission.csv
├── LOC_synset_mapping.txt
├── LOC_train_solution.csv
└── LOC_val_solution.csv
```

### Evaluation

Run evaluation with the following command:

```shell
python eval.py -m mobilenetv1 -d imagenet -dr /path/to/imagenet
```
````
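Note that `eval.py` (below) locates the repo root via the `PYTHONPATH` environment variable, falling back to `../..`, so the commands above assume `tools/eval` as the working directory. For example (paths are illustrative):

```shell
cd tools/eval
python eval.py -m mobilenetv1 -d imagenet -dr /path/to/imagenet

# or run from anywhere by pointing PYTHONPATH at the repo root:
PYTHONPATH=/path/to/opencv_zoo python /path/to/opencv_zoo/tools/eval/eval.py \
    -m mobilenetv1 -d imagenet -dr /path/to/imagenet
```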
tools/eval/datasets/__init__.py    ADDED

```python
from .imagenet import ImageNet

class Registery:
    def __init__(self, name):
        self._name = name
        self._dict = dict()

    def get(self, key):
        return self._dict[key]

    def register(self, item):
        self._dict[item.__name__] = item

DATASETS = Registery("Datasets")
DATASETS.register(ImageNet)
```
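`Registery` (spelling as in the source) is just a name-to-class map keyed on `__name__`, which is what lets `eval.py` look datasets up by the string in its config table. A hedged sketch of how another dataset would plug in; the `CIFAR10` class here is hypothetical:

```python
# Hypothetical dataset exposing the same interface ImageNet does:
# eval(model), get_result(), print_result().
class CIFAR10:
    def __init__(self, root, size=32):
        self.root = root
        self.size = size

    def eval(self, model):
        pass  # iterate the val split and count top-1/top-5 hits

DATASETS.register(CIFAR10)                         # stored under the key "CIFAR10"
dataset = DATASETS.get("CIFAR10")(root="/path/to/cifar10")
```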
tools/eval/datasets/imagenet.py    ADDED

```python
import os

import numpy as np
import cv2 as cv

from tqdm import tqdm

class ImageNet:
    def __init__(self, root, size=224):
        self.root = root
        self.size = size
        self.top1_acc = -1
        self.top5_acc = -1

        self.root_val = os.path.join(self.root, "ILSVRC", "Data", "CLS-LOC", "val")
        self.val_label_file = os.path.join(self.root, "caffe_ilsvrc12", "val.txt")

        self.val_label = self.load_label(self.val_label_file)

    @property
    def name(self):
        return self.__class__.__name__

    def load_label(self, label_file):
        label = list()
        with open(label_file, "r") as f:
            for line in f:
                line = line.strip()
                key, value = line.split()

                key = os.path.join(self.root_val, key)
                value = int(value)

                label.append([key, value])

        return label

    def eval(self, model):
        top_1_hits = 0
        top_5_hits = 0
        pbar = tqdm(self.val_label)
        for fn, label in pbar:
            pbar.set_description("Evaluating {} with {} val set".format(model.name, self.name))

            img = cv.imread(fn)
            img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
            img = cv.resize(img, dsize=(256, 256))
            img = img[16:240, 16:240, :]

            pred = model.infer(img)
            if label == pred[0][0]:
                top_1_hits += 1
            if label in pred[0]:
                top_5_hits += 1

        self.top1_acc = top_1_hits/(len(self.val_label) * 1.0)
        self.top5_acc = top_5_hits/(len(self.val_label) * 1.0)

    def get_result(self):
        return self.top1_acc, self.top5_acc

    def print_result(self):
        print("Top-1 Accuracy: {:.2f}%; Top-5 Accuracy: {:.2f}%".format(self.top1_acc*100, self.top5_acc*100))
```
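The evaluator resizes every validation image to 256x256 and slices `[16:240, 16:240, :]`, which is exactly a centered 224x224 crop (256 - 224 = 32 pixels, 16 trimmed from each side) matching the default `size=224`; note the crop bounds are hard-coded rather than derived from `self.size`. A quick check with a dummy array:

```python
import numpy as np

img = np.zeros((256, 256, 3), dtype=np.uint8)  # stand-in for the resized image
crop = img[16:240, 16:240, :]                  # trim 16 px from each border
assert crop.shape == (224, 224, 3)             # 240 - 16 = 224 per axis
```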
tools/eval/eval.py    ADDED

```python
import os
import sys
import argparse

import numpy as np
import cv2 as cv

from datasets import DATASETS

if "PYTHONPATH" in os.environ:
    root_dir = os.environ["PYTHONPATH"]
else:
    root_dir = os.path.join("..", "..")
sys.path.append(root_dir)
from models import MODELS

parser = argparse.ArgumentParser("Evaluation with OpenCV on different models in the zoo.")
parser.add_argument("--model", "-m", type=str, required=True, help="model name")
parser.add_argument("--dataset", "-d", type=str, required=True, help="Dataset name")
parser.add_argument("--dataset_root", "-dr", type=str, required=True, help="Root directory of given dataset")
args = parser.parse_args()

models = dict(
    mobilenetv1=dict(
        name="MobileNetV1",
        topic="image_classification",
        modelPath=os.path.join(root_dir, "models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr.onnx"),
        topK=5),
    mobilenetv1_q=dict(
        name="MobileNetV1",
        topic="image_classification",
        modelPath=os.path.join(root_dir, "models/image_classification_mobilenet/image_classification_mobilenetv1_2022apr-int8-quantized.onnx"),
        topK=5),
    mobilenetv2=dict(
        name="MobileNetV2",
        topic="image_classification",
        modelPath=os.path.join(root_dir, "models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr.onnx"),
        topK=5),
    mobilenetv2_q=dict(
        name="MobileNetV2",
        topic="image_classification",
        modelPath=os.path.join(root_dir, "models/image_classification_mobilenet/image_classification_mobilenetv2_2022apr-int8-quantized.onnx"),
        topK=5),
    ppresnet=dict(
        name="PPResNet",
        topic="image_classification",
        modelPath=os.path.join(root_dir, "models/image_classification_ppresnet/image_classification_ppresnet50_2022jan.onnx"),
        topK=5),
    ppresnet_q=dict(
        name="PPResNet",
        topic="image_classification",
        modelPath=os.path.join(root_dir, "models/image_classification_ppresnet/image_classification_ppresnet50_2022jan-act_int8-wt_int8-quantized.onnx"),
        topK=5),
)

datasets = dict(
    imagenet=dict(
        name="ImageNet",
        topic="image_classification",
        size=224),
)

def main(args):
    # Instantiate model
    model_key = args.model.lower()
    assert model_key in models

    model_name = models[model_key].pop("name")
    model_topic = models[model_key].pop("topic")
    model = MODELS.get(model_name)(**models[model_key])

    # Instantiate dataset
    dataset_key = args.dataset.lower()
    assert dataset_key in datasets

    dataset_name = datasets[dataset_key].pop("name")
    dataset_topic = datasets[dataset_key].pop("topic")
    dataset = DATASETS.get(dataset_name)(root=args.dataset_root, **datasets[dataset_key])

    # Check if model_topic matches dataset_topic
    assert model_topic == dataset_topic

    # Run evaluation
    dataset.eval(model)
    dataset.print_result()


if __name__ == "__main__":
    main(args)
```
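Because each entry in the `models` table is just the wrapper's constructor kwargs plus `name`/`topic` keys that `main` pops off before instantiation, wiring in another variant is a single dict entry. A hypothetical example (the key and the elided path are made up for illustration):

```python
# Hypothetical extra entry for the `models` dict in eval.py; the dict key is
# what -m matches against, and `name` must be registered in models/__init__.py.
models["mymodel_q"] = dict(
    name="MobileNetV1",
    topic="image_classification",   # must equal the dataset's topic
    modelPath=os.path.join(root_dir, "models/.../my_model-quantized.onnx"),
    topK=5)
```

One quirk worth knowing: `pop("name")` mutates the config in place, so each entry can be instantiated only once per process, which is fine for this one-shot CLI.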