import os
import argparse
import yaml
import numpy as np
import cv2 as cv
from models import MODELS
from utils import METRICS, DATALOADERS
parser = argparse.ArgumentParser("Benchmarks for OpenCV Zoo.")
parser.add_argument('--cfg', '-c', type=str,
                    help='Benchmarking on the given config.')
args = parser.parse_args()
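
# The file passed via --cfg is a YAML config with a 'Benchmark' and a 'Model'
# section (see __main__ below). A rough sketch with illustrative values; the
# benchmark type, metric fields, model name and model fields depend on what the
# DATALOADERS, METRICS and MODELS registries actually provide:
#
#   Benchmark:
#     type: "Detection"            # hypothetical registry entry
#     data:
#       path: "data/images"
#       files: ["sample1.jpg", "sample2.jpg"]
#     metric:
#       sizes: [[160, 120]]        # hypothetical metric parameter
#     backend: "default"           # or opencv / cuda / timvx
#     target: "cpu"                # or cuda / cuda_fp16 / npu
#
#   Model:
#     name: "YuNet"                # hypothetical model entry
#     modelPath: "models/face_detection_yunet.onnx"  # hypothetical field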

def build_from_cfg(cfg, registery, key=None, name=None):
    '''Instantiate an object from a registry entry.

    Looks up the class either by the value of cfg[key] (popped from the config)
    or by an explicit name, then constructs it with the remaining config as kwargs.
    '''
    if key is not None:
        obj_name = cfg.pop(key)
        obj = registery.get(obj_name)
        return obj(**cfg)
    elif name is not None:
        obj = registery.get(name)
        return obj(**cfg)
    else:
        raise NotImplementedError()
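
# Illustrative use (the entry name and extra fields are hypothetical; actual
# entries depend on what the METRICS registry provides):
#   metric = build_from_cfg({'type': 'Detection', 'sizes': [[160, 120]]},
#                           registery=METRICS, key='type')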

def prepend_pythonpath(cfg):
    '''Recursively prepend the PYTHONPATH environment variable to every config
    value whose key contains "path" (case-insensitive).'''
    for k, v in cfg.items():
        if isinstance(v, dict):
            prepend_pythonpath(v)
        else:
            if 'path' in k.lower():
                cfg[k] = os.path.join(os.environ['PYTHONPATH'], v)
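
# Example (the key names are illustrative): with PYTHONPATH=/opt/opencv_zoo,
#   {'Model': {'modelPath': 'models/yunet.onnx'}}
# becomes
#   {'Model': {'modelPath': '/opt/opencv_zoo/models/yunet.onnx'}}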

class Benchmark:
    def __init__(self, **kwargs):
        self._type = kwargs.pop('type', None)
        if self._type is None:
            self._type = 'Base'
            print('Benchmark[\'type\'] is omitted, set to \'Base\' by default.')

        # Build the dataloader: an explicit 'type' in the data config wins,
        # otherwise fall back to the benchmark type.
        self._data_dict = kwargs.pop('data', None)
        assert self._data_dict, 'Benchmark[\'data\'] cannot be empty and must have path and files.'
        if 'type' in self._data_dict:
            self._dataloader = build_from_cfg(self._data_dict, registery=DATALOADERS, key='type')
        else:
            self._dataloader = build_from_cfg(self._data_dict, registery=DATALOADERS, name=self._type)

        # Build the metric the same way.
        self._metric_dict = kwargs.pop('metric', None)
        assert self._metric_dict, 'Benchmark[\'metric\'] cannot be empty.'
        if 'type' in self._metric_dict:
            self._metric = build_from_cfg(self._metric_dict, registery=METRICS, key='type')
        else:
            self._metric = build_from_cfg(self._metric_dict, registery=METRICS, name=self._type)

        backend_id = kwargs.pop('backend', 'default')
        available_backends = dict(
            default=cv.dnn.DNN_BACKEND_DEFAULT,
            # halide=cv.dnn.DNN_BACKEND_HALIDE,
            # inference_engine=cv.dnn.DNN_BACKEND_INFERENCE_ENGINE,
            opencv=cv.dnn.DNN_BACKEND_OPENCV,
            # vkcom=cv.dnn.DNN_BACKEND_VKCOM,
            cuda=cv.dnn.DNN_BACKEND_CUDA,
        )

        target_id = kwargs.pop('target', 'cpu')
        available_targets = dict(
            cpu=cv.dnn.DNN_TARGET_CPU,
            # opencl=cv.dnn.DNN_TARGET_OPENCL,
            # opencl_fp16=cv.dnn.DNN_TARGET_OPENCL_FP16,
            # myriad=cv.dnn.DNN_TARGET_MYRIAD,
            # vulkan=cv.dnn.DNN_TARGET_VULKAN,
            # fpga=cv.dnn.DNN_TARGET_FPGA,
            cuda=cv.dnn.DNN_TARGET_CUDA,
            cuda_fp16=cv.dnn.DNN_TARGET_CUDA_FP16,
            # hddl=cv.dnn.DNN_TARGET_HDDL,
        )

        # Add extra backends & targets that are only available in builds with
        # the TIM-VX backend enabled.
        try:
            available_backends['timvx'] = cv.dnn.DNN_BACKEND_TIMVX
            available_targets['npu'] = cv.dnn.DNN_TARGET_NPU
        except AttributeError:
            print('OpenCV is not compiled with TIM-VX backend enabled. See https://github.com/opencv/opencv/wiki/TIM-VX-Backend-For-Running-OpenCV-On-NPU for more details on how to enable TIM-VX backend.')

        self._backend = available_backends[backend_id]
        self._target = available_targets[target_id]

        self._benchmark_results = dict()

    def run(self, model):
        model.setBackend(self._backend)
        model.setTarget(self._target)

        for idx, data in enumerate(self._dataloader):
            filename, input_data = data[:2]
            if filename not in self._benchmark_results:
                self._benchmark_results[filename] = dict()

            # Images report their (width, height) directly; other inputs
            # (e.g. video streams) expose it via getFrameSize().
            if isinstance(input_data, np.ndarray):
                size = [input_data.shape[1], input_data.shape[0]]
            else:
                size = input_data.getFrameSize()

            self._benchmark_results[filename][str(size)] = self._metric.forward(model, *data[1:])

    def printResults(self):
        for imgName, results in self._benchmark_results.items():
            print(' image: {}'.format(imgName))
            total_latency = 0
            for key, latency in results.items():
                total_latency += latency
                print('    {}, latency ({}): {:.4f} ms'.format(key, self._metric.getReduction(), latency))

if __name__ == '__main__':
    assert args.cfg.endswith('yaml'), 'Currently only configs in YAML format are supported.'
    with open(args.cfg, 'r') as f:
        cfg = yaml.safe_load(f)

    # Prepend PYTHONPATH to each path in the config
    prepend_pythonpath(cfg)

    # Instantiate the benchmark
    benchmark = Benchmark(**cfg['Benchmark'])

    # Instantiate the model
    model = build_from_cfg(cfg=cfg['Model'], registery=MODELS, key='name')

    # Run the benchmark
    print('Benchmarking {}:'.format(model.name))
    benchmark.run(model)
    benchmark.printResults()
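
# Typical invocation (the script and config filenames are illustrative).
# PYTHONPATH must be set, since prepend_pythonpath() uses it to resolve
# relative paths in the config:
#   export PYTHONPATH=/path/to/opencv_zoo/benchmark
#   python benchmark.py --cfg config/face_detection_yunet.yaml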