ytfeng committed
Commit 2c09648 · 1 Parent(s): 8d3a72f

Limit combinations of backends and targets in demos and benchmark (#145)


* limit backend and target combinations in demos and benchmark

* simpler version checking

Files changed (2)
  1. demo.py +41 -32
  2. lpd_yunet.py +2 -4
demo.py CHANGED
@@ -5,37 +5,44 @@ import cv2 as cv

 from lpd_yunet import LPD_YuNet

-def str2bool(v):
-    if v.lower() in ['on', 'yes', 'true', 'y', 't']:
-        return True
-    elif v.lower() in ['off', 'no', 'false', 'n', 'f']:
-        return False
-    else:
-        raise NotImplementedError
-
-backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
-targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]
-help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"
-help_msg_targets = "Chose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"
-try:
-    backends += [cv.dnn.DNN_BACKEND_TIMVX]
-    targets += [cv.dnn.DNN_TARGET_NPU]
-    help_msg_backends += "; {:d}: TIMVX"
-    help_msg_targets += "; {:d}: NPU"
-except:
-    print('This version of OpenCV does not support TIM-VX and NPU. Visit https://github.com/opencv/opencv/wiki/TIM-VX-Backend-For-Running-OpenCV-On-NPU for more information.')
+# Check OpenCV version
+assert cv.__version__ >= "4.7.0", \
+       "Please install latest opencv-python to try this demo: python3 -m pip install --upgrade opencv-python"
+
+# Valid combinations of backends and targets
+backend_target_pairs = [
+    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
+    [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA],
+    [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16],
+    [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU],
+    [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU]
+]

 parser = argparse.ArgumentParser(description='LPD-YuNet for License Plate Detection')
-parser.add_argument('--input', '-i', type=str, help='Usage: Set path to the input image. Omit for using default camera.')
-parser.add_argument('--model', '-m', type=str, default='license_plate_detection_lpd_yunet_2022may.onnx', help='Usage: Set model path, defaults to license_plate_detection_lpd_yunet_2022may.onnx.')
-parser.add_argument('--backend', '-b', type=int, default=backends[0], help=help_msg_backends.format(*backends))
-parser.add_argument('--target', '-t', type=int, default=targets[0], help=help_msg_targets.format(*targets))
-parser.add_argument('--conf_threshold', type=float, default=0.9, help='Usage: Set the minimum needed confidence for the model to identify a license plate, defaults to 0.9. Smaller values may result in faster detection, but will limit accuracy. Filter out faces of confidence < conf_threshold.')
-parser.add_argument('--nms_threshold', type=float, default=0.3, help='Usage: Suppress bounding boxes of iou >= nms_threshold. Default = 0.3. Suppress bounding boxes of iou >= nms_threshold.')
-parser.add_argument('--top_k', type=int, default=5000, help='Usage: Keep top_k bounding boxes before NMS.')
-parser.add_argument('--keep_top_k', type=int, default=750, help='Usage: Keep keep_top_k bounding boxes after NMS.')
-parser.add_argument('--save', '-s', type=str2bool, default=False, help='Usage: Set “True” to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input. Default will be set to “False”.')
-parser.add_argument('--vis', '-v', type=str2bool, default=True, help='Usage: Default will be set to “True” and will open a new window to show results. Set to “False” to stop visualizations from being shown. Invalid in case of camera input.')
+parser.add_argument('--input', '-i', type=str,
+                    help='Usage: Set path to the input image. Omit for using default camera.')
+parser.add_argument('--model', '-m', type=str, default='license_plate_detection_lpd_yunet_2023mar.onnx',
+                    help='Usage: Set model path, defaults to license_plate_detection_lpd_yunet_2023mar.onnx.')
+parser.add_argument('--backend_target', '-bt', type=int, default=0,
+                    help='''Choose one of the backend-target pair to run this demo:
+                        {:d}: (default) OpenCV implementation + CPU,
+                        {:d}: CUDA + GPU (CUDA),
+                        {:d}: CUDA + GPU (CUDA FP16),
+                        {:d}: TIM-VX + NPU,
+                        {:d}: CANN + NPU
+                    '''.format(*[x for x in range(len(backend_target_pairs))]))
+parser.add_argument('--conf_threshold', type=float, default=0.9,
+                    help='Usage: Set the minimum needed confidence for the model to identify a license plate, defaults to 0.9. Smaller values may result in faster detection, but will limit accuracy. Filter out faces of confidence < conf_threshold.')
+parser.add_argument('--nms_threshold', type=float, default=0.3,
+                    help='Usage: Suppress bounding boxes of iou >= nms_threshold. Default = 0.3. Suppress bounding boxes of iou >= nms_threshold.')
+parser.add_argument('--top_k', type=int, default=5000,
+                    help='Usage: Keep top_k bounding boxes before NMS.')
+parser.add_argument('--keep_top_k', type=int, default=750,
+                    help='Usage: Keep keep_top_k bounding boxes after NMS.')
+parser.add_argument('--save', '-s', action='store_true',
+                    help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.')
+parser.add_argument('--vis', '-v', action='store_true',
+                    help='Usage: Specify to open a new window to show results. Invalid in case of camera input.')
 args = parser.parse_args()

 def visualize(image, dets, line_color=(0, 255, 0), text_color=(0, 0, 255), fps=None):
@@ -57,14 +64,17 @@ def visualize(image, dets, line_color=(0, 255, 0), text_color=(0, 0, 255), fps=N
     return output

 if __name__ == '__main__':
+    backend_id = backend_target_pairs[args.backend_target][0]
+    target_id = backend_target_pairs[args.backend_target][1]
+
     # Instantiate LPD-YuNet
     model = LPD_YuNet(modelPath=args.model,
                       confThreshold=args.conf_threshold,
                       nmsThreshold=args.nms_threshold,
                       topK=args.top_k,
                       keepTopK=args.keep_top_k,
-                      backendId=args.backend,
-                      targetId=args.target)
+                      backendId=backend_id,
+                      targetId=target_id)

     # If input is an image
     if args.input is not None:
@@ -117,4 +127,3 @@ if __name__ == '__main__':
             cv.imshow('LPD-YuNet Demo', frame)

             tm.reset()
-
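For reference, here is how the new single-index selection in demo.py resolves to a backend/target pair and reaches the detector. This is a minimal sketch that mirrors the diff above (the pair table, model file name, and LPD_YuNet constructor keywords are taken from it); the hard-coded index and threshold values simply stand in for the parsed command-line arguments.

import cv2 as cv
from lpd_yunet import LPD_YuNet

# Same validated pair table as in demo.py; the list index is what --backend_target selects.
backend_target_pairs = [
    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],      # 0: default, runs everywhere
    [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA],       # 1: CUDA
    [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16],  # 2: CUDA FP16
    [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU],       # 3: TIM-VX NPU
    [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU]         # 4: CANN NPU
]

# Unpack one validated pair instead of accepting arbitrary backend/target combinations.
backend_id, target_id = backend_target_pairs[0]

model = LPD_YuNet(modelPath='license_plate_detection_lpd_yunet_2023mar.onnx',
                  confThreshold=0.9,
                  nmsThreshold=0.3,
                  topK=5000,
                  keepTopK=750,
                  backendId=backend_id,
                  targetId=target_id)

From the command line, the same choice is made with the new flag, e.g. python demo.py --backend_target 2 (or -bt 2) to request the CUDA FP16 pair; omitting the flag keeps the OpenCV + CPU default.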
lpd_yunet.py CHANGED
@@ -28,12 +28,10 @@ class LPD_YuNet:
     def name(self):
         return self.__class__.__name__

-    def setBackend(self, backendId):
+    def setBackendAndTarget(self, backendId, targetId):
         self.backend_id = backendId
-        self.model.setPreferableBackend(self.backend_id)
-
-    def setTarget(self, targetId):
         self.target_id = targetId
+        self.model.setPreferableBackend(self.backend_id)
         self.model.setPreferableTarget(self.target_id)

     def setInputSize(self, inputSize):
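The two setters are merged, so callers that switch compute devices on an existing detector now do it in one call. Below is a minimal sketch of the new method; it assumes the OpenCV build actually provides the requested backend (here CUDA), and the constructor keywords follow demo.py above.

import cv2 as cv
from lpd_yunet import LPD_YuNet

# Build the detector on the portable default backend/target first.
model = LPD_YuNet(modelPath='license_plate_detection_lpd_yunet_2023mar.onnx',
                  confThreshold=0.9,
                  nmsThreshold=0.3,
                  topK=5000,
                  keepTopK=750,
                  backendId=cv.dnn.DNN_BACKEND_OPENCV,
                  targetId=cv.dnn.DNN_TARGET_CPU)

# Re-target in a single call; before this commit it took separate
# setBackend() and setTarget() calls.
model.setBackendAndTarget(cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)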