ytfeng commited on
Commit
7c978be
·
1 Parent(s): 55d9fad

Remove DaSiamRPN since we have its successor VitTrack now (#213)

Browse files
README.md CHANGED
@@ -71,9 +71,9 @@ Some examples are listed below. You can find more in the directory of each model
71
 
72
  ![yolox demo](./models/object_detection_yolox/example_outputs/3_res.jpg)
73
 
74
- ### Object Tracking with [DaSiamRPN](./models/object_tracking_dasiamrpn/)
75
 
76
- ![webcam demo](./models/object_tracking_dasiamrpn/example_outputs/dasiamrpn_demo.gif)
77
 
78
  ### Palm Detection with [MP-PalmDet](./models/palm_detection_mediapipe/)
79
 
 
71
 
72
  ![yolox demo](./models/object_detection_yolox/example_outputs/3_res.jpg)
73
 
74
+ ### Object Tracking with [VitTrack](./models/object_tracking_vittrack/)
75
 
76
+ ![webcam demo](./models/object_tracking_vittrack/example_outputs/vittrack_demo.gif)
77
 
78
  ### Palm Detection with [MP-PalmDet](./models/palm_detection_mediapipe/)
79
 
benchmark/README.md CHANGED
@@ -31,7 +31,7 @@ python benchmark.py --all --fp32
31
 
32
 # All configs but exclude some of them (fill with config name keywords, not sensitive to upper/lower case, separate with colons)
33
  python benchmark.py --all --cfg_exclude wechat
34
- python benchmark.py --all --cfg_exclude wechat:dasiamrpn
35
 
36
 # All configs but exclude some of the models (fill with exact model names, sensitive to upper/lower case, separate with colons)
37
  python benchmark.py --all --model_exclude license_plate_detection_lpd_yunet_2023mar_int8.onnx:human_segmentation_pphumanseg_2023mar_int8.onnx
@@ -94,7 +94,6 @@ mean median min input size model
94
  46.10 47.53 43.06 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
95
  144.89 149.58 125.71 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
96
  143.83 146.39 119.75 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
97
- 23.43 22.82 20.90 [1280, 720] DaSiamRPN with ['object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx', 'object_tracking_dasiamrpn_kernel_r1_2021nov.onnx', 'object_tracking_dasiamrpn_model_2021nov.onnx']
98
  12.99 13.11 12.14 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
99
  12.64 12.44 10.82 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
100
  12.64 11.83 11.03 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
@@ -149,7 +148,6 @@ mean median min input size model
149
  212.90 212.93 209.55 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
150
  1690.06 2303.34 1480.63 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
151
  1489.54 1435.48 1308.12 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
152
- 564.90 580.35 527.49 [1280, 720] DaSiamRPN with ['object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx', 'object_tracking_dasiamrpn_kernel_r1_2021nov.onnx', 'object_tracking_dasiamrpn_model_2021nov.onnx']
153
  356.63 357.29 354.42 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
154
  217.52 229.39 101.61 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
155
  198.63 198.25 196.68 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
@@ -205,7 +203,6 @@ mean median min input size model
205
  216.18 216.19 214.30 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
206
  1207.83 1208.71 1203.64 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
207
  1236.98 1250.21 1203.64 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
208
- 456.79 456.90 445.83 [1280, 720] DaSiamRPN with ['object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx', 'object_tracking_dasiamrpn_kernel_r1_2021nov.onnx', 'object_tracking_dasiamrpn_model_2021nov.onnx']
209
  124.89 125.25 124.53 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
210
  107.99 109.82 94.05 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
211
  108.41 108.33 107.91 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
@@ -244,7 +241,6 @@ mean median min input size model
244
  54.24 55.24 52.87 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx']
245
  63.63 63.43 63.32 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx']
246
  371.45 378.00 366.39 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
247
- 77.50 77.73 76.16 [1280, 720] DaSiamRPN with ['object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx', 'object_tracking_dasiamrpn_kernel_r1_2021nov.onnx', 'object_tracking_dasiamrpn_model_2021nov.onnx']
248
  33.85 33.90 33.61 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
249
  38.16 37.33 37.10 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
250
  91.65 91.98 89.90 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx']
@@ -275,7 +271,6 @@ mean median min input size model
275
  366.46 366.88 363.46 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx']
276
  163.06 163.34 161.77 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx']
277
  301.10 311.52 297.74 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
278
- 53.34 54.30 51.79 [1280, 720] DaSiamRPN with ['object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx', 'object_tracking_dasiamrpn_kernel_r1_2021nov.onnx', 'object_tracking_dasiamrpn_model_2021nov.onnx']
279
  149.37 149.95 148.01 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
280
  153.89 153.96 153.43 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
281
  44.29 44.03 43.62 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx']
@@ -323,7 +318,6 @@ mean median min input size model
323
  212.69 262.75 170.88 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
324
  1110.87 1112.27 1085.31 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
325
  1128.73 1157.12 1085.31 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
326
- 382.57 464.42 354.66 [1280, 720] DaSiamRPN with ['object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx', 'object_tracking_dasiamrpn_kernel_r1_2021nov.onnx', 'object_tracking_dasiamrpn_model_2021nov.onnx']
327
  147.01 144.01 139.27 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
328
  119.70 118.95 94.09 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
329
  107.63 107.09 105.61 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
@@ -404,7 +398,6 @@ mean median min input size model
404
  322.98 323.45 312.13 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
405
  1875.33 1877.53 1871.26 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
406
  1989.04 2005.25 1871.26 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
407
- 637.54 640.61 626.98 [1280, 720] DaSiamRPN with ['object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx', 'object_tracking_dasiamrpn_kernel_r1_2021nov.onnx', 'object_tracking_dasiamrpn_model_2021nov.onnx']
408
  159.80 159.62 159.40 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
409
  152.18 152.86 145.56 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
410
  145.83 145.77 145.45 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
@@ -426,7 +419,7 @@ mean median min input size model
426
  NPU (CANN):
427
 
428
  ```
429
- $ python3 benchmark.py --all --fp32 --cfg_exclude wechat:dasiamrpn:crnn --model_exclude pose_estimation_mediapipe_2023mar.onnx --cfg_overwrite_backend_target 4
430
  Benchmarking ...
431
  backend=cv.dnn.DNN_BACKEND_CANN
432
  target=cv.dnn.DNN_TARGET_NPU
@@ -485,7 +478,6 @@ mean median min input size model
485
  1903.82 1962.71 1533.79 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
486
  37604.10 37569.30 37502.48 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
487
  24229.20 25577.94 13483.54 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
488
- 14860.23 14988.15 14769.91 [1280, 720] DaSiamRPN with ['object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx', 'object_tracking_dasiamrpn_kernel_r1_2021nov.onnx', 'object_tracking_dasiamrpn_model_2021nov.onnx']
489
  1133.44 1131.54 1124.83 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
490
  883.96 919.07 655.33 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
491
  1430.98 1424.55 1415.68 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
@@ -541,7 +533,6 @@ mean median min input size model
541
  117.28 150.31 83.33 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
542
  553.58 558.76 535.47 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
543
  594.18 592.64 535.47 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
544
- 138.82 151.00 113.82 [1280, 720] DaSiamRPN with ['object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx', 'object_tracking_dasiamrpn_kernel_r1_2021nov.onnx', 'object_tracking_dasiamrpn_model_2021nov.onnx']
545
  56.35 55.73 55.25 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
546
  57.07 57.19 55.25 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
547
  47.94 48.41 47.05 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
@@ -596,7 +587,6 @@ mean median min input size model
596
  406.28 416.58 385.68 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
597
  2608.90 2612.42 2597.93 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
598
  2609.88 2609.39 2597.93 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
599
- 809.55 814.66 794.67 [1280, 720] DaSiamRPN with ['object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx', 'object_tracking_dasiamrpn_kernel_r1_2021nov.onnx', 'object_tracking_dasiamrpn_model_2021nov.onnx']
600
  228.95 228.74 228.35 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
601
  227.97 228.61 226.76 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
602
  192.29 192.26 191.74 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
@@ -653,7 +643,6 @@ mean median min input size model
653
  3002.36 3047.94 2655.38 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
654
  50678.08 50651.82 50651.19 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
655
  36249.71 37771.22 24606.37 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
656
- 19974.99 19984.80 19948.63 [1280, 720] DaSiamRPN with ['object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx', 'object_tracking_dasiamrpn_kernel_r1_2021nov.onnx', 'object_tracking_dasiamrpn_model_2021nov.onnx']
657
  1502.15 1501.98 1500.99 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
658
  1300.15 1320.44 1137.60 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
659
  1993.05 1993.98 1991.86 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
@@ -680,9 +669,9 @@ Specs: [details_cn](https://doc.rvspace.org/VisionFive2/PB/VisionFive_2/specific
680
 
681
  CPU:
682
 <!-- config wechat is excluded because it needs to be built with opencv_contrib -->
683
- <!-- config dasiam is excluded due to opencv cannot find ffmpeg and its components -->
684
  ```
685
- $ python3 benchmark.py --all --cfg_exclude wechat:dasiam
686
  Benchmarking ...
687
  backend=cv.dnn.DNN_BACKEND_OPENCV
688
  target=cv.dnn.DNN_TARGET_CPU
 
31
 
32
 # All configs but exclude some of them (fill with config name keywords, not sensitive to upper/lower case, separate with colons)
33
  python benchmark.py --all --cfg_exclude wechat
34
+ python benchmark.py --all --cfg_exclude wechat:crnn
35
 
36
 # All configs but exclude some of the models (fill with exact model names, sensitive to upper/lower case, separate with colons)
37
  python benchmark.py --all --model_exclude license_plate_detection_lpd_yunet_2023mar_int8.onnx:human_segmentation_pphumanseg_2023mar_int8.onnx
 
94
  46.10 47.53 43.06 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
95
  144.89 149.58 125.71 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
96
  143.83 146.39 119.75 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
 
97
  12.99 13.11 12.14 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
98
  12.64 12.44 10.82 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
99
  12.64 11.83 11.03 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
 
148
  212.90 212.93 209.55 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
149
  1690.06 2303.34 1480.63 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
150
  1489.54 1435.48 1308.12 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
 
151
  356.63 357.29 354.42 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
152
  217.52 229.39 101.61 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
153
  198.63 198.25 196.68 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
 
203
  216.18 216.19 214.30 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
204
  1207.83 1208.71 1203.64 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
205
  1236.98 1250.21 1203.64 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
 
206
  124.89 125.25 124.53 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
207
  107.99 109.82 94.05 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
208
  108.41 108.33 107.91 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
 
241
  54.24 55.24 52.87 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx']
242
  63.63 63.43 63.32 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx']
243
  371.45 378.00 366.39 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
 
244
  33.85 33.90 33.61 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
245
  38.16 37.33 37.10 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
246
  91.65 91.98 89.90 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx']
 
271
  366.46 366.88 363.46 [320, 240] LPD_YuNet with ['license_plate_detection_lpd_yunet_2023mar.onnx']
272
  163.06 163.34 161.77 [416, 416] NanoDet with ['object_detection_nanodet_2022nov.onnx']
273
  301.10 311.52 297.74 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
 
274
  149.37 149.95 148.01 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
275
  153.89 153.96 153.43 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
276
  44.29 44.03 43.62 [128, 256] YoutuReID with ['person_reid_youtu_2021nov.onnx']
 
318
  212.69 262.75 170.88 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
319
  1110.87 1112.27 1085.31 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
320
  1128.73 1157.12 1085.31 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
 
321
  147.01 144.01 139.27 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
322
  119.70 118.95 94.09 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
323
  107.63 107.09 105.61 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
 
398
  322.98 323.45 312.13 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
399
  1875.33 1877.53 1871.26 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
400
  1989.04 2005.25 1871.26 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
 
401
  159.80 159.62 159.40 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
402
  152.18 152.86 145.56 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
403
  145.83 145.77 145.45 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
 
419
  NPU (CANN):
420
 
421
  ```
422
+ $ python3 benchmark.py --all --fp32 --cfg_exclude wechat:crnn --model_exclude pose_estimation_mediapipe_2023mar.onnx --cfg_overwrite_backend_target 4
423
  Benchmarking ...
424
  backend=cv.dnn.DNN_BACKEND_CANN
425
  target=cv.dnn.DNN_TARGET_NPU
 
478
  1903.82 1962.71 1533.79 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
479
  37604.10 37569.30 37502.48 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
480
  24229.20 25577.94 13483.54 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
 
481
  1133.44 1131.54 1124.83 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
482
  883.96 919.07 655.33 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
483
  1430.98 1424.55 1415.68 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
 
533
  117.28 150.31 83.33 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
534
  553.58 558.76 535.47 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
535
  594.18 592.64 535.47 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
 
536
  56.35 55.73 55.25 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
537
  57.07 57.19 55.25 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
538
  47.94 48.41 47.05 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
 
587
  406.28 416.58 385.68 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
588
  2608.90 2612.42 2597.93 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
589
  2609.88 2609.39 2597.93 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
 
590
  228.95 228.74 228.35 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
591
  227.97 228.61 226.76 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
592
  192.29 192.26 191.74 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
 
643
  3002.36 3047.94 2655.38 [416, 416] NanoDet with ['object_detection_nanodet_2022nov_int8.onnx']
644
  50678.08 50651.82 50651.19 [640, 640] YoloX with ['object_detection_yolox_2022nov.onnx']
645
  36249.71 37771.22 24606.37 [640, 640] YoloX with ['object_detection_yolox_2022nov_int8.onnx']
 
646
  1502.15 1501.98 1500.99 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb.onnx']
647
  1300.15 1320.44 1137.60 [192, 192] MPPalmDet with ['palm_detection_mediapipe_2023feb_int8.onnx']
648
  1993.05 1993.98 1991.86 [224, 224] MPPersonDet with ['person_detection_mediapipe_2023mar.onnx']
 
669
 
670
  CPU:
671
 <!-- config wechat is excluded because it needs to be built with opencv_contrib -->
672
+ <!-- config vittrack is excluded because OpenCV cannot find ffmpeg and its components -->
673
  ```
674
+ $ python3 benchmark.py --all --cfg_exclude wechat:vittrack
675
  Benchmarking ...
676
  backend=cv.dnn.DNN_BACKEND_OPENCV
677
  target=cv.dnn.DNN_TARGET_CPU
benchmark/color_table.svg CHANGED
benchmark/config/object_tracking_dasiamrpn.yaml DELETED
@@ -1,14 +0,0 @@
1
- Benchmark:
2
- name: "Object Tracking Benchmark"
3
- type: "Tracking"
4
- data:
5
- type: "TrackingVideoLoader"
6
- path: "data/object_tracking"
7
- files: ["throw_cup.mp4"]
8
- metric:
9
- type: "Tracking"
10
- backend: "default"
11
- target: "cpu"
12
-
13
- Model:
14
- name: "DaSiamRPN"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
benchmark/table_config.yaml CHANGED
@@ -122,13 +122,6 @@ Models:
122
  acceptable_time: 100
123
  keyword: "WeChatQRCode"
124
 
125
- - name: "DaSiamRPN"
126
- task: "Object Tracking"
127
- input_size: "1280x720"
128
- folder: "object_tracking_dasiamrpn"
129
- acceptable_time: 3000
130
- keyword: "object_tracking_dasiamrpn"
131
-
132
  - name: "YoutuReID"
133
  task: "Person Re-Identification"
134
  input_size: "128x256"
 
122
  acceptable_time: 100
123
  keyword: "WeChatQRCode"
124
 
 
 
 
 
 
 
 
125
  - name: "YoutuReID"
126
  task: "Person Re-Identification"
127
  input_size: "128x256"
models/__init__.py CHANGED
@@ -11,7 +11,6 @@ from .human_segmentation_pphumanseg.pphumanseg import PPHumanSeg
11
  from .person_detection_mediapipe.mp_persondet import MPPersonDet
12
  from .pose_estimation_mediapipe.mp_pose import MPPose
13
  from .qrcode_wechatqrcode.wechatqrcode import WeChatQRCode
14
- from .object_tracking_dasiamrpn.dasiamrpn import DaSiamRPN
15
  from .person_reid_youtureid.youtureid import YoutuReID
16
  from .image_classification_mobilenet.mobilenet import MobileNet
17
  from .palm_detection_mediapipe.mp_palmdet import MPPalmDet
@@ -85,7 +84,6 @@ MODELS.register(PPHumanSeg)
85
  MODELS.register(MPPersonDet)
86
  MODELS.register(MPPose)
87
  MODELS.register(WeChatQRCode)
88
- MODELS.register(DaSiamRPN)
89
  MODELS.register(YoutuReID)
90
  MODELS.register(MobileNet)
91
  MODELS.register(MPPalmDet)
 
11
  from .person_detection_mediapipe.mp_persondet import MPPersonDet
12
  from .pose_estimation_mediapipe.mp_pose import MPPose
13
  from .qrcode_wechatqrcode.wechatqrcode import WeChatQRCode
 
14
  from .person_reid_youtureid.youtureid import YoutuReID
15
  from .image_classification_mobilenet.mobilenet import MobileNet
16
  from .palm_detection_mediapipe.mp_palmdet import MPPalmDet
 
84
  MODELS.register(MPPersonDet)
85
  MODELS.register(MPPose)
86
  MODELS.register(WeChatQRCode)
 
87
  MODELS.register(YoutuReID)
88
  MODELS.register(MobileNet)
89
  MODELS.register(MPPalmDet)
models/object_tracking_dasiamrpn/LICENSE DELETED
@@ -1,202 +0,0 @@
1
-
2
- Apache License
3
- Version 2.0, January 2004
4
- http://www.apache.org/licenses/
5
-
6
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
-
8
- 1. Definitions.
9
-
10
- "License" shall mean the terms and conditions for use, reproduction,
11
- and distribution as defined by Sections 1 through 9 of this document.
12
-
13
- "Licensor" shall mean the copyright owner or entity authorized by
14
- the copyright owner that is granting the License.
15
-
16
- "Legal Entity" shall mean the union of the acting entity and all
17
- other entities that control, are controlled by, or are under common
18
- control with that entity. For the purposes of this definition,
19
- "control" means (i) the power, direct or indirect, to cause the
20
- direction or management of such entity, whether by contract or
21
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
- outstanding shares, or (iii) beneficial ownership of such entity.
23
-
24
- "You" (or "Your") shall mean an individual or Legal Entity
25
- exercising permissions granted by this License.
26
-
27
- "Source" form shall mean the preferred form for making modifications,
28
- including but not limited to software source code, documentation
29
- source, and configuration files.
30
-
31
- "Object" form shall mean any form resulting from mechanical
32
- transformation or translation of a Source form, including but
33
- not limited to compiled object code, generated documentation,
34
- and conversions to other media types.
35
-
36
- "Work" shall mean the work of authorship, whether in Source or
37
- Object form, made available under the License, as indicated by a
38
- copyright notice that is included in or attached to the work
39
- (an example is provided in the Appendix below).
40
-
41
- "Derivative Works" shall mean any work, whether in Source or Object
42
- form, that is based on (or derived from) the Work and for which the
43
- editorial revisions, annotations, elaborations, or other modifications
44
- represent, as a whole, an original work of authorship. For the purposes
45
- of this License, Derivative Works shall not include works that remain
46
- separable from, or merely link (or bind by name) to the interfaces of,
47
- the Work and Derivative Works thereof.
48
-
49
- "Contribution" shall mean any work of authorship, including
50
- the original version of the Work and any modifications or additions
51
- to that Work or Derivative Works thereof, that is intentionally
52
- submitted to Licensor for inclusion in the Work by the copyright owner
53
- or by an individual or Legal Entity authorized to submit on behalf of
54
- the copyright owner. For the purposes of this definition, "submitted"
55
- means any form of electronic, verbal, or written communication sent
56
- to the Licensor or its representatives, including but not limited to
57
- communication on electronic mailing lists, source code control systems,
58
- and issue tracking systems that are managed by, or on behalf of, the
59
- Licensor for the purpose of discussing and improving the Work, but
60
- excluding communication that is conspicuously marked or otherwise
61
- designated in writing by the copyright owner as "Not a Contribution."
62
-
63
- "Contributor" shall mean Licensor and any individual or Legal Entity
64
- on behalf of whom a Contribution has been received by Licensor and
65
- subsequently incorporated within the Work.
66
-
67
- 2. Grant of Copyright License. Subject to the terms and conditions of
68
- this License, each Contributor hereby grants to You a perpetual,
69
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
- copyright license to reproduce, prepare Derivative Works of,
71
- publicly display, publicly perform, sublicense, and distribute the
72
- Work and such Derivative Works in Source or Object form.
73
-
74
- 3. Grant of Patent License. Subject to the terms and conditions of
75
- this License, each Contributor hereby grants to You a perpetual,
76
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
- (except as stated in this section) patent license to make, have made,
78
- use, offer to sell, sell, import, and otherwise transfer the Work,
79
- where such license applies only to those patent claims licensable
80
- by such Contributor that are necessarily infringed by their
81
- Contribution(s) alone or by combination of their Contribution(s)
82
- with the Work to which such Contribution(s) was submitted. If You
83
- institute patent litigation against any entity (including a
84
- cross-claim or counterclaim in a lawsuit) alleging that the Work
85
- or a Contribution incorporated within the Work constitutes direct
86
- or contributory patent infringement, then any patent licenses
87
- granted to You under this License for that Work shall terminate
88
- as of the date such litigation is filed.
89
-
90
- 4. Redistribution. You may reproduce and distribute copies of the
91
- Work or Derivative Works thereof in any medium, with or without
92
- modifications, and in Source or Object form, provided that You
93
- meet the following conditions:
94
-
95
- (a) You must give any other recipients of the Work or
96
- Derivative Works a copy of this License; and
97
-
98
- (b) You must cause any modified files to carry prominent notices
99
- stating that You changed the files; and
100
-
101
- (c) You must retain, in the Source form of any Derivative Works
102
- that You distribute, all copyright, patent, trademark, and
103
- attribution notices from the Source form of the Work,
104
- excluding those notices that do not pertain to any part of
105
- the Derivative Works; and
106
-
107
- (d) If the Work includes a "NOTICE" text file as part of its
108
- distribution, then any Derivative Works that You distribute must
109
- include a readable copy of the attribution notices contained
110
- within such NOTICE file, excluding those notices that do not
111
- pertain to any part of the Derivative Works, in at least one
112
- of the following places: within a NOTICE text file distributed
113
- as part of the Derivative Works; within the Source form or
114
- documentation, if provided along with the Derivative Works; or,
115
- within a display generated by the Derivative Works, if and
116
- wherever such third-party notices normally appear. The contents
117
- of the NOTICE file are for informational purposes only and
118
- do not modify the License. You may add Your own attribution
119
- notices within Derivative Works that You distribute, alongside
120
- or as an addendum to the NOTICE text from the Work, provided
121
- that such additional attribution notices cannot be construed
122
- as modifying the License.
123
-
124
- You may add Your own copyright statement to Your modifications and
125
- may provide additional or different license terms and conditions
126
- for use, reproduction, or distribution of Your modifications, or
127
- for any such Derivative Works as a whole, provided Your use,
128
- reproduction, and distribution of the Work otherwise complies with
129
- the conditions stated in this License.
130
-
131
- 5. Submission of Contributions. Unless You explicitly state otherwise,
132
- any Contribution intentionally submitted for inclusion in the Work
133
- by You to the Licensor shall be under the terms and conditions of
134
- this License, without any additional terms or conditions.
135
- Notwithstanding the above, nothing herein shall supersede or modify
136
- the terms of any separate license agreement you may have executed
137
- with Licensor regarding such Contributions.
138
-
139
- 6. Trademarks. This License does not grant permission to use the trade
140
- names, trademarks, service marks, or product names of the Licensor,
141
- except as required for reasonable and customary use in describing the
142
- origin of the Work and reproducing the content of the NOTICE file.
143
-
144
- 7. Disclaimer of Warranty. Unless required by applicable law or
145
- agreed to in writing, Licensor provides the Work (and each
146
- Contributor provides its Contributions) on an "AS IS" BASIS,
147
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
- implied, including, without limitation, any warranties or conditions
149
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
- PARTICULAR PURPOSE. You are solely responsible for determining the
151
- appropriateness of using or redistributing the Work and assume any
152
- risks associated with Your exercise of permissions under this License.
153
-
154
- 8. Limitation of Liability. In no event and under no legal theory,
155
- whether in tort (including negligence), contract, or otherwise,
156
- unless required by applicable law (such as deliberate and grossly
157
- negligent acts) or agreed to in writing, shall any Contributor be
158
- liable to You for damages, including any direct, indirect, special,
159
- incidental, or consequential damages of any character arising as a
160
- result of this License or out of the use or inability to use the
161
- Work (including but not limited to damages for loss of goodwill,
162
- work stoppage, computer failure or malfunction, or any and all
163
- other commercial damages or losses), even if such Contributor
164
- has been advised of the possibility of such damages.
165
-
166
- 9. Accepting Warranty or Additional Liability. While redistributing
167
- the Work or Derivative Works thereof, You may choose to offer,
168
- and charge a fee for, acceptance of support, warranty, indemnity,
169
- or other liability obligations and/or rights consistent with this
170
- License. However, in accepting such obligations, You may act only
171
- on Your own behalf and on Your sole responsibility, not on behalf
172
- of any other Contributor, and only if You agree to indemnify,
173
- defend, and hold each Contributor harmless for any liability
174
- incurred by, or claims asserted against, such Contributor by reason
175
- of your accepting any such warranty or additional liability.
176
-
177
- END OF TERMS AND CONDITIONS
178
-
179
- APPENDIX: How to apply the Apache License to your work.
180
-
181
- To apply the Apache License to your work, attach the following
182
- boilerplate notice, with the fields enclosed by brackets "[]"
183
- replaced with your own identifying information. (Don't include
184
- the brackets!) The text should be enclosed in the appropriate
185
- comment syntax for the file format. We also recommend that a
186
- file or class name and description of purpose be included on the
187
- same "printed page" as the copyright notice for easier
188
- identification within third-party archives.
189
-
190
- Copyright [yyyy] [name of copyright owner]
191
-
192
- Licensed under the Apache License, Version 2.0 (the "License");
193
- you may not use this file except in compliance with the License.
194
- You may obtain a copy of the License at
195
-
196
- http://www.apache.org/licenses/LICENSE-2.0
197
-
198
- Unless required by applicable law or agreed to in writing, software
199
- distributed under the License is distributed on an "AS IS" BASIS,
200
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
- See the License for the specific language governing permissions and
202
- limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/object_tracking_dasiamrpn/README.md DELETED
@@ -1,37 +0,0 @@
1
- # DaSiamRPN
2
-
3
- [Distractor-aware Siamese Networks for Visual Object Tracking](https://arxiv.org/abs/1808.06048)
4
-
5
- Note:
6
-
7
- - Model source: [opencv/samples/dnn/diasiamrpn_tracker.cpp](https://github.com/opencv/opencv/blob/ceb94d52a104c0c1287a43dfa6ba72705fb78ac1/samples/dnn/dasiamrpn_tracker.cpp#L5-L7)
8
- - Visit https://github.com/foolwood/DaSiamRPN for training details.
9
-
10
- ## Demo
11
-
12
- Run the following command to try the demo:
13
-
14
- ```shell
15
- # track on camera input
16
- python demo.py
17
- # track on video input
18
- python demo.py --input /path/to/video -v
19
-
20
- # get help regarding various parameters
21
- python demo.py --help
22
- ```
23
-
24
- ### Example outputs
25
-
26
- ![webcam demo](./example_outputs/dasiamrpn_demo.gif)
27
-
28
- ## License
29
-
30
- All files in this directory are licensed under [Apache 2.0 License](./LICENSE).
31
-
32
- ## Reference:
33
-
34
- - DaSiamRPN Official Repository: https://github.com/foolwood/DaSiamRPN
35
- - Paper: https://arxiv.org/abs/1808.06048
36
- - OpenCV API `TrackerDaSiamRPN` Doc: https://docs.opencv.org/4.x/de/d93/classcv_1_1TrackerDaSiamRPN.html
37
- - OpenCV Sample: https://github.com/opencv/opencv/blob/4.x/samples/dnn/dasiamrpn_tracker.cpp
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/object_tracking_dasiamrpn/dasiamrpn.py DELETED
@@ -1,48 +0,0 @@
1
- # This file is part of OpenCV Zoo project.
2
- # It is subject to the license terms in the LICENSE file found in the same directory.
3
- #
4
- # Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
5
- # Third party copyrights are property of their respective owners.
6
-
7
- import numpy as np
8
- import cv2 as cv
9
-
10
- class DaSiamRPN:
11
- def __init__(self, kernel_cls1_path, kernel_r1_path, model_path, backend_id=0, target_id=0):
12
- self._model_path = model_path
13
- self._kernel_cls1_path = kernel_cls1_path
14
- self._kernel_r1_path = kernel_r1_path
15
- self._backend_id = backend_id
16
- self._target_id = target_id
17
-
18
- self._param = cv.TrackerDaSiamRPN_Params()
19
- self._param.model = self._model_path
20
- self._param.kernel_cls1 = self._kernel_cls1_path
21
- self._param.kernel_r1 = self._kernel_r1_path
22
- self._param.backend = self._backend_id
23
- self._param.target = self._target_id
24
- self._model = cv.TrackerDaSiamRPN.create(self._param)
25
-
26
- @property
27
- def name(self):
28
- return self.__class__.__name__
29
-
30
- def setBackendAndTarget(self, backendId, targetId):
31
- self._backend_id = backendId
32
- self._target_id = targetId
33
-
34
- self._param = cv.TrackerDaSiamRPN_Params()
35
- self._param.model = self._model_path
36
- self._param.kernel_cls1 = self._kernel_cls1_path
37
- self._param.kernel_r1 = self._kernel_r1_path
38
- self._param.backend = self._backend_id
39
- self._param.target = self._target_id
40
- self._model = cv.TrackerDaSiamRPN.create(self._param)
41
-
42
- def init(self, image, roi):
43
- self._model.init(image, roi)
44
-
45
- def infer(self, image):
46
- isLocated, bbox = self._model.update(image)
47
- score = self._model.getTrackingScore()
48
- return isLocated, bbox, score
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/object_tracking_dasiamrpn/demo.py DELETED
@@ -1,118 +0,0 @@
1
- # This file is part of OpenCV Zoo project.
2
- # It is subject to the license terms in the LICENSE file found in the same directory.
3
- #
4
- # Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
5
- # Third party copyrights are property of their respective owners.
6
-
7
- import argparse
8
-
9
- import numpy as np
10
- import cv2 as cv
11
-
12
- from dasiamrpn import DaSiamRPN
13
-
14
- # Check OpenCV version
15
- assert cv.__version__ >= "4.8.0", \
16
- "Please install latest opencv-python to try this demo: python3 -m pip install --upgrade opencv-python"
17
-
18
- # Valid combinations of backends and targets
19
- backend_target_pairs = [
20
- [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
21
- [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA],
22
- [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16],
23
- [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU],
24
- [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU]
25
- ]
26
-
27
- parser = argparse.ArgumentParser(
28
- description="Distractor-aware Siamese Networks for Visual Object Tracking (https://arxiv.org/abs/1808.06048)")
29
- parser.add_argument('--input', '-i', type=str,
30
- help='Usage: Set path to the input video. Omit for using default camera.')
31
- parser.add_argument('--model_path', type=str, default='object_tracking_dasiamrpn_model_2021nov.onnx',
32
- help='Usage: Set model path, defaults to object_tracking_dasiamrpn_model_2021nov.onnx.')
33
- parser.add_argument('--kernel_cls1_path', type=str, default='object_tracking_dasiamrpn_kernel_cls1_2021nov.onnx',
34
- help='Usage: Set path to dasiamrpn_kernel_cls1.onnx.')
35
- parser.add_argument('--kernel_r1_path', type=str, default='object_tracking_dasiamrpn_kernel_r1_2021nov.onnx',
36
- help='Usage: Set path to dasiamrpn_kernel_r1.onnx.')
37
- parser.add_argument('--backend_target', '-bt', type=int, default=0,
38
- help='''Choose one of the backend-target pair to run this demo:
39
- {:d}: (default) OpenCV implementation + CPU,
40
- {:d}: CUDA + GPU (CUDA),
41
- {:d}: CUDA + GPU (CUDA FP16),
42
- {:d}: TIM-VX + NPU,
43
- {:d}: CANN + NPU
44
- '''.format(*[x for x in range(len(backend_target_pairs))]))
45
- parser.add_argument('--save', '-s', action='store_true',
46
- help='Usage: Specify to save a file with results. Invalid in case of camera input.')
47
- parser.add_argument('--vis', '-v', action='store_true',
48
- help='Usage: Specify to open a new window to show results. Invalid in case of camera input.')
49
- args = parser.parse_args()
50
-
51
- def visualize(image, bbox, score, isLocated, fps=None, box_color=(0, 255, 0),text_color=(0, 255, 0), fontScale = 1, fontSize = 1):
52
- output = image.copy()
53
- h, w, _ = output.shape
54
-
55
- if fps is not None:
56
- cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 30), cv.FONT_HERSHEY_DUPLEX, fontScale, text_color, fontSize)
57
-
58
- if isLocated and score >= 0.6:
59
- # bbox: Tuple of length 4
60
- x, y, w, h = bbox
61
- cv.rectangle(output, (x, y), (x+w, y+h), box_color, 2)
62
- cv.putText(output, '{:.2f}'.format(score), (x, y+20), cv.FONT_HERSHEY_DUPLEX, fontScale, text_color, fontSize)
63
- else:
64
- text_size, baseline = cv.getTextSize('Target lost!', cv.FONT_HERSHEY_DUPLEX, fontScale, fontSize)
65
- text_x = int((w - text_size[0]) / 2)
66
- text_y = int((h - text_size[1]) / 2)
67
- cv.putText(output, 'Target lost!', (text_x, text_y), cv.FONT_HERSHEY_DUPLEX, fontScale, (0, 0, 255), fontSize)
68
-
69
- return output
70
-
71
- if __name__ == '__main__':
72
- backend_id = backend_target_pairs[args.backend_target][0]
73
- target_id = backend_target_pairs[args.backend_target][1]
74
-
75
- # Instantiate DaSiamRPN
76
- model = DaSiamRPN(
77
- kernel_cls1_path=args.kernel_cls1_path,
78
- kernel_r1_path=args.kernel_r1_path,
79
- model_path=args.model_path,
80
- backend_id=backend_id,
81
- target_id=target_id)
82
-
83
- # Read from args.input
84
- _input = args.input
85
- if args.input is None:
86
- device_id = 0
87
- _input = device_id
88
- video = cv.VideoCapture(_input)
89
-
90
- # Select an object
91
- has_frame, first_frame = video.read()
92
- if not has_frame:
93
- print('No frames grabbed!')
94
- exit()
95
- first_frame_copy = first_frame.copy()
96
- cv.putText(first_frame_copy, "1. Drag a bounding box to track.", (0, 15), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
97
- cv.putText(first_frame_copy, "2. Press ENTER to confirm", (0, 35), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
98
- roi = cv.selectROI('DaSiamRPN Demo', first_frame_copy)
99
- print("Selected ROI: {}".format(roi))
100
-
101
- # Init tracker with ROI
102
- model.init(first_frame, roi)
103
-
104
- # Track frame by frame
105
- tm = cv.TickMeter()
106
- while cv.waitKey(1) < 0:
107
- has_frame, frame = video.read()
108
- if not has_frame:
109
- print('End of video')
110
- break
111
- # Inference
112
- tm.start()
113
- isLocated, bbox, score = model.infer(frame)
114
- tm.stop()
115
- # Visualize
116
- frame = visualize(frame, bbox, score, isLocated, fps=tm.getFPS())
117
- cv.imshow('DaSiamRPN Demo', frame)
118
- tm.reset()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
tools/quantize/quantize-ort.py CHANGED
@@ -102,7 +102,7 @@ models=dict(
102
  ppresnet50=Quantize(model_path='../../models/image_classification_ppresnet/image_classification_ppresnet50_2022jan.onnx',
103
  calibration_image_dir='../../benchmark/data/image_classification',
104
  transforms=Compose([Resize(size=(224, 224))])),
105
- # TBD: DaSiamRPN
106
  youtureid=Quantize(model_path='../../models/person_reid_youtureid/person_reid_youtu_2021nov.onnx',
107
  calibration_image_dir='../../benchmark/data/person_reid',
108
  transforms=Compose([Resize(size=(128, 256))])),
 
102
  ppresnet50=Quantize(model_path='../../models/image_classification_ppresnet/image_classification_ppresnet50_2022jan.onnx',
103
  calibration_image_dir='../../benchmark/data/image_classification',
104
  transforms=Compose([Resize(size=(224, 224))])),
105
+ # TBD: VitTrack
106
  youtureid=Quantize(model_path='../../models/person_reid_youtureid/person_reid_youtu_2021nov.onnx',
107
  calibration_image_dir='../../benchmark/data/person_reid',
108
  transforms=Compose([Resize(size=(128, 256))])),