lpylpy0514 committed on
Commit
435e017
·
1 Parent(s): 556a873

Merge pull request #194 from lpylpy0514:main


Add VIT track model and demo #194

GSoC real-time tracking model
The corresponding OpenCV repo PR is [here](https://github.com/opencv/opencv/pull/24201).

.gitattributes CHANGED
@@ -20,3 +20,4 @@
 *.gif filter=lfs diff=lfs merge=lfs -text
 *.png filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+
models/object_tracking_vittrack/README.md ADDED
@@ -0,0 +1,36 @@
+# VIT tracker
+
+VIT tracker (vision transformer tracker) is a noticeably better model for real-time object tracking than nanotrack. In single-threaded mode on an ARM chip it runs more than 20% faster than nanotrack, and the advantage becomes even more pronounced in multi-threaded mode. On the LaSOT dataset, VIT tracker also shows better accuracy than nanotrack. Moreover, VIT tracker provides a confidence value during tracking, which can be used to determine whether the target is currently lost.
+
+video demo: https://youtu.be/MJiPnu1ZQRI
+In object tracking tasks, the score is an important indicator of whether the current target is lost. In the video, VIT tracker tracks the target and displays the current score in the upper-left corner. When the target is lost, the score drops significantly. Nanotrack, by contrast, returns a score of about 0.9 in every situation, so its score cannot be used to tell whether the target is lost.
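+
+As a minimal sketch of reading this score through the OpenCV API (the model path, the illustrative input path, and the 0.3 threshold below follow the demo script `demo.py` in this folder):
+
+```python
+import cv2 as cv
+
+params = cv.TrackerVit_Params()
+params.net = 'vitTracker.onnx'          # model file, assumed to be in the working directory
+tracker = cv.TrackerVit_create(params)
+
+video = cv.VideoCapture('video.mp4')    # illustrative input path
+ok, frame = video.read()
+roi = cv.selectROI('select ROI', frame) # draw the initial bounding box (x, y, w, h)
+tracker.init(frame, roi)
+
+while True:
+    ok, frame = video.read()
+    if not ok:
+        break
+    isLocated, bbox = tracker.update(frame)
+    score = tracker.getTrackingScore()  # confidence of the current prediction
+    if not isLocated or score < 0.3:    # threshold follows demo.py
+        print('Target lost!')
+```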
+
+**NOTE: OpenCV > 4.8.0 is required.**
+
+
+# speed test
+
+NOTE: The speeds below were measured with **onnxruntime**, because OpenCV currently has poor support for the transformer architecture.
+
+ONNX speed test on ARM platform (Apple M2), in ms:
+
+| thread nums | 1    | 2    | 3    | 4             |
+| ----------- | ---- | ---- | ---- | ------------- |
+| nanotrack   | 5.25 | 4.86 | 4.72 | 4.49          |
+| vit tracker | 4.18 | 2.41 | 1.97 | **1.46 (3X)** |
+
+ONNX speed test on x86 platform (Intel i3-10105), in ms:
+
+| thread nums | 1    | 2    | 3    | 4    |
+| ----------- | ---- | ---- | ---- | ---- |
+| nanotrack   | 3.20 | 2.75 | 2.46 | 2.55 |
+| vit tracker | 3.84 | 2.37 | 2.10 | 2.01 |
+
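+For reference, a latency test of this kind with **onnxruntime** could look roughly like the sketch below. The model path is an assumption, inputs are generated from the model's own metadata with random float32 data, and this is not the exact script used for the numbers above:
+
+```python
+import time
+import numpy as np
+import onnxruntime as ort
+
+opts = ort.SessionOptions()
+opts.intra_op_num_threads = 4          # corresponds to the "thread nums" column
+sess = ort.InferenceSession('vitTracker.onnx', sess_options=opts)  # assumed model path
+
+# Build random inputs from the model's input metadata, replacing any
+# dynamic dimension with 1; float32 inputs are an assumption.
+feeds = {}
+for inp in sess.get_inputs():
+    shape = [d if isinstance(d, int) else 1 for d in inp.shape]
+    feeds[inp.name] = np.random.rand(*shape).astype(np.float32)
+
+for _ in range(10):                    # warm-up runs
+    sess.run(None, feeds)
+
+n = 100
+start = time.perf_counter()
+for _ in range(n):
+    sess.run(None, feeds)
+print('avg latency: {:.2f} ms'.format((time.perf_counter() - start) / n * 1000))
+```
+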
+# performance test
+
+Performance test on the LaSOT dataset (AUC is the most important metric; a higher AUC means a better tracker):
+
+| LaSOT       | AUC  | P    | Pnorm |
+| ----------- | ---- | ---- | ----- |
+| nanotrack   | 46.8 | 45.0 | 43.3  |
+| vit tracker | 48.6 | 44.8 | 54.7  |
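+
+For reference, the demo script `demo.py` (added below) can be run as `python demo.py --input /path/to/video.mp4 --model_path vitTracker.onnx`, where both flags come from its argparse options, or simply as `python demo.py` to use the default camera.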
models/object_tracking_vittrack/demo.py ADDED
@@ -0,0 +1,79 @@
+import cv2 as cv
+import argparse
+
+# Check OpenCV version
+assert cv.__version__ > "4.8.0", \
+       "Please install latest opencv-python to try this demo: python3 -m pip install --upgrade opencv-python"
+
+parser = argparse.ArgumentParser(
+    description="VIT track opencv API")
+parser.add_argument('--input', '-i', type=str,
+                    help='Usage: Set path to the input video. Omit for using default camera.')
+parser.add_argument('--model_path', type=str, default='vitTracker.onnx',
+                    help='Usage: Set model path, defaults to vitTracker.onnx.')
+args = parser.parse_args()
+
+def visualize(image, bbox, score, isLocated, fps=None, box_color=(0, 255, 0), text_color=(0, 255, 0), fontScale=1, fontSize=1):
+    output = image.copy()
+    h, w, _ = output.shape
+
+    if fps is not None:
+        cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 30), cv.FONT_HERSHEY_DUPLEX, fontScale, text_color, fontSize)
+
+    if isLocated and score >= 0.3:
+        # bbox: Tuple of length 4
+        x, y, w, h = bbox
+        cv.rectangle(output, (x, y), (x + w, y + h), box_color, 2)
+        cv.putText(output, '{:.2f}'.format(score), (x, y + 20), cv.FONT_HERSHEY_DUPLEX, fontScale, text_color, fontSize)
+    else:
+        text_size, baseline = cv.getTextSize('Target lost!', cv.FONT_HERSHEY_DUPLEX, fontScale, fontSize)
+        text_x = int((w - text_size[0]) / 2)
+        text_y = int((h - text_size[1]) / 2)
+        cv.putText(output, 'Target lost!', (text_x, text_y), cv.FONT_HERSHEY_DUPLEX, fontScale, (0, 0, 255), fontSize)
+
+    return output
+
+if __name__ == '__main__':
+
+    params = cv.TrackerVit_Params()
+    params.net = args.model_path
+    model = cv.TrackerVit_create(params)
+
+    # Read from args.input
+    _input = args.input
+    if args.input is None:
+        device_id = 0
+        _input = device_id
+    video = cv.VideoCapture(_input)
+
+    # Select an object
+    has_frame, first_frame = video.read()
+    if not has_frame:
+        print('No frames grabbed!')
+        exit()
+    first_frame_copy = first_frame.copy()
+    cv.putText(first_frame_copy, "1. Drag a bounding box to track.", (0, 15), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
+    cv.putText(first_frame_copy, "2. Press ENTER to confirm", (0, 35), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
+    roi = cv.selectROI('vitTrack Demo', first_frame_copy)
+    print("Selected ROI: {}".format(roi))
+
+    # Init tracker with ROI
+    model.init(first_frame, roi)
+
+    # Track frame by frame
+    tm = cv.TickMeter()
+    while cv.waitKey(1) < 0:
+        has_frame, frame = video.read()
+        if not has_frame:
+            print('End of video')
+            break
+        # Inference
+        tm.start()
+        isLocated, bbox = model.update(frame)
+        score = model.getTrackingScore()
+        tm.stop()
+        # Visualize
+        frame = visualize(frame, bbox, score, isLocated, fps=tm.getFPS())
+        cv.imshow('vittrack Demo', frame)
+        tm.reset()