Shad0ws committed
Commit d8d92d2 · 1 Parent(s): 8a40e87

Upload 266 files

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the rest.
Files changed (50):
  1. .gitattributes +35 -0
  2. 2.mp4 +3 -0
  3. 4767618542647708593.mp4 +3 -0
  4. Pipfile +28 -0
  5. Pipfile.lock +1153 -0
  6. Shibuya_Crossing_FullHD.mp4 +3 -0
  7. app.py +237 -0
  8. deep_list.py +337 -0
  9. deep_sort_pytorch/.gitignore +13 -0
  10. deep_sort_pytorch/LICENSE +21 -0
  11. deep_sort_pytorch/configs/deep_sort.yaml +10 -0
  12. deep_sort_pytorch/deep_sort/README.md +3 -0
  13. deep_sort_pytorch/deep_sort/__init__.py +21 -0
  14. deep_sort_pytorch/deep_sort/deep/__init__.py +0 -0
  15. deep_sort_pytorch/deep_sort/deep/checkpoint/.gitkeep +0 -0
  16. deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7 +3 -0
  17. deep_sort_pytorch/deep_sort/deep/evaluate.py +13 -0
  18. deep_sort_pytorch/deep_sort/deep/feature_extractor.py +54 -0
  19. deep_sort_pytorch/deep_sort/deep/model.py +109 -0
  20. deep_sort_pytorch/deep_sort/deep/original_model.py +111 -0
  21. deep_sort_pytorch/deep_sort/deep/test.py +80 -0
  22. deep_sort_pytorch/deep_sort/deep/train.jpg +0 -0
  23. deep_sort_pytorch/deep_sort/deep/train.py +206 -0
  24. deep_sort_pytorch/deep_sort/deep_sort.py +113 -0
  25. deep_sort_pytorch/deep_sort/sort/__init__.py +0 -0
  26. deep_sort_pytorch/deep_sort/sort/detection.py +49 -0
  27. deep_sort_pytorch/deep_sort/sort/iou_matching.py +82 -0
  28. deep_sort_pytorch/deep_sort/sort/kalman_filter.py +229 -0
  29. deep_sort_pytorch/deep_sort/sort/linear_assignment.py +192 -0
  30. deep_sort_pytorch/deep_sort/sort/nn_matching.py +176 -0
  31. deep_sort_pytorch/deep_sort/sort/preprocessing.py +73 -0
  32. deep_sort_pytorch/deep_sort/sort/track.py +169 -0
  33. deep_sort_pytorch/deep_sort/sort/tracker.py +143 -0
  34. deep_sort_pytorch/utils/__init__.py +0 -0
  35. deep_sort_pytorch/utils/asserts.py +13 -0
  36. deep_sort_pytorch/utils/draw.py +36 -0
  37. deep_sort_pytorch/utils/evaluation.py +103 -0
  38. deep_sort_pytorch/utils/io.py +133 -0
  39. deep_sort_pytorch/utils/json_logger.py +383 -0
  40. deep_sort_pytorch/utils/log.py +17 -0
  41. deep_sort_pytorch/utils/parser.py +39 -0
  42. deep_sort_pytorch/utils/tools.py +39 -0
  43. detect_sort.py +256 -0
  44. drift_frames/frame_0.png +0 -0
  45. drift_frames/frame_1.png +0 -0
  46. drift_frames/frame_10.png +3 -0
  47. drift_frames/frame_100.png +0 -0
  48. drift_frames/frame_101.png +0 -0
  49. drift_frames/frame_102.png +0 -0
  50. drift_frames/frame_103.png +0 -0
.gitattributes CHANGED
@@ -32,3 +32,38 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ 2.mp4 filter=lfs diff=lfs merge=lfs -text
+ 4767618542647708593.mp4 filter=lfs diff=lfs merge=lfs -text
+ deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7 filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_10.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_11.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_12.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_13.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_14.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_22.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_23.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_24.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_25.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_26.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_27.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_28.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_29.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_30.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_31.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_32.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_33.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_34.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_35.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_7.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_8.png filter=lfs diff=lfs merge=lfs -text
+ drift_frames/frame_9.png filter=lfs diff=lfs merge=lfs -text
+ runs/detect/exp15/traffic360p.mp4 filter=lfs diff=lfs merge=lfs -text
+ runs/detect/exp16/traffic360p-india.mp4 filter=lfs diff=lfs merge=lfs -text
+ runs/detect/exp17/traffic720p.mp4 filter=lfs diff=lfs merge=lfs -text
+ runs/detect/exp18/Shibuya_Crossing_FullHD.mp4 filter=lfs diff=lfs merge=lfs -text
+ Shibuya_Crossing_FullHD.mp4 filter=lfs diff=lfs merge=lfs -text
+ traffic360p-india.mp4 filter=lfs diff=lfs merge=lfs -text
+ traffic360p.mp4 filter=lfs diff=lfs merge=lfs -text
+ traffic480p-india-mini.mp4 filter=lfs diff=lfs merge=lfs -text
+ traffic720p-india.mp4 filter=lfs diff=lfs merge=lfs -text
+ traffic720p.mp4 filter=lfs diff=lfs merge=lfs -text
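
Every large binary in this upload (the videos, the ReID checkpoint ckpt.t7, and the heavier drift frames) is routed through Git LFS by these rules, so the repository stores small pointer files rather than the blobs themselves. As a minimal sketch (assuming the one-pattern-per-line filter=lfs form used above), the tracked patterns can be read back out like this:

# Sketch: list the patterns a .gitattributes file routes through Git LFS.
from pathlib import Path

def lfs_patterns(path: str = ".gitattributes") -> list[str]:
    patterns = []
    for line in Path(path).read_text().splitlines():
        parts = line.split()
        # first token is the path pattern; the rest are attribute settings
        if len(parts) > 1 and "filter=lfs" in parts[1:]:
            patterns.append(parts[0])
    return patterns

print(lfs_patterns())  # e.g. ['*.zip', '*.zst', '*tfevents*', '2.mp4', ...]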
2.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e820d03d895ce5f401f1bfa77454a976ae99697d037bfe8090a8ca88e5cd9564
+ size 4232570
4767618542647708593.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c2c2091a9e9bfb5bd7d96ae8c6d98acaefc9a1d38e948a12e6c26a394a576ec
+ size 2164085
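
Both .mp4 entries above are Git LFS pointer files rather than video data: three key/value lines giving the pointer spec version, the SHA-256 digest of the real content (oid), and its size in bytes. A minimal parsing sketch under that assumption:

# Sketch: parse a Git LFS pointer file into its key/value fields.
from pathlib import Path

def read_lfs_pointer(path: str) -> dict[str, str]:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# read_lfs_pointer("2.mp4") would yield
# {'version': 'https://git-lfs.github.com/spec/v1',
#  'oid': 'sha256:e820d03d...', 'size': '4232570'}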
Pipfile ADDED
@@ -0,0 +1,28 @@
+ [[source]]
+ url = "https://pypi.org/simple"
+ verify_ssl = true
+ name = "pypi"
+
+ [packages]
+ matplotlib = ">=3.2.2"
+ numpy = ">=1.18.5"
+ opencv-python = ">=4.1.2"
+ pillow = ">=7.1.2"
+ pyyaml = ">=5.3.1"
+ requests = ">=2.23.0"
+ scipy = ">=1.4.1"
+ torch = "==1.9.1"
+ torchvision = "==0.10.1"
+ tqdm = ">=4.41.0"
+ tensorboard = ">=2.4.1"
+ pandas = ">=1.1.4"
+ seaborn = ">=0.11.0"
+ thop = "*"
+ streamlit = "*"
+ psutil = "*"
+ easydict = "*"
+
+ [dev-packages]
+
+ [requires]
+ python_version = "3.9"
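
The Pipfile leaves most constraints loose but pins torch and torchvision exactly, with Python 3.9 as the target; running "pipenv install" should recreate the environment from the lock file below. A small standard-library sketch for checking that the two exact pins match what is actually installed (the pin values are copied from [packages] above; the rest is illustrative):

# Sketch: verify the Pipfile's exact pins against the live environment.
from importlib.metadata import PackageNotFoundError, version

EXACT_PINS = {"torch": "1.9.1", "torchvision": "0.10.1"}

for name, wanted in EXACT_PINS.items():
    try:
        got = version(name)
        print(f"{name} {got} ({'ok' if got == wanted else 'expected ' + wanted})")
    except PackageNotFoundError:
        print(f"{name} is not installed")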
Pipfile.lock ADDED
@@ -0,0 +1,1153 @@
+ {
+ "_meta": {
+ "hash": {
+ "sha256": "bcee359cb3d56e767d60524e9e5bbf69c0a1636cd9461f3ce6618e87468b6c81"
+ },
+ "pipfile-spec": 6,
+ "requires": {
+ "python_version": "3.9"
+ },
+ "sources": [
+ {
+ "name": "pypi",
+ "url": "https://pypi.org/simple",
+ "verify_ssl": true
+ }
+ ]
+ },
+ "default": {
+ "absl-py": {
+ "hashes": [
+ "sha256:5d15f85b8cc859c6245bc9886ba664460ed96a6fee895416caa37d669ee74a9a",
+ "sha256:f568809938c49abbda89826223c992b630afd23c638160ad7840cfe347710d97"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==1.2.0"
+ },
+ "altair": {
+ "hashes": [
+ "sha256:0c724848ae53410c13fa28be2b3b9a9dcb7b5caa1a70f7f217bd663bb419935a",
+ "sha256:d87d9372e63b48cd96b2a6415f0cf9457f50162ab79dc7a31cd7e024dd840026"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==4.2.0"
+ },
+ "attrs": {
+ "hashes": [
+ "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6",
+ "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==22.1.0"
+ },
+ "blinker": {
+ "hashes": [
+ "sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36",
+ "sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
+ "version": "==1.5"
+ },
+ "cachetools": {
+ "hashes": [
+ "sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757",
+ "sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db"
+ ],
+ "markers": "python_version ~= '3.7'",
+ "version": "==5.2.0"
+ },
+ "certifi": {
+ "hashes": [
+ "sha256:36973885b9542e6bd01dea287b2b4b3b21236307c56324fcc3f1160f2d655ed5",
+ "sha256:e232343de1ab72c2aa521b625c80f699e356830fd0e2c620b465b304b17b0516"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==2022.9.14"
+ },
+ "charset-normalizer": {
+ "hashes": [
+ "sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845",
+ "sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==2.1.1"
+ },
+ "click": {
+ "hashes": [
+ "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e",
+ "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==8.1.3"
+ },
+ "colorama": {
+ "hashes": [
+ "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da",
+ "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"
+ ],
+ "markers": "platform_system == 'Windows'",
+ "version": "==0.4.5"
+ },
+ "commonmark": {
+ "hashes": [
+ "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60",
+ "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"
+ ],
+ "version": "==0.9.1"
+ },
+ "cycler": {
+ "hashes": [
+ "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3",
+ "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==0.11.0"
+ },
+ "decorator": {
+ "hashes": [
+ "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330",
+ "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==5.1.1"
+ },
+ "easydict": {
+ "hashes": [
+ "sha256:3f3f0dab07c299f0f4df032db1f388d985bb57fa4c5be30acd25c5f9a516883b"
+ ],
+ "index": "pypi",
+ "version": "==1.9"
+ },
+ "entrypoints": {
+ "hashes": [
+ "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4",
+ "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==0.4"
+ },
+ "fonttools": {
+ "hashes": [
+ "sha256:88d48ef24486137c864dc56707b4b54ef8a97ab9162c2721ec61434baf1c4d13",
+ "sha256:b6d86ffd0a5f83d3da6a34d5f99a90398638e423cd6a8d93c5808af703432c7f"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==4.37.2"
+ },
+ "gitdb": {
+ "hashes": [
+ "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd",
+ "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==4.0.9"
+ },
+ "gitpython": {
+ "hashes": [
+ "sha256:1c885ce809e8ba2d88a29befeb385fcea06338d3640712b59ca623c220bb5704",
+ "sha256:5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==3.1.27"
+ },
+ "google-auth": {
+ "hashes": [
+ "sha256:be62acaae38d0049c21ca90f27a23847245c9f161ff54ede13af2cb6afecbac9",
+ "sha256:ed65ecf9f681832298e29328e1ef0a3676e3732b2e56f41532d45f70a22de0fb"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
+ "version": "==2.11.0"
+ },
+ "google-auth-oauthlib": {
+ "hashes": [
+ "sha256:3f2a6e802eebbb6fb736a370fbf3b055edcb6b52878bf2f26330b5e041316c73",
+ "sha256:a90a072f6993f2c327067bf65270046384cda5a8ecb20b94ea9a687f1f233a7a"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==0.4.6"
+ },
+ "grpcio": {
+ "hashes": [
+ "sha256:1471e6f25a8e47d9f88499f48c565fc5b2876e8ee91bfb0ff33eaadd188b7ea6",
+ "sha256:19f9c021ae858d3ef6d5ec4c0acf3f0b0a61e599e5aa36c36943c209520a0e66",
+ "sha256:1c924d4e0493fd536ba3b82584b370e8b3c809ef341f9f828cff2dc3c761b3ab",
+ "sha256:1d065f40fe74b52b88a6c42d4373a0983f1b0090f952a0747f34f2c11d6cbc64",
+ "sha256:1ff1be0474846ed15682843b187e6062f845ddfeaceb2b28972073f474f7b735",
+ "sha256:2563357697f5f2d7fd80c1b07a57ef4736551327ad84de604e7b9f6c1b6b4e20",
+ "sha256:2b6c336409937fd1cd2bf78eb72651f44d292d88da5e63059a4e8bd01b9d7411",
+ "sha256:3340cb2224cc397954def015729391d85fb31135b5a7efca363e73e6f1b0e908",
+ "sha256:346bef672a1536d59437210f16af35389d715d2b321bfe4899b3d6476a196706",
+ "sha256:3d319a0c89ffac9b8dfc75bfe727a4c835d18bbccc14203b20eb5949c6c7d87d",
+ "sha256:460f5bec23fffa3c041aeba1f93a0f06b7a29e6a4da3658a52e1a866494920ab",
+ "sha256:4786323555a9f2c6380cd9a9922bcfd42165a51d68d242eebfcdfdc667651c96",
+ "sha256:53b6306f9473020bc47ddf64ca704356466e63d5f88f5c2a7bf0a4692e7f03c4",
+ "sha256:53fa2fc1a1713195fa7acf7443a6f59b6ac7837607690f813c66cc18a9cb8135",
+ "sha256:598c8c42420443c55431eba1821c7a2f72707f1ff674a4de9e0bb03282923cfb",
+ "sha256:5a6a750c8324f3974e95265d3f9a0541573c537af1f67b3f6f46bf9c0b2e1b36",
+ "sha256:5d81cd3c161291339ed3b469250c2f5013c3083dea7796e93aedff8f05fdcec1",
+ "sha256:626822d799d8fab08f07c8d95ef5c36213d24143f7cad3f548e97413db9f4110",
+ "sha256:660217eccd2943bf23ea9a36e2a292024305aec04bf747fbcff1f5032b83610e",
+ "sha256:741eeff39a26d26da2b6d74ff0559f882ee95ee4e3b20c0b4b829021cb917f96",
+ "sha256:7cee20a4f873d61274d70c28ff63d19677d9eeea869c6a9cbaf3a00712336b6c",
+ "sha256:8bbaa6647986b874891bc682a1093df54cbdb073b5d4b844a2b480c47c7ffafd",
+ "sha256:934aad7350d9577f4275e787f3d91d3c8ff4efffa8d6b807d343d3c891ff53eb",
+ "sha256:9477967e605ba08715dcc769b5ee0f0d8b22bda40ef25a0df5a8759e5a4d21a5",
+ "sha256:97dc35a99c61d5f35ec6457d3df0a4695ba9bb04a35686e1c254462b15c53f98",
+ "sha256:9d116106cf220c79e91595523c893f1cf09ec0c2ea49de4fb82152528b7e6833",
+ "sha256:9fba1d0ba7cf56811728f1951c800a9aca6677e86433c5e353f2cc2c4039fda6",
+ "sha256:a15409bc1d05c52ecb00f5e42ab8ff280e7149f2eb854728f628fb2a0a161a5b",
+ "sha256:a1b81849061c67c2ffaa6ed27aa3d9b0762e71e68e784e24b0330b7b1c67470a",
+ "sha256:a5edbcb8289681fcb5ded7542f2b7dd456489e83007a95e32fcaf55e9f18603e",
+ "sha256:a661d4b9b314327dec1e92ed57e591e8e5eb055700e0ba9e9687f734d922dcb6",
+ "sha256:b005502c59835f9ba3c3f8742f64c19eeb3db41eae1a89b035a559b39b421803",
+ "sha256:b01faf7934c606d5050cf055c1d03943180f23d995d68d04cf50c80d1ef2c65a",
+ "sha256:b0fa666fecdb1b118d37823937e9237afa17fe734fc4dbe6dd642e1e4cca0246",
+ "sha256:c54734a6eb3be544d332e65c846236d02e5fc71325e8c53af91e83a46b87b506",
+ "sha256:c6b6969c529521c86884a13745a4b68930db1ef2e051735c0f479d0a7adb25b6",
+ "sha256:ca382028cdfd2d79b7704b2acb8ae1fb54e9e1a03a6765e1895ba89a6fcfaba1",
+ "sha256:ca5209ef89f7607be47a308fa92308cf079805ed556ecda672f00039a26e366f",
+ "sha256:d03009a26f7edca9f0a581aa5d3153242b815b858cb4790e34a955afb303c6ba",
+ "sha256:d751f8beb383c4a5a95625d7ccc1ab183b98b02c6a88924814ea7fbff530872d",
+ "sha256:dad2501603f954f222a6e555413c454a5f8d763ab910fbab3855bcdfef6b3148",
+ "sha256:dbba883c2b6d63949bc98ab1950bc22cf7c8d4e8cb68de6edde49d3cccd8fd26",
+ "sha256:e02f6ba10a3d4e289fa7ae91b301783a750d118b60f17924ca05e506c7d29bc8",
+ "sha256:f0ef1dafb4eadeaca58aec8c721a5a73d551064b0c63d57fa003e233277c642e",
+ "sha256:f29627d66ae816837fd32c9450dc9c54780962cd74d034513ed829ba3ab46652",
+ "sha256:f3a99ed422c38bd1bc893cb2cb2cea6d64173ec30927f699e95f5f58bdf625cf"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==1.48.1"
+ },
+ "idna": {
+ "hashes": [
+ "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4",
+ "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==3.4"
+ },
+ "importlib-metadata": {
+ "hashes": [
+ "sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670",
+ "sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==4.12.0"
+ },
+ "jinja2": {
+ "hashes": [
+ "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852",
+ "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==3.1.2"
+ },
+ "jsonschema": {
+ "hashes": [
+ "sha256:165059f076eff6971bae5b742fc029a7b4ef3f9bcf04c14e4776a7605de14b23",
+ "sha256:9e74b8f9738d6a946d70705dc692b74b5429cd0960d58e79ffecfc43b2221eb9"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==4.16.0"
+ },
+ "kiwisolver": {
+ "hashes": [
+ "sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b",
+ "sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166",
+ "sha256:1041feb4cda8708ce73bb4dcb9ce1ccf49d553bf87c3954bdfa46f0c3f77252c",
+ "sha256:10ee06759482c78bdb864f4109886dff7b8a56529bc1609d4f1112b93fe6423c",
+ "sha256:1d1573129aa0fd901076e2bfb4275a35f5b7aa60fbfb984499d661ec950320b0",
+ "sha256:283dffbf061a4ec60391d51e6155e372a1f7a4f5b15d59c8505339454f8989e4",
+ "sha256:28bc5b299f48150b5f822ce68624e445040595a4ac3d59251703779836eceff9",
+ "sha256:2a66fdfb34e05b705620dd567f5a03f239a088d5a3f321e7b6ac3239d22aa286",
+ "sha256:2e307eb9bd99801f82789b44bb45e9f541961831c7311521b13a6c85afc09767",
+ "sha256:2e407cb4bd5a13984a6c2c0fe1845e4e41e96f183e5e5cd4d77a857d9693494c",
+ "sha256:2f5e60fabb7343a836360c4f0919b8cd0d6dbf08ad2ca6b9cf90bf0c76a3c4f6",
+ "sha256:36dafec3d6d6088d34e2de6b85f9d8e2324eb734162fba59d2ba9ed7a2043d5b",
+ "sha256:3fe20f63c9ecee44560d0e7f116b3a747a5d7203376abeea292ab3152334d004",
+ "sha256:41dae968a94b1ef1897cb322b39360a0812661dba7c682aa45098eb8e193dbdf",
+ "sha256:4bd472dbe5e136f96a4b18f295d159d7f26fd399136f5b17b08c4e5f498cd494",
+ "sha256:4ea39b0ccc4f5d803e3337dd46bcce60b702be4d86fd0b3d7531ef10fd99a1ac",
+ "sha256:5853eb494c71e267912275e5586fe281444eb5e722de4e131cddf9d442615626",
+ "sha256:5bce61af018b0cb2055e0e72e7d65290d822d3feee430b7b8203d8a855e78766",
+ "sha256:6295ecd49304dcf3bfbfa45d9a081c96509e95f4b9d0eb7ee4ec0530c4a96514",
+ "sha256:62ac9cc684da4cf1778d07a89bf5f81b35834cb96ca523d3a7fb32509380cbf6",
+ "sha256:70e7c2e7b750585569564e2e5ca9845acfaa5da56ac46df68414f29fea97be9f",
+ "sha256:7577c1987baa3adc4b3c62c33bd1118c3ef5c8ddef36f0f2c950ae0b199e100d",
+ "sha256:75facbe9606748f43428fc91a43edb46c7ff68889b91fa31f53b58894503a191",
+ "sha256:787518a6789009c159453da4d6b683f468ef7a65bbde796bcea803ccf191058d",
+ "sha256:78d6601aed50c74e0ef02f4204da1816147a6d3fbdc8b3872d263338a9052c51",
+ "sha256:7c43e1e1206cd421cd92e6b3280d4385d41d7166b3ed577ac20444b6995a445f",
+ "sha256:81e38381b782cc7e1e46c4e14cd997ee6040768101aefc8fa3c24a4cc58e98f8",
+ "sha256:841293b17ad704d70c578f1f0013c890e219952169ce8a24ebc063eecf775454",
+ "sha256:872b8ca05c40d309ed13eb2e582cab0c5a05e81e987ab9c521bf05ad1d5cf5cb",
+ "sha256:877272cf6b4b7e94c9614f9b10140e198d2186363728ed0f701c6eee1baec1da",
+ "sha256:8c808594c88a025d4e322d5bb549282c93c8e1ba71b790f539567932722d7bd8",
+ "sha256:8ed58b8acf29798b036d347791141767ccf65eee7f26bde03a71c944449e53de",
+ "sha256:91672bacaa030f92fc2f43b620d7b337fd9a5af28b0d6ed3f77afc43c4a64b5a",
+ "sha256:968f44fdbf6dd757d12920d63b566eeb4d5b395fd2d00d29d7ef00a00582aac9",
+ "sha256:9f85003f5dfa867e86d53fac6f7e6f30c045673fa27b603c397753bebadc3008",
+ "sha256:a553dadda40fef6bfa1456dc4be49b113aa92c2a9a9e8711e955618cd69622e3",
+ "sha256:a68b62a02953b9841730db7797422f983935aeefceb1679f0fc85cbfbd311c32",
+ "sha256:abbe9fa13da955feb8202e215c4018f4bb57469b1b78c7a4c5c7b93001699938",
+ "sha256:ad881edc7ccb9d65b0224f4e4d05a1e85cf62d73aab798943df6d48ab0cd79a1",
+ "sha256:b1792d939ec70abe76f5054d3f36ed5656021dcad1322d1cc996d4e54165cef9",
+ "sha256:b428ef021242344340460fa4c9185d0b1f66fbdbfecc6c63eff4b7c29fad429d",
+ "sha256:b533558eae785e33e8c148a8d9921692a9fe5aa516efbdff8606e7d87b9d5824",
+ "sha256:ba59c92039ec0a66103b1d5fe588fa546373587a7d68f5c96f743c3396afc04b",
+ "sha256:bc8d3bd6c72b2dd9decf16ce70e20abcb3274ba01b4e1c96031e0c4067d1e7cd",
+ "sha256:bc9db8a3efb3e403e4ecc6cd9489ea2bac94244f80c78e27c31dcc00d2790ac2",
+ "sha256:bf7d9fce9bcc4752ca4a1b80aabd38f6d19009ea5cbda0e0856983cf6d0023f5",
+ "sha256:c2dbb44c3f7e6c4d3487b31037b1bdbf424d97687c1747ce4ff2895795c9bf69",
+ "sha256:c79ebe8f3676a4c6630fd3f777f3cfecf9289666c84e775a67d1d358578dc2e3",
+ "sha256:c97528e64cb9ebeff9701e7938653a9951922f2a38bd847787d4a8e498cc83ae",
+ "sha256:d0611a0a2a518464c05ddd5a3a1a0e856ccc10e67079bb17f265ad19ab3c7597",
+ "sha256:d06adcfa62a4431d404c31216f0f8ac97397d799cd53800e9d3efc2fbb3cf14e",
+ "sha256:d41997519fcba4a1e46eb4a2fe31bc12f0ff957b2b81bac28db24744f333e955",
+ "sha256:d5b61785a9ce44e5a4b880272baa7cf6c8f48a5180c3e81c59553ba0cb0821ca",
+ "sha256:da152d8cdcab0e56e4f45eb08b9aea6455845ec83172092f09b0e077ece2cf7a",
+ "sha256:da7e547706e69e45d95e116e6939488d62174e033b763ab1496b4c29b76fabea",
+ "sha256:db5283d90da4174865d520e7366801a93777201e91e79bacbac6e6927cbceede",
+ "sha256:db608a6757adabb32f1cfe6066e39b3706d8c3aa69bbc353a5b61edad36a5cb4",
+ "sha256:e0ea21f66820452a3f5d1655f8704a60d66ba1191359b96541eaf457710a5fc6",
+ "sha256:e7da3fec7408813a7cebc9e4ec55afed2d0fd65c4754bc376bf03498d4e92686",
+ "sha256:e92a513161077b53447160b9bd8f522edfbed4bd9759e4c18ab05d7ef7e49408",
+ "sha256:ecb1fa0db7bf4cff9dac752abb19505a233c7f16684c5826d1f11ebd9472b871",
+ "sha256:efda5fc8cc1c61e4f639b8067d118e742b812c930f708e6667a5ce0d13499e29",
+ "sha256:f0a1dbdb5ecbef0d34eb77e56fcb3e95bbd7e50835d9782a45df81cc46949750",
+ "sha256:f0a71d85ecdd570ded8ac3d1c0f480842f49a40beb423bb8014539a9f32a5897",
+ "sha256:f4f270de01dd3e129a72efad823da90cc4d6aafb64c410c9033aba70db9f1ff0",
+ "sha256:f6cb459eea32a4e2cf18ba5fcece2dbdf496384413bc1bae15583f19e567f3b2",
+ "sha256:f8ad8285b01b0d4695102546b342b493b3ccc6781fc28c8c6a1bb63e95d22f09",
+ "sha256:f9f39e2f049db33a908319cf46624a569b36983c7c78318e9726a4cb8923b26c"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==1.4.4"
+ },
+ "markdown": {
+ "hashes": [
+ "sha256:08fb8465cffd03d10b9dd34a5c3fea908e20391a2a90b88d66362cb05beed186",
+ "sha256:3b809086bb6efad416156e00a0da66fe47618a5d6918dd688f53f40c8e4cfeff"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==3.4.1"
+ },
+ "markupsafe": {
+ "hashes": [
+ "sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003",
+ "sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88",
+ "sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5",
+ "sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7",
+ "sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a",
+ "sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603",
+ "sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1",
+ "sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135",
+ "sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247",
+ "sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6",
+ "sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601",
+ "sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77",
+ "sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02",
+ "sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e",
+ "sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63",
+ "sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f",
+ "sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980",
+ "sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b",
+ "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812",
+ "sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff",
+ "sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96",
+ "sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1",
+ "sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925",
+ "sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a",
+ "sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6",
+ "sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e",
+ "sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f",
+ "sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4",
+ "sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f",
+ "sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3",
+ "sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c",
+ "sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a",
+ "sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417",
+ "sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a",
+ "sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a",
+ "sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37",
+ "sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452",
+ "sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933",
+ "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a",
+ "sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==2.1.1"
+ },
+ "matplotlib": {
+ "hashes": [
+ "sha256:0bcdfcb0f976e1bac6721d7d457c17be23cf7501f977b6a38f9d38a3762841f7",
+ "sha256:1e64ac9be9da6bfff0a732e62116484b93b02a0b4d4b19934fb4f8e7ad26ad6a",
+ "sha256:22227c976ad4dc8c5a5057540421f0d8708c6560744ad2ad638d48e2984e1dbc",
+ "sha256:2886cc009f40e2984c083687251821f305d811d38e3df8ded414265e4583f0c5",
+ "sha256:2e6d184ebe291b9e8f7e78bbab7987d269c38ea3e062eace1fe7d898042ef804",
+ "sha256:3211ba82b9f1518d346f6309df137b50c3dc4421b4ed4815d1d7eadc617f45a1",
+ "sha256:339cac48b80ddbc8bfd05daae0a3a73414651a8596904c2a881cfd1edb65f26c",
+ "sha256:35a8ad4dddebd51f94c5d24bec689ec0ec66173bf614374a1244c6241c1595e0",
+ "sha256:3b4fa56159dc3c7f9250df88f653f085068bcd32dcd38e479bba58909254af7f",
+ "sha256:43e9d3fa077bf0cc95ded13d331d2156f9973dce17c6f0c8b49ccd57af94dbd9",
+ "sha256:57f1b4e69f438a99bb64d7f2c340db1b096b41ebaa515cf61ea72624279220ce",
+ "sha256:5c096363b206a3caf43773abebdbb5a23ea13faef71d701b21a9c27fdcef72f4",
+ "sha256:6bb93a0492d68461bd458eba878f52fdc8ac7bdb6c4acdfe43dba684787838c2",
+ "sha256:6ea6aef5c4338e58d8d376068e28f80a24f54e69f09479d1c90b7172bad9f25b",
+ "sha256:6fe807e8a22620b4cd95cfbc795ba310dc80151d43b037257250faf0bfcd82bc",
+ "sha256:73dd93dc35c85dece610cca8358003bf0760d7986f70b223e2306b4ea6d1406b",
+ "sha256:839d47b8ead7ad9669aaacdbc03f29656dc21f0d41a6fea2d473d856c39c8b1c",
+ "sha256:874df7505ba820e0400e7091199decf3ff1fde0583652120c50cd60d5820ca9a",
+ "sha256:879c7e5fce4939c6aa04581dfe08d57eb6102a71f2e202e3314d5fbc072fd5a0",
+ "sha256:94ff86af56a3869a4ae26a9637a849effd7643858a1a04dd5ee50e9ab75069a7",
+ "sha256:99482b83ebf4eb6d5fc6813d7aacdefdd480f0d9c0b52dcf9f1cc3b2c4b3361a",
+ "sha256:9ab29589cef03bc88acfa3a1490359000c18186fc30374d8aa77d33cc4a51a4a",
+ "sha256:9befa5954cdbc085e37d974ff6053da269474177921dd61facdad8023c4aeb51",
+ "sha256:a206a1b762b39398efea838f528b3a6d60cdb26fe9d58b48265787e29cd1d693",
+ "sha256:ab8d26f07fe64f6f6736d635cce7bfd7f625320490ed5bfc347f2cdb4fae0e56",
+ "sha256:b28de401d928890187c589036857a270a032961411934bdac4cf12dde3d43094",
+ "sha256:b428076a55fb1c084c76cb93e68006f27d247169f056412607c5c88828d08f88",
+ "sha256:bf618a825deb6205f015df6dfe6167a5d9b351203b03fab82043ae1d30f16511",
+ "sha256:c995f7d9568f18b5db131ab124c64e51b6820a92d10246d4f2b3f3a66698a15b",
+ "sha256:cd45a6f3e93a780185f70f05cf2a383daed13c3489233faad83e81720f7ede24",
+ "sha256:d2484b350bf3d32cae43f85dcfc89b3ed7bd2bcd781ef351f93eb6fb2cc483f9",
+ "sha256:d62880e1f60e5a30a2a8484432bcb3a5056969dc97258d7326ad465feb7ae069",
+ "sha256:dacddf5bfcec60e3f26ec5c0ae3d0274853a258b6c3fc5ef2f06a8eb23e042be",
+ "sha256:f3840c280ebc87a48488a46f760ea1c0c0c83fcf7abbe2e6baf99d033fd35fd8",
+ "sha256:f814504e459c68118bf2246a530ed953ebd18213dc20e3da524174d84ed010b2"
+ ],
+ "index": "pypi",
+ "version": "==3.5.3"
+ },
+ "numpy": {
+ "hashes": [
+ "sha256:004f0efcb2fe1c0bd6ae1fcfc69cc8b6bf2407e0f18be308612007a0762b4089",
+ "sha256:09f6b7bdffe57fc61d869a22f506049825d707b288039d30f26a0d0d8ea05164",
+ "sha256:0ea3f98a0ffce3f8f57675eb9119f3f4edb81888b6874bc1953f91e0b1d4f440",
+ "sha256:17c0e467ade9bda685d5ac7f5fa729d8d3e76b23195471adae2d6a6941bd2c18",
+ "sha256:1f27b5322ac4067e67c8f9378b41c746d8feac8bdd0e0ffede5324667b8a075c",
+ "sha256:22d43376ee0acd547f3149b9ec12eec2f0ca4a6ab2f61753c5b29bb3e795ac4d",
+ "sha256:2ad3ec9a748a8943e6eb4358201f7e1c12ede35f510b1a2221b70af4bb64295c",
+ "sha256:301c00cf5e60e08e04d842fc47df641d4a181e651c7135c50dc2762ffe293dbd",
+ "sha256:39a664e3d26ea854211867d20ebcc8023257c1800ae89773cbba9f9e97bae036",
+ "sha256:51bf49c0cd1d52be0a240aa66f3458afc4b95d8993d2d04f0d91fa60c10af6cd",
+ "sha256:78a63d2df1d947bd9d1b11d35564c2f9e4b57898aae4626638056ec1a231c40c",
+ "sha256:7cd1328e5bdf0dee621912f5833648e2daca72e3839ec1d6695e91089625f0b4",
+ "sha256:8355fc10fd33a5a70981a5b8a0de51d10af3688d7a9e4a34fcc8fa0d7467bb7f",
+ "sha256:8c79d7cf86d049d0c5089231a5bcd31edb03555bd93d81a16870aa98c6cfb79d",
+ "sha256:91b8d6768a75247026e951dce3b2aac79dc7e78622fc148329135ba189813584",
+ "sha256:94c15ca4e52671a59219146ff584488907b1f9b3fc232622b47e2cf832e94fb8",
+ "sha256:98dcbc02e39b1658dc4b4508442a560fe3ca5ca0d989f0df062534e5ca3a5c1a",
+ "sha256:a64403f634e5ffdcd85e0b12c08f04b3080d3e840aef118721021f9b48fc1460",
+ "sha256:bc6e8da415f359b578b00bcfb1d08411c96e9a97f9e6c7adada554a0812a6cc6",
+ "sha256:bdc9febce3e68b697d931941b263c59e0c74e8f18861f4064c1f712562903411",
+ "sha256:c1ba66c48b19cc9c2975c0d354f24058888cdc674bebadceb3cdc9ec403fb5d1",
+ "sha256:c9f707b5bb73bf277d812ded9896f9512a43edff72712f31667d0a8c2f8e71ee",
+ "sha256:d5422d6a1ea9b15577a9432e26608c73a78faf0b9039437b075cf322c92e98e7",
+ "sha256:e5d5420053bbb3dd64c30e58f9363d7a9c27444c3648e61460c1237f9ec3fa14",
+ "sha256:e868b0389c5ccfc092031a861d4e158ea164d8b7fdbb10e3b5689b4fc6498df6",
+ "sha256:efd9d3abe5774404becdb0748178b48a218f1d8c44e0375475732211ea47c67e",
+ "sha256:f8c02ec3c4c4fcb718fdf89a6c6f709b14949408e8cf2a2be5bfa9c49548fd85",
+ "sha256:ffcf105ecdd9396e05a8e58e81faaaf34d3f9875f137c7372450baa5d77c9a54"
+ ],
+ "index": "pypi",
+ "version": "==1.23.3"
+ },
+ "oauthlib": {
+ "hashes": [
+ "sha256:1565237372795bf6ee3e5aba5e2a85bd5a65d0e2aa5c628b9a97b7d7a0da3721",
+ "sha256:88e912ca1ad915e1dcc1c06fc9259d19de8deacd6fd17cc2df266decc2e49066"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==3.2.1"
+ },
+ "opencv-python": {
+ "hashes": [
+ "sha256:0dc82a3d8630c099d2f3ac1b1aabee164e8188db54a786abb7a4e27eba309440",
+ "sha256:5af8ba35a4fcb8913ffb86e92403e9a656a4bff4a645d196987468f0f8947875",
+ "sha256:6e32af22e3202748bd233ed8f538741876191863882eba44e332d1a34993165b",
+ "sha256:c5bfae41ad4031e66bb10ec4a0a2ffd3e514d092652781e8b1ac98d1b59f1158",
+ "sha256:dbdc84a9b4ea2cbae33861652d25093944b9959279200b7ae0badd32439f74de",
+ "sha256:e6e448b62afc95c5b58f97e87ef84699e6607fe5c58730a03301c52496005cae",
+ "sha256:f482e78de6e7b0b060ff994ffd859bddc3f7f382bb2019ef157b0ea8ca8712f5"
+ ],
+ "index": "pypi",
+ "version": "==4.6.0.66"
+ },
+ "packaging": {
+ "hashes": [
+ "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb",
+ "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==21.3"
+ },
+ "pandas": {
+ "hashes": [
+ "sha256:050aada67a5ec6699a7879e769825b510018a95fb9ac462bb1867483d0974a97",
+ "sha256:0959c41004e3d2d16f39c828d6da66ebee329836a7ecee49fb777ac9ad8a7501",
+ "sha256:4591cadd06fbbbd16fafc2de6e840c1aaefeae3d5864b688004777ef1bbdede3",
+ "sha256:51c424ca134fdaeac9a4acd719d1ab48046afc60943a489028f0413fdbe9ef1c",
+ "sha256:785e878a6e6d8ddcdb8c181e600855402750052497d7fc6d6b508894f6b8830b",
+ "sha256:799e6a25932df7e6b1f8dabf63de064e2205dc309abb75956126a0453fd88e97",
+ "sha256:7cd1d69a387f7d5e1a5a06a87574d9ef2433847c0e78113ab51c84d3a8bcaeaa",
+ "sha256:87b4194f344dcd14c0f885cecb22005329b38bda10f1aaf7b9596a00ec8a4768",
+ "sha256:8d4d2fe2863ecddb0ba1979bdda26c8bc2ea138f5a979abe3ba80c0fa4015c91",
+ "sha256:94f2ed1fd51e545ebf71da1e942fe1822ee01e10d3dd2a7276d01351333b7c6b",
+ "sha256:9d2a7a3c1fea668d56bd91edbd5f2732e0af8feb9d2bf8d9bfacb2dea5fa9536",
+ "sha256:9d805bce209714b1c1fa29bfb1e42ad87e4c0a825e4b390c56a3e71593b7e8d8",
+ "sha256:a08ceb59db499864c58a9bf85ab6219d527d91f14c0240cc25fa2c261032b2a7",
+ "sha256:a981cfabf51c318a562deb4ae7deec594c07aee7cf18b4594a92c23718ec8275",
+ "sha256:ab6c0d738617b675183e5f28db32b5148b694ad9bba0a40c3ea26d96b431db67",
+ "sha256:afbddad78a98ec4d2ce08b384b81730de1ccc975b99eb663e6dac43703f36d98",
+ "sha256:c4bb8b0ab9f94207d07e401d24baebfc63057246b1a5e0cd9ee50df85a656871",
+ "sha256:ce35f947202b0b99c660221d82beb91d2e6d553d55a40b30128204e3e2c63848",
+ "sha256:d0022fe6a313df1c4869b5edc012d734c6519a6fffa3cf70930f32e6a1078e49",
+ "sha256:e7cc960959be28d064faefc0cb2aef854d46b827c004ebea7e79b5497ed83e7d",
+ "sha256:ee6f1848148ed3204235967613b0a32be2d77f214e9623f554511047705c1e04"
+ ],
+ "index": "pypi",
+ "version": "==1.4.4"
+ },
+ "pillow": {
+ "hashes": [
+ "sha256:0030fdbd926fb85844b8b92e2f9449ba89607231d3dd597a21ae72dc7fe26927",
+ "sha256:030e3460861488e249731c3e7ab59b07c7853838ff3b8e16aac9561bb345da14",
+ "sha256:0ed2c4ef2451de908c90436d6e8092e13a43992f1860275b4d8082667fbb2ffc",
+ "sha256:136659638f61a251e8ed3b331fc6ccd124590eeff539de57c5f80ef3a9594e58",
+ "sha256:13b725463f32df1bfeacbf3dd197fb358ae8ebcd8c5548faa75126ea425ccb60",
+ "sha256:1536ad017a9f789430fb6b8be8bf99d2f214c76502becc196c6f2d9a75b01b76",
+ "sha256:15928f824870535c85dbf949c09d6ae7d3d6ac2d6efec80f3227f73eefba741c",
+ "sha256:17d4cafe22f050b46d983b71c707162d63d796a1235cdf8b9d7a112e97b15bac",
+ "sha256:1802f34298f5ba11d55e5bb09c31997dc0c6aed919658dfdf0198a2fe75d5490",
+ "sha256:1cc1d2451e8a3b4bfdb9caf745b58e6c7a77d2e469159b0d527a4554d73694d1",
+ "sha256:1fd6f5e3c0e4697fa7eb45b6e93996299f3feee73a3175fa451f49a74d092b9f",
+ "sha256:254164c57bab4b459f14c64e93df11eff5ded575192c294a0c49270f22c5d93d",
+ "sha256:2ad0d4df0f5ef2247e27fc790d5c9b5a0af8ade9ba340db4a73bb1a4a3e5fb4f",
+ "sha256:2c58b24e3a63efd22554c676d81b0e57f80e0a7d3a5874a7e14ce90ec40d3069",
+ "sha256:2d33a11f601213dcd5718109c09a52c2a1c893e7461f0be2d6febc2879ec2402",
+ "sha256:336b9036127eab855beec9662ac3ea13a4544a523ae273cbf108b228ecac8437",
+ "sha256:337a74fd2f291c607d220c793a8135273c4c2ab001b03e601c36766005f36885",
+ "sha256:37ff6b522a26d0538b753f0b4e8e164fdada12db6c6f00f62145d732d8a3152e",
+ "sha256:3d1f14f5f691f55e1b47f824ca4fdcb4b19b4323fe43cc7bb105988cad7496be",
+ "sha256:4134d3f1ba5f15027ff5c04296f13328fecd46921424084516bdb1b2548e66ff",
+ "sha256:4ad2f835e0ad81d1689f1b7e3fbac7b01bb8777d5a985c8962bedee0cc6d43da",
+ "sha256:50dff9cc21826d2977ef2d2a205504034e3a4563ca6f5db739b0d1026658e004",
+ "sha256:510cef4a3f401c246cfd8227b300828715dd055463cdca6176c2e4036df8bd4f",
+ "sha256:5aed7dde98403cd91d86a1115c78d8145c83078e864c1de1064f52e6feb61b20",
+ "sha256:69bd1a15d7ba3694631e00df8de65a8cb031911ca11f44929c97fe05eb9b6c1d",
+ "sha256:6bf088c1ce160f50ea40764f825ec9b72ed9da25346216b91361eef8ad1b8f8c",
+ "sha256:6e8c66f70fb539301e064f6478d7453e820d8a2c631da948a23384865cd95544",
+ "sha256:74a04183e6e64930b667d321524e3c5361094bb4af9083db5c301db64cd341f3",
+ "sha256:75e636fd3e0fb872693f23ccb8a5ff2cd578801251f3a4f6854c6a5d437d3c04",
+ "sha256:7761afe0126d046974a01e030ae7529ed0ca6a196de3ec6937c11df0df1bc91c",
+ "sha256:7888310f6214f19ab2b6df90f3f06afa3df7ef7355fc025e78a3044737fab1f5",
+ "sha256:7b0554af24df2bf96618dac71ddada02420f946be943b181108cac55a7a2dcd4",
+ "sha256:7c7b502bc34f6e32ba022b4a209638f9e097d7a9098104ae420eb8186217ebbb",
+ "sha256:808add66ea764ed97d44dda1ac4f2cfec4c1867d9efb16a33d158be79f32b8a4",
+ "sha256:831e648102c82f152e14c1a0938689dbb22480c548c8d4b8b248b3e50967b88c",
+ "sha256:93689632949aff41199090eff5474f3990b6823404e45d66a5d44304e9cdc467",
+ "sha256:96b5e6874431df16aee0c1ba237574cb6dff1dcb173798faa6a9d8b399a05d0e",
+ "sha256:9a54614049a18a2d6fe156e68e188da02a046a4a93cf24f373bffd977e943421",
+ "sha256:a138441e95562b3c078746a22f8fca8ff1c22c014f856278bdbdd89ca36cff1b",
+ "sha256:a647c0d4478b995c5e54615a2e5360ccedd2f85e70ab57fbe817ca613d5e63b8",
+ "sha256:a9c9bc489f8ab30906d7a85afac4b4944a572a7432e00698a7239f44a44e6efb",
+ "sha256:ad2277b185ebce47a63f4dc6302e30f05762b688f8dc3de55dbae4651872cdf3",
+ "sha256:adabc0bce035467fb537ef3e5e74f2847c8af217ee0be0455d4fec8adc0462fc",
+ "sha256:b6d5e92df2b77665e07ddb2e4dbd6d644b78e4c0d2e9272a852627cdba0d75cf",
+ "sha256:bc431b065722a5ad1dfb4df354fb9333b7a582a5ee39a90e6ffff688d72f27a1",
+ "sha256:bdd0de2d64688ecae88dd8935012c4a72681e5df632af903a1dca8c5e7aa871a",
+ "sha256:c79698d4cd9318d9481d89a77e2d3fcaeff5486be641e60a4b49f3d2ecca4e28",
+ "sha256:cb6259196a589123d755380b65127ddc60f4c64b21fc3bb46ce3a6ea663659b0",
+ "sha256:d5b87da55a08acb586bad5c3aa3b86505f559b84f39035b233d5bf844b0834b1",
+ "sha256:dcd7b9c7139dc8258d164b55696ecd16c04607f1cc33ba7af86613881ffe4ac8",
+ "sha256:dfe4c1fedfde4e2fbc009d5ad420647f7730d719786388b7de0999bf32c0d9fd",
+ "sha256:ea98f633d45f7e815db648fd7ff0f19e328302ac36427343e4432c84432e7ff4",
+ "sha256:ec52c351b35ca269cb1f8069d610fc45c5bd38c3e91f9ab4cbbf0aebc136d9c8",
+ "sha256:eef7592281f7c174d3d6cbfbb7ee5984a671fcd77e3fc78e973d492e9bf0eb3f",
+ "sha256:f07f1f00e22b231dd3d9b9208692042e29792d6bd4f6639415d2f23158a80013",
+ "sha256:f3fac744f9b540148fa7715a435d2283b71f68bfb6d4aae24482a890aed18b59",
+ "sha256:fa768eff5f9f958270b081bb33581b4b569faabf8774726b283edb06617101dc",
+ "sha256:fac2d65901fb0fdf20363fbd345c01958a742f2dc62a8dd4495af66e3ff502a4"
+ ],
+ "index": "pypi",
+ "version": "==9.2.0"
+ },
+ "protobuf": {
+ "hashes": [
+ "sha256:1867f93b06a183f87696871bb8d1e99ee71dbb69d468ce1f0cc8bf3d30f982f3",
+ "sha256:3c4160b601220627f7e91154e572baf5e161a9c3f445a8242d536ee3d0b7b17c",
+ "sha256:4ee2af7051d3b10c8a4fe6fd1a2c69f201fea36aeee7086cf202a692e1b99ee1",
+ "sha256:5266c36cc0af3bb3dbf44f199d225b33da66a9a5c3bdc2b14865ad10eddf0e37",
+ "sha256:5470f892961af464ae6eaf0f3099e2c1190ae8c7f36f174b89491281341f79ca",
+ "sha256:66d14b5b90090353efe75c9fb1bf65ef7267383034688d255b500822e37d5c2f",
+ "sha256:67efb5d20618020aa9596e17bfc37ca068c28ec0c1507d9507f73c93d46c9855",
+ "sha256:696e6cfab94cc15a14946f2bf72719dced087d437adbd994fff34f38986628bc",
+ "sha256:6a02172b9650f819d01fb8e224fc69b0706458fc1ab4f1c669281243c71c1a5e",
+ "sha256:6eca9ae238ba615d702387a2ddea635d535d769994a9968c09a4ca920c487ab9",
+ "sha256:950abd6c00e7b51f87ae8b18a0ce4d69fea217f62f171426e77de5061f6d9850",
+ "sha256:9e1d74032f56ff25f417cfe84c8147047732e5059137ca42efad20cbbd25f5e0",
+ "sha256:9e42b1cf2ecd8a1bd161239e693f22035ba99905ae6d7efeac8a0546c7ec1a27",
+ "sha256:9f957ef53e872d58a0afd3bf6d80d48535d28c99b40e75e6634cbc33ea42fd54",
+ "sha256:a89aa0c042e61e11ade320b802d6db4ee5391d8d973e46d3a48172c1597789f8",
+ "sha256:c0f80876a8ff0ae7064084ed094eb86497bd5a3812e6fc96a05318b92301674e",
+ "sha256:c44e3282cff74ad18c7e8a0375f407f69ee50c2116364b44492a196293e08b21",
+ "sha256:d249519ba5ecf5dd6b18150c9b6bcde510b273714b696f3923ff8308fc11ae49",
+ "sha256:d3973a2d58aefc7d1230725c2447ce7f86a71cbc094b86a77c6ee1505ac7cdb1",
+ "sha256:dca2284378a5f2a86ffed35c6ac147d14c48b525eefcd1083e5a9ce28dfa8657",
+ "sha256:e63b0b3c42e51c94add62b010366cd4979cb6d5f06158bcae8faac4c294f91e1",
+ "sha256:f2b599a21c9a32e171ec29a2ac54e03297736c578698e11b099d031f79da114b",
+ "sha256:f2bde37667b18c2b5280df83bc799204394a5d2d774e4deaf9de0eb741df6833",
+ "sha256:f4f909f4dde413dec435a44b0894956d55bb928ded7d6e3c726556ca4c796e84",
+ "sha256:f976234e20ab2785f54224bcdafa027674e23663b132fa3ca0caa291a6cfbde7",
+ "sha256:f9cebda093c2f6bfed88f1c17cdade09d4d96096421b344026feee236532d4de"
+ ],
+ "markers": "python_version >= '3.5'",
+ "version": "==3.19.5"
+ },
+ "psutil": {
+ "hashes": [
+ "sha256:14b29f581b5edab1f133563272a6011925401804d52d603c5c606936b49c8b97",
+ "sha256:256098b4f6ffea6441eb54ab3eb64db9ecef18f6a80d7ba91549195d55420f84",
+ "sha256:39ec06dc6c934fb53df10c1672e299145ce609ff0611b569e75a88f313634969",
+ "sha256:404f4816c16a2fcc4eaa36d7eb49a66df2d083e829d3e39ee8759a411dbc9ecf",
+ "sha256:42638876b7f5ef43cef8dcf640d3401b27a51ee3fa137cb2aa2e72e188414c32",
+ "sha256:4642fd93785a29353d6917a23e2ac6177308ef5e8be5cc17008d885cb9f70f12",
+ "sha256:4fb54941aac044a61db9d8eb56fc5bee207db3bc58645d657249030e15ba3727",
+ "sha256:561dec454853846d1dd0247b44c2e66a0a0c490f937086930ec4b8f83bf44f06",
+ "sha256:5d39e3a2d5c40efa977c9a8dd4f679763c43c6c255b1340a56489955dbca767c",
+ "sha256:614337922702e9be37a39954d67fdb9e855981624d8011a9927b8f2d3c9625d9",
+ "sha256:67b33f27fc0427483b61563a16c90d9f3b547eeb7af0ef1b9fe024cdc9b3a6ea",
+ "sha256:68b35cbff92d1f7103d8f1db77c977e72f49fcefae3d3d2b91c76b0e7aef48b8",
+ "sha256:7cbb795dcd8ed8fd238bc9e9f64ab188f3f4096d2e811b5a82da53d164b84c3f",
+ "sha256:8f024fbb26c8daf5d70287bb3edfafa22283c255287cf523c5d81721e8e5d82c",
+ "sha256:91aa0dac0c64688667b4285fa29354acfb3e834e1fd98b535b9986c883c2ce1d",
+ "sha256:94e621c6a4ddb2573d4d30cba074f6d1aa0186645917df42c811c473dd22b339",
+ "sha256:9770c1d25aee91417eba7869139d629d6328a9422ce1cdd112bd56377ca98444",
+ "sha256:b1928b9bf478d31fdffdb57101d18f9b70ed4e9b0e41af751851813547b2a9ab",
+ "sha256:b2f248ffc346f4f4f0d747ee1947963613216b06688be0be2e393986fe20dbbb",
+ "sha256:b315febaebae813326296872fdb4be92ad3ce10d1d742a6b0c49fb619481ed0b",
+ "sha256:b3591616fa07b15050b2f87e1cdefd06a554382e72866fcc0ab2be9d116486c8",
+ "sha256:b4018d5f9b6651f9896c7a7c2c9f4652e4eea53f10751c4e7d08a9093ab587ec",
+ "sha256:d75291912b945a7351d45df682f9644540d564d62115d4a20d45fa17dc2d48f8",
+ "sha256:dc9bda7d5ced744622f157cc8d8bdd51735dafcecff807e928ff26bdb0ff097d",
+ "sha256:e3ac2c0375ef498e74b9b4ec56df3c88be43fe56cac465627572dbfb21c4be34",
+ "sha256:e4c4a7636ffc47b7141864f1c5e7d649f42c54e49da2dd3cceb1c5f5d29bfc85",
+ "sha256:ed29ea0b9a372c5188cdb2ad39f937900a10fb5478dc077283bf86eeac678ef1",
+ "sha256:f40ba362fefc11d6bea4403f070078d60053ed422255bd838cd86a40674364c9",
+ "sha256:f4cb67215c10d4657e320037109939b1c1d2fd70ca3d76301992f89fe2edb1f1",
+ "sha256:f7929a516125f62399d6e8e026129c8835f6c5a3aab88c3fff1a05ee8feb840d",
+ "sha256:fd331866628d18223a4265371fd255774affd86244fc307ef66eaf00de0633d5",
+ "sha256:feb861a10b6c3bb00701063b37e4afc754f8217f0f09c42280586bd6ac712b5c"
+ ],
+ "index": "pypi",
+ "version": "==5.9.2"
+ },
+ "pyarrow": {
+ "hashes": [
+ "sha256:0238998dc692efcb4e41ae74738d7c1234723271ccf520bd8312dca07d49ef8d",
+ "sha256:02b820ecd1da02012092c180447de449fc688d0c3f9ff8526ca301cdd60dacd0",
+ "sha256:1c5a073a930c632058461547e0bc572da1e724b17b6b9eb31a97da13f50cb6e0",
+ "sha256:29eb3e086e2b26202f3a4678316b93cfb15d0e2ba20f3ec12db8fd9cc07cde63",
+ "sha256:2c715eca2092273dcccf6f08437371e04d112f9354245ba2fbe6c801879450b7",
+ "sha256:2e753f8fcf07d8e3a0efa0c8bd51fef5c90281ffd4c5637c08ce42cd0ac297de",
+ "sha256:3eef8a981f45d89de403e81fb83b8119c20824caddf1404274e41a5d66c73806",
+ "sha256:4eebdab05afa23d5d5274b24c1cbeb1ba017d67c280f7d39fd8a8f18cbad2ec9",
+ "sha256:5526a3bfb404ff6d31d62ea582cf2466c7378a474a99ee04d1a9b05de5264541",
+ "sha256:55328348b9139c2b47450d512d716c2248fd58e2f04e2fc23a65e18726666d42",
+ "sha256:767cafb14278165ad539a2918c14c1b73cf20689747c21375c38e3fe62884902",
+ "sha256:7fa56cbd415cef912677270b8e41baad70cde04c6d8a8336eeb2aba85aa93706",
+ "sha256:7fb02bebc13ab55573d1ae9bb5002a6d20ba767bf8569b52fce5301d42495ab7",
+ "sha256:81a60bb291a964f63b2717fb1b28f6615ffab7e8585322bfb8a6738e6b321282",
+ "sha256:8ad430cee28ebc4d6661fc7315747c7a18ae2a74e67498dcb039e1c762a2fb67",
+ "sha256:92f3977e901db1ef5cba30d6cc1d7942b8d94b910c60f89013e8f7bb86a86eef",
+ "sha256:9cef618159567d5f62040f2b79b1c7b38e3885f4ffad0ec97cd2d86f88b67cef",
+ "sha256:a5b390bdcfb8c5b900ef543f911cdfec63e88524fafbcc15f83767202a4a2491",
+ "sha256:d9eb04db626fa24fdfb83c00f76679ca0d98728cdbaa0481b6402bf793a290c0",
+ "sha256:da3e0f319509a5881867effd7024099fb06950a0768dad0d6873668bb88cfaba",
+ "sha256:f11a645a41ee531c3a5edda45dea07c42267f52571f818d388971d33fc7e2d4a",
+ "sha256:f241bd488c2705df930eedfe304ada71191dcf67d6b98ceda0cc934fd2a8388e",
+ "sha256:f59bcd5217a3ae1e17870792f82b2ff92df9f3862996e2c78e156c13e56ff62e",
+ "sha256:f8c46bde1030d704e2796182286d1c56846552c50a39ad5bf5a20c0d8159fc35",
+ "sha256:fc856628acd8d281652c15b6268ec7f27ebcb015abbe99d9baad17f02adc51f1",
+ "sha256:fe2ce795fa1d95e4e940fe5661c3c58aee7181c730f65ac5dd8794a77228de59"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==9.0.0"
+ },
+ "pyasn1": {
+ "hashes": [
+ "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359",
+ "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576",
+ "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf",
+ "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7",
+ "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d",
+ "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00",
+ "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8",
+ "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86",
+ "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12",
+ "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776",
+ "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba",
+ "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2",
+ "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"
+ ],
+ "version": "==0.4.8"
+ },
+ "pyasn1-modules": {
+ "hashes": [
+ "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8",
+ "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199",
+ "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811",
+ "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed",
+ "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4",
+ "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e",
+ "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74",
+ "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb",
+ "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45",
+ "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd",
+ "sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0",
+ "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d",
+ "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405"
+ ],
+ "version": "==0.2.8"
+ },
+ "pydeck": {
+ "hashes": [
+ "sha256:359f3dbdda9dbcf77e0dfb1052d471d4475c1887ba22e22f9a036d3202a0ae23",
+ "sha256:d65bd540c7d44292dca95a4ca1f3def095085740873bc444d1ee00cbc48feedf"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.8.0b3"
+ },
+ "pygments": {
+ "hashes": [
+ "sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1",
+ "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==2.13.0"
+ },
+ "pympler": {
+ "hashes": [
+ "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa",
+ "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==1.0.1"
+ },
+ "pyparsing": {
+ "hashes": [
+ "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb",
+ "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"
+ ],
+ "markers": "python_full_version >= '3.6.8'",
+ "version": "==3.0.9"
+ },
+ "pyrsistent": {
+ "hashes": [
+ "sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c",
+ "sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc",
+ "sha256:4ed6784ceac462a7d6fcb7e9b663e93b9a6fb373b7f43594f9ff68875788e01e",
+ "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26",
+ "sha256:636ce2dc235046ccd3d8c56a7ad54e99d5c1cd0ef07d9ae847306c91d11b5fec",
+ "sha256:6455fc599df93d1f60e1c5c4fe471499f08d190d57eca040c0ea182301321286",
+ "sha256:6bc66318fb7ee012071b2792024564973ecc80e9522842eb4e17743604b5e045",
+ "sha256:7bfe2388663fd18bd8ce7db2c91c7400bf3e1a9e8bd7d63bf7e77d39051b85ec",
+ "sha256:7ec335fc998faa4febe75cc5268a9eac0478b3f681602c1f27befaf2a1abe1d8",
+ "sha256:914474c9f1d93080338ace89cb2acee74f4f666fb0424896fcfb8d86058bf17c",
+ "sha256:b568f35ad53a7b07ed9b1b2bae09eb15cdd671a5ba5d2c66caee40dbf91c68ca",
+ "sha256:cdfd2c361b8a8e5d9499b9082b501c452ade8bbf42aef97ea04854f4a3f43b22",
+ "sha256:d1b96547410f76078eaf66d282ddca2e4baae8964364abb4f4dcdde855cd123a",
+ "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96",
+ "sha256:d7a096646eab884bf8bed965bad63ea327e0d0c38989fc83c5ea7b8a87037bfc",
+ "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1",
+ "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07",
+ "sha256:e4f3149fd5eb9b285d6bfb54d2e5173f6a116fe19172686797c056672689daf6",
+ "sha256:e92a52c166426efbe0d1ec1332ee9119b6d32fc1f0bbfd55d5c1088070e7fc1b",
+ "sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5",
+ "sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6"
+ ],
+ "markers": "python_version >= '3.7'",
+ "version": "==0.18.1"
+ },
+ "python-dateutil": {
+ "hashes": [
+ "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86",
+ "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==2.8.2"
+ },
+ "pytz": {
+ "hashes": [
+ "sha256:220f481bdafa09c3955dfbdddb7b57780e9a94f5127e35456a48589b9e0c0197",
+ "sha256:cea221417204f2d1a2aa03ddae3e867921971d0d76f14d87abb4414415bbdcf5"
+ ],
+ "version": "==2022.2.1"
+ },
+ "pytz-deprecation-shim": {
+ "hashes": [
+ "sha256:8314c9692a636c8eb3bda879b9f119e350e93223ae83e70e80c31675a0fdc1a6",
+ "sha256:af097bae1b616dde5c5744441e2ddc69e74dfdcb0c263129610d85b87445a59d"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
+ "version": "==0.1.0.post0"
+ },
+ "pyyaml": {
+ "hashes": [
+ "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf",
+ "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293",
+ "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b",
+ "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57",
+ "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b",
+ "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4",
+ "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07",
+ "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba",
+ "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9",
+ "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287",
+ "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513",
+ "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0",
+ "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782",
+ "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0",
+ "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92",
+ "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f",
+ "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2",
+ "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc",
+ "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1",
+ "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c",
+ "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86",
+ "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4",
+ "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c",
+ "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34",
+ "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b",
+ "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d",
+ "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c",
+ "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb",
+ "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7",
+ "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737",
+ "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3",
+ "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d",
+ "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358",
+ "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53",
+ "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78",
+ "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803",
+ "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a",
+ "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f",
+ "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174",
+ "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"
+ ],
+ "index": "pypi",
+ "version": "==6.0"
+ },
+ "requests": {
+ "hashes": [
+ "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983",
+ "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"
+ ],
+ "index": "pypi",
+ "version": "==2.28.1"
+ },
+ "requests-oauthlib": {
+ "hashes": [
+ "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5",
+ "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"
+ ],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
+ "version": "==1.3.1"
+ },
+ "rich": {
+ "hashes": [
+ "sha256:2eb4e6894cde1e017976d2975ac210ef515d7548bc595ba20e195fb9628acdeb",
+ "sha256:63a5c5ce3673d3d5fbbf23cd87e11ab84b6b451436f1b7f19ec54b6bc36ed7ca"
+ ],
+ "markers": "python_full_version >= '3.6.3' and python_full_version < '4.0.0'",
+ "version": "==12.5.1"
+ },
+ "rsa": {
+ "hashes": [
+ "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7",
+ "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"
+ ],
+ "markers": "python_version >= '3.6'",
+ "version": "==4.9"
+ },
+ "scipy": {
+ "hashes": [
+ "sha256:0419485dbcd0ed78c0d5bf234c5dd63e86065b39b4d669e45810d42199d49521",
+ "sha256:09412eb7fb60b8f00b328037fd814d25d261066ebc43a1e339cdce4f7502877e",
+ "sha256:26d28c468900e6d5fdb37d2812ab46db0ccd22c63baa095057871faa3a498bc9",
+ "sha256:34441dfbee5b002f9e15285014fd56e5e3372493c3e64ae297bae2c4b9659f5a",
+ "sha256:39ab9240cd215a9349c85ab908dda6d732f7d3b4b192fa05780812495536acc4",
+ "sha256:3bc1ab68b9a096f368ba06c3a5e1d1d50957a86665fc929c4332d21355e7e8f4",
+ "sha256:3c6f5d1d4b9a5e4fe5e14f26ffc9444fc59473bbf8d45dc4a9a15283b7063a72",
883
+ "sha256:47d1a95bd9d37302afcfe1b84c8011377c4f81e33649c5a5785db9ab827a6ade",
884
+ "sha256:71487c503e036740635f18324f62a11f283a632ace9d35933b2b0a04fd898c98",
885
+ "sha256:7a412c476a91b080e456229e413792bbb5d6202865dae963d1e6e28c2bb58691",
886
+ "sha256:825951b88f56765aeb6e5e38ac9d7d47407cfaaeb008d40aa1b45a2d7ea2731e",
887
+ "sha256:8cc81ac25659fec73599ccc52c989670e5ccd8974cf34bacd7b54a8d809aff1a",
888
+ "sha256:8d3faa40ac16c6357aaf7ea50394ea6f1e8e99d75e927a51102b1943b311b4d9",
889
+ "sha256:90c805f30c46cf60f1e76e947574f02954d25e3bb1e97aa8a07bc53aa31cf7d1",
890
+ "sha256:96d7cf7b25c9f23c59a766385f6370dab0659741699ecc7a451f9b94604938ce",
891
+ "sha256:b97b479f39c7e4aaf807efd0424dec74bbb379108f7d22cf09323086afcd312c",
892
+ "sha256:bc4e2c77d4cd015d739e75e74ebbafed59ba8497a7ed0fd400231ed7683497c4",
893
+ "sha256:c61b4a91a702e8e04aeb0bfc40460e1f17a640977c04dda8757efb0199c75332",
894
+ "sha256:d79da472015d0120ba9b357b28a99146cd6c17b9609403164b1a8ed149b4dfc8",
895
+ "sha256:e8fe305d9d67a81255e06203454729405706907dccbdfcc330b7b3482a6c371d",
896
+ "sha256:eb954f5aca4d26f468bbebcdc5448348eb287f7bea536c6306f62ea062f63d9a",
897
+ "sha256:f7c39f7dbb57cce00c108d06d731f3b0e2a4d3a95c66d96bce697684876ce4d4",
898
+ "sha256:f950a04b33e17b38ff561d5a0951caf3f5b47caa841edd772ffb7959f20a6af0"
899
+ ],
900
+ "index": "pypi",
901
+ "version": "==1.9.1"
902
+ },
903
+ "seaborn": {
904
+ "hashes": [
905
+ "sha256:893f17292d8baca616c1578ddb58eb25c72d622f54fc5ee329c8207dc9b57b23",
906
+ "sha256:cbeff3deef7c2515aa0af99b2c7e02dc5bf8b42c936a74d8e4b416905b549db0"
907
+ ],
908
+ "index": "pypi",
909
+ "version": "==0.12.0"
910
+ },
911
+ "semver": {
912
+ "hashes": [
913
+ "sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4",
914
+ "sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f"
915
+ ],
916
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
917
+ "version": "==2.13.0"
918
+ },
919
+ "setuptools": {
920
+ "hashes": [
921
+ "sha256:2e24e0bec025f035a2e72cdd1961119f557d78ad331bb00ff82efb2ab8da8e82",
922
+ "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"
923
+ ],
924
+ "markers": "python_version >= '3.7'",
925
+ "version": "==65.3.0"
926
+ },
927
+ "six": {
928
+ "hashes": [
929
+ "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
930
+ "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"
931
+ ],
932
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
933
+ "version": "==1.16.0"
934
+ },
935
+ "smmap": {
936
+ "hashes": [
937
+ "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94",
938
+ "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"
939
+ ],
940
+ "markers": "python_version >= '3.6'",
941
+ "version": "==5.0.0"
942
+ },
943
+ "streamlit": {
944
+ "hashes": [
945
+ "sha256:c56d0775feb39116ff90a8b01ee15be27212ee50abb88943607205d26d1d9923",
946
+ "sha256:f0461bebd6c1b58c38f0f602ee9bb6699f66dfe14fd2e05abc25ebe96ff4ba21"
947
+ ],
948
+ "index": "pypi",
949
+ "version": "==1.12.2"
950
+ },
951
+ "tensorboard": {
952
+ "hashes": [
953
+ "sha256:76c91a5e8959cd2208cc32cb17a0cb002badabb66a06ac2af02a7810f49a59e3"
954
+ ],
955
+ "index": "pypi",
956
+ "version": "==2.10.0"
957
+ },
958
+ "tensorboard-data-server": {
959
+ "hashes": [
960
+ "sha256:809fe9887682d35c1f7d1f54f0f40f98bb1f771b14265b453ca051e2ce58fca7",
961
+ "sha256:d8237580755e58eff68d1f3abefb5b1e39ae5c8b127cc40920f9c4fb33f4b98a",
962
+ "sha256:fa8cef9be4fcae2f2363c88176638baf2da19c5ec90addb49b1cde05c95c88ee"
963
+ ],
964
+ "markers": "python_version >= '3.6'",
965
+ "version": "==0.6.1"
966
+ },
967
+ "tensorboard-plugin-wit": {
968
+ "hashes": [
969
+ "sha256:ff26bdd583d155aa951ee3b152b3d0cffae8005dc697f72b44a8e8c2a77a8cbe"
970
+ ],
971
+ "version": "==1.8.1"
972
+ },
973
+ "thop": {
974
+ "hashes": [
975
+ "sha256:01473c225231927d2ad718351f78ebf7cffe6af3bed464c4f1ba1ef0f7cdda27"
976
+ ],
977
+ "index": "pypi",
978
+ "version": "==0.1.1.post2209072238"
979
+ },
980
+ "toml": {
981
+ "hashes": [
982
+ "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b",
983
+ "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"
984
+ ],
985
+ "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
986
+ "version": "==0.10.2"
987
+ },
988
+ "toolz": {
989
+ "hashes": [
990
+ "sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f",
991
+ "sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194"
992
+ ],
993
+ "markers": "python_version >= '3.5'",
994
+ "version": "==0.12.0"
995
+ },
996
+ "torch": {
997
+ "hashes": [
998
+ "sha256:06435080ba0a2c8f88b65af0550b973c5aa7771eacd9b17f69057fc7436a8ae2",
999
+ "sha256:1fb49ca0ca8edefbb3f47f6801482144c3a746ec21a65eb3f0839a1d8fb24705",
1000
+ "sha256:335961a5c893f7b33b29aecbc19382a1a1b0106b3457a1c45148e1e14f8f5e09",
1001
+ "sha256:351dda9f483486bec66ed838234e96f077e6886c88110bb1e2f4a708ed2356ce",
1002
+ "sha256:35ec703bc535bde7e8790ab9500f02d4413d995ac981520501fde95e268781e1",
1003
+ "sha256:42ca081a2e0e759844e70cad7efd8fcfb2f81634dffa73a226564eb83d989e5b",
1004
+ "sha256:54dacb6a3f63c54334fadbf22fb6e9ee865085a4e0368962edff5babda057606",
1005
+ "sha256:936d303c5e1d60259fb71d95a33e84d84fececa25a0fae112f6a23286ff183c8",
1006
+ "sha256:a198332e2d344d25e423ae2df98d56d83060f19e9f4cf23164dffc8d403efeb8",
1007
+ "sha256:b92f934b3c95578b3fd37cc06afca208d63f02b0d01b806e979cb4e46124a7f8",
1008
+ "sha256:dd3ca91dc1a9fe3fbcddf035cb2fb8be44d57a527b845cd196ba69249adecccf",
1009
+ "sha256:e470697006a4c08e4fb6a645e8ca49b0d36c8e7ccf413deef5161335bd7399f1"
1010
+ ],
1011
+ "index": "pypi",
1012
+ "version": "==1.9.1"
1013
+ },
1014
+ "torchvision": {
1015
+ "hashes": [
1016
+ "sha256:1c186f42b4f8aa9a01c56c3a758693b0447aa169afb9fba0051177f8fecbd691",
1017
+ "sha256:453e935212193e89b4bbb8d51082d8138631c2f8a420390284b1946d893df6eb",
1018
+ "sha256:46a70a30ea7aeab63e67504778f2565fbb1c153fdd8e1a8c6a22193aec4dbddd",
1019
+ "sha256:4ebffeee5468a0934952030eaba1de1dbb08154132235ee1d9049e41dfb1600d",
1020
+ "sha256:6c8fe90213be4bce590ac9647b34db022d5d1ae94f309a733b9a64e65232173a",
1021
+ "sha256:99d3e01e1d67d12bcc88e826431b70cad5b8e4729a277c04601f83358a120508",
1022
+ "sha256:ac8dfbe4933013dda898b815e2476ebbc35e3a16b9352dfdd66e773c77755bec",
1023
+ "sha256:bc99a984b162ee5626787eaee885d9fec1a5f16837f9d0c8223cca3269b9e47d",
1024
+ "sha256:cd7e2b1a89d5a08f24325fc12441f5ba2822f407489377ac7841bf351a1f4d37",
1025
+ "sha256:d6420bf21b9d0bdbabe55d64c8b11c61f8eb077948a55d5707946fcb17d97cec",
1026
+ "sha256:d7c2d6c20244404fc9ca3568c88c305cb5a81d526d5912d52d22c64999bd4353",
1027
+ "sha256:e504d9d51eae60a98925aee4a3fd58655abd5669659ad7431f7791a93af166fc"
1028
+ ],
1029
+ "index": "pypi",
1030
+ "version": "==0.10.1"
1031
+ },
1032
+ "tornado": {
1033
+ "hashes": [
1034
+ "sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca",
1035
+ "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72",
1036
+ "sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23",
1037
+ "sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8",
1038
+ "sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b",
1039
+ "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9",
1040
+ "sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13",
1041
+ "sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75",
1042
+ "sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac",
1043
+ "sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e",
1044
+ "sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b"
1045
+ ],
1046
+ "markers": "python_version >= '3.7'",
1047
+ "version": "==6.2"
1048
+ },
1049
+ "tqdm": {
1050
+ "hashes": [
1051
+ "sha256:5f4f682a004951c1b450bc753c710e9280c5746ce6ffedee253ddbcbf54cf1e4",
1052
+ "sha256:6fee160d6ffcd1b1c68c65f14c829c22832bc401726335ce92c52d395944a6a1"
1053
+ ],
1054
+ "index": "pypi",
1055
+ "version": "==4.64.1"
1056
+ },
1057
+ "typing-extensions": {
1058
+ "hashes": [
1059
+ "sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02",
1060
+ "sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6"
1061
+ ],
1062
+ "markers": "python_version >= '3.7'",
1063
+ "version": "==4.3.0"
1064
+ },
1065
+ "tzdata": {
1066
+ "hashes": [
1067
+ "sha256:21f4f0d7241572efa7f7a4fdabb052e61b55dc48274e6842697ccdf5253e5451",
1068
+ "sha256:c3119520447d68ef3eb8187a55a4f44fa455f30eb1b4238fa5691ba094f2b05b"
1069
+ ],
1070
+ "markers": "platform_system == 'Windows'",
1071
+ "version": "==2022.2"
1072
+ },
1073
+ "tzlocal": {
1074
+ "hashes": [
1075
+ "sha256:89885494684c929d9191c57aa27502afc87a579be5cdd3225c77c463ea043745",
1076
+ "sha256:ee5842fa3a795f023514ac2d801c4a81d1743bbe642e3940143326b3a00addd7"
1077
+ ],
1078
+ "markers": "python_version >= '3.6'",
1079
+ "version": "==4.2"
1080
+ },
1081
+ "urllib3": {
1082
+ "hashes": [
1083
+ "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e",
1084
+ "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"
1085
+ ],
1086
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' and python_full_version < '4.0.0'",
1087
+ "version": "==1.26.12"
1088
+ },
1089
+ "validators": {
1090
+ "hashes": [
1091
+ "sha256:24148ce4e64100a2d5e267233e23e7afeb55316b47d30faae7eb6e7292bc226a"
1092
+ ],
1093
+ "markers": "python_version >= '3.4'",
1094
+ "version": "==0.20.0"
1095
+ },
1096
+ "watchdog": {
1097
+ "hashes": [
1098
+ "sha256:083171652584e1b8829581f965b9b7723ca5f9a2cd7e20271edf264cfd7c1412",
1099
+ "sha256:117ffc6ec261639a0209a3252546b12800670d4bf5f84fbd355957a0595fe654",
1100
+ "sha256:186f6c55abc5e03872ae14c2f294a153ec7292f807af99f57611acc8caa75306",
1101
+ "sha256:195fc70c6e41237362ba720e9aaf394f8178bfc7fa68207f112d108edef1af33",
1102
+ "sha256:226b3c6c468ce72051a4c15a4cc2ef317c32590d82ba0b330403cafd98a62cfd",
1103
+ "sha256:247dcf1df956daa24828bfea5a138d0e7a7c98b1a47cf1fa5b0c3c16241fcbb7",
1104
+ "sha256:255bb5758f7e89b1a13c05a5bceccec2219f8995a3a4c4d6968fe1de6a3b2892",
1105
+ "sha256:43ce20ebb36a51f21fa376f76d1d4692452b2527ccd601950d69ed36b9e21609",
1106
+ "sha256:4f4e1c4aa54fb86316a62a87b3378c025e228178d55481d30d857c6c438897d6",
1107
+ "sha256:5952135968519e2447a01875a6f5fc8c03190b24d14ee52b0f4b1682259520b1",
1108
+ "sha256:64a27aed691408a6abd83394b38503e8176f69031ca25d64131d8d640a307591",
1109
+ "sha256:6b17d302850c8d412784d9246cfe8d7e3af6bcd45f958abb2d08a6f8bedf695d",
1110
+ "sha256:70af927aa1613ded6a68089a9262a009fbdf819f46d09c1a908d4b36e1ba2b2d",
1111
+ "sha256:7a833211f49143c3d336729b0020ffd1274078e94b0ae42e22f596999f50279c",
1112
+ "sha256:8250546a98388cbc00c3ee3cc5cf96799b5a595270dfcfa855491a64b86ef8c3",
1113
+ "sha256:97f9752208f5154e9e7b76acc8c4f5a58801b338de2af14e7e181ee3b28a5d39",
1114
+ "sha256:9f05a5f7c12452f6a27203f76779ae3f46fa30f1dd833037ea8cbc2887c60213",
1115
+ "sha256:a735a990a1095f75ca4f36ea2ef2752c99e6ee997c46b0de507ba40a09bf7330",
1116
+ "sha256:ad576a565260d8f99d97f2e64b0f97a48228317095908568a9d5c786c829d428",
1117
+ "sha256:b530ae007a5f5d50b7fbba96634c7ee21abec70dc3e7f0233339c81943848dc1",
1118
+ "sha256:bfc4d351e6348d6ec51df007432e6fe80adb53fd41183716017026af03427846",
1119
+ "sha256:d3dda00aca282b26194bdd0adec21e4c21e916956d972369359ba63ade616153",
1120
+ "sha256:d9820fe47c20c13e3c9dd544d3706a2a26c02b2b43c993b62fcd8011bcc0adb3",
1121
+ "sha256:ed80a1628cee19f5cfc6bb74e173f1b4189eb532e705e2a13e3250312a62e0c9",
1122
+ "sha256:ee3e38a6cc050a8830089f79cbec8a3878ec2fe5160cdb2dc8ccb6def8552658"
1123
+ ],
1124
+ "markers": "platform_system != 'Darwin'",
1125
+ "version": "==2.1.9"
1126
+ },
1127
+ "werkzeug": {
1128
+ "hashes": [
1129
+ "sha256:7ea2d48322cc7c0f8b3a215ed73eabd7b5d75d0b50e31ab006286ccff9e00b8f",
1130
+ "sha256:f979ab81f58d7318e064e99c4506445d60135ac5cd2e177a2de0089bfd4c9bd5"
1131
+ ],
1132
+ "markers": "python_version >= '3.7'",
1133
+ "version": "==2.2.2"
1134
+ },
1135
+ "wheel": {
1136
+ "hashes": [
1137
+ "sha256:4bdcd7d840138086126cd09254dc6195fb4fc6f01c050a1d7236f2630db1d22a",
1138
+ "sha256:e9a504e793efbca1b8e0e9cb979a249cf4a0a7b5b8c9e8b65a5e39d49529c1c4"
1139
+ ],
1140
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
1141
+ "version": "==0.37.1"
1142
+ },
1143
+ "zipp": {
1144
+ "hashes": [
1145
+ "sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2",
1146
+ "sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009"
1147
+ ],
1148
+ "markers": "python_version >= '3.7'",
1149
+ "version": "==3.8.1"
1150
+ }
1151
+ },
1152
+ "develop": {}
1153
+ }
Shibuya_Crossing_FullHD.mp4 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c2b456095cb70f2c7a815d032ad354d851294377939cd6accea29b285b51324
3
+ size 100731917
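(Note: the three lines above are a Git LFS pointer, not the video itself; the real ~100 MB file is addressed by its SHA-256 `oid` and fetched at checkout. A minimal sketch of reading such a pointer, assuming the standard `version`/`oid`/`size` fields shown above:)

```python
def read_lfs_pointer(path):
    """Parse a Git LFS pointer file into a {version, oid, size} dict."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    fields["size"] = int(fields["size"])  # bytes of the real object
    return fields

# e.g. read_lfs_pointer("Shibuya_Crossing_FullHD.mp4")["size"] -> 100731917
```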
app.py ADDED
@@ -0,0 +1,237 @@
1
+ import cv2
2
+ import streamlit as st
3
+ from deep_list import *
4
+ import torch
5
+
6
+ def main():
7
+
8
+ st.image('https://upload.wikimedia.org/wikipedia/commons/thumb/e/e5/Dentsu-logo_black.svg/2560px-Dentsu-logo_black.svg.png', width=250)
9
+ st.title("Traffic Analytics for Out of Home Environment")
10
+
11
+ inference_msg = st.empty()
12
+ st.sidebar.title("Configuration")
13
+
14
+ input_source = st.sidebar.radio(
15
+ "Select input source",
16
+ ('RTSP', 'Webcam', 'Local video'))
17
+
18
+ conf_thres = st.sidebar.text_input("Class confidence threshold", "0.25")
19
+
20
+ conf_thres_drift = st.sidebar.text_input("Class confidence threshold for drift detection", "0.75")
21
+
22
+ fps_drop_warn_thresh = st.sidebar.text_input("FPS drop warning threshold", "8")
23
+
24
+ save_output_video = st.sidebar.radio("Save output video?",('Yes', 'No'))
25
+ if save_output_video == 'Yes':
26
+ nosave = False
27
+ display_labels = False
28
+ else:
29
+ nosave = True
30
+ display_labels = True
31
+
32
+ save_poor_frame = st.sidebar.radio("Save poor performing frames?",('Yes', 'No'))
33
+ if save_poor_frame == "Yes":
34
+ save_poor_frame__ = True
35
+ else:
36
+ save_poor_frame__ = False
37
+
38
+ # ------------------------- LOCAL VIDEO ------------------------------
39
+ if input_source == "Local video":
40
+ video = st.sidebar.file_uploader("Select input video", type=["mp4", "avi"], accept_multiple_files=False)
41
+
42
+ if st.sidebar.button("Start tracking"):
43
+
44
+ stframe = st.empty()
45
+
46
+ st.subheader("Inference Stats")
47
+ kpi1, kpi2, kpi3 = st.columns(3)
48
+
49
+ st.subheader("System Stats")
50
+ js1, js2, js3 = st.columns(3)
51
+
52
+ # Updating Inference results
53
+
54
+ with kpi1:
55
+ st.markdown("**Frame Rate**")
56
+ kpi1_text = st.markdown("0")
57
+ fps_warn = st.empty()
58
+
59
+ with kpi2:
60
+ st.markdown("**Detected objects in curret Frame**")
61
+ kpi2_text = st.markdown("0")
62
+
63
+ with kpi3:
64
+ st.markdown("**Overall Detected objects**")
65
+ kpi3_text = st.markdown("0")
66
+
67
+ # Updating System stats
68
+
69
+ with js1:
70
+ st.markdown("**Memory usage**")
71
+ js1_text = st.markdown("0")
72
+
73
+ with js2:
74
+ st.markdown("**CPU Usage**")
75
+ js2_text = st.markdown("0")
76
+
77
+ with js3:
78
+ st.markdown("**GPU Memory Usage**")
79
+ js3_text = st.markdown("0")
80
+
81
+ st.subheader("Inference Overview")
82
+ inf_ov_1, inf_ov_2, inf_ov_3, inf_ov_4 = st.columns(4)
83
+
84
+ with inf_ov_1:
85
+ st.markdown("**Poor performing classes (Conf < {0})**".format(conf_thres_drift))
86
+ inf_ov_1_text = st.markdown("0")
87
+
88
+ with inf_ov_2:
89
+ st.markdown("**No. of poor peforming frames**")
90
+ inf_ov_2_text = st.markdown("0")
91
+
92
+ with inf_ov_3:
93
+ st.markdown("**Minimum FPS**")
94
+ inf_ov_3_text = st.markdown("0")
95
+
96
+ with inf_ov_4:
97
+ st.markdown("**Maximum FPS**")
98
+ inf_ov_4_text = st.markdown("0")
99
+
100
+ detect(source=video.name, stframe=stframe, kpi1_text=kpi1_text, kpi2_text=kpi2_text, kpi3_text=kpi3_text, js1_text=js1_text, js2_text=js2_text, js3_text=js3_text, conf_thres=float(conf_thres), nosave=nosave, display_labels=display_labels, conf_thres_drift = float(conf_thres_drift), save_poor_frame__= save_poor_frame__, inf_ov_1_text=inf_ov_1_text, inf_ov_2_text=inf_ov_2_text, inf_ov_3_text=inf_ov_3_text, inf_ov_4_text=inf_ov_4_text, fps_warn=fps_warn, fps_drop_warn_thresh = float(fps_drop_warn_thresh))
101
+
102
+ inference_msg.success("Inference Complete!")
103
+
104
+ # -------------------------- WEBCAM ----------------------------------
105
+ if input_source == "Webcam":
106
+
107
+ if st.sidebar.button("Start tracking"):
108
+
109
+ stframe = st.empty()
110
+
111
+ st.subheader("Inference Stats")
112
+ kpi1, kpi2, kpi3 = st.columns(3)
113
+
114
+ st.subheader("System Stats")
115
+ js1, js2, js3 = st.columns(3)
116
+
117
+ # Updating Inference results
118
+
119
+ with kpi1:
120
+ st.markdown("**Frame Rate**")
121
+ kpi1_text = st.markdown("0")
122
+ fps_warn = st.empty()
123
+
124
+ with kpi2:
125
+ st.markdown("**Detected objects in curret Frame**")
126
+ kpi2_text = st.markdown("0")
127
+
128
+ with kpi3:
129
+ st.markdown("**Total Detected objects**")
130
+ kpi3_text = st.markdown("0")
131
+
132
+ # Updating System stats
133
+
134
+ with js1:
135
+ st.markdown("**Memory usage**")
136
+ js1_text = st.markdown("0")
137
+
138
+ with js2:
139
+ st.markdown("**CPU Usage**")
140
+ js2_text = st.markdown("0")
141
+
142
+ with js3:
143
+ st.markdown("**GPU Memory Usage**")
144
+ js3_text = st.markdown("0")
145
+
146
+ st.subheader("Inference Overview")
147
+ inf_ov_1, inf_ov_2, inf_ov_3, inf_ov_4 = st.columns(4)
148
+
149
+ with inf_ov_1:
150
+ st.markdown("**Poor performing classes (Conf < {0})**".format(conf_thres_drift))
151
+ inf_ov_1_text = st.markdown("0")
152
+
153
+ with inf_ov_2:
154
+ st.markdown("**No. of poor peforming frames**")
155
+ inf_ov_2_text = st.markdown("0")
156
+
157
+ with inf_ov_3:
158
+ st.markdown("**Minimum FPS**")
159
+ inf_ov_3_text = st.markdown("0")
160
+
161
+ with inf_ov_4:
162
+ st.markdown("**Maximum FPS**")
163
+ inf_ov_4_text = st.markdown("0")
164
+
165
+ detect(source='0', stframe=stframe, kpi1_text=kpi1_text, kpi2_text=kpi2_text, kpi3_text=kpi3_text, js1_text=js1_text, js2_text=js2_text, js3_text=js3_text, conf_thres=float(conf_thres), nosave=nosave, display_labels=display_labels, conf_thres_drift = float(conf_thres_drift), save_poor_frame__= save_poor_frame__, inf_ov_1_text=inf_ov_1_text, inf_ov_2_text=inf_ov_2_text, inf_ov_3_text=inf_ov_3_text, inf_ov_4_text=inf_ov_4_text, fps_warn=fps_warn, fps_drop_warn_thresh = float(fps_drop_warn_thresh))
166
+
167
+ # -------------------------- RTSP ------------------------------
168
+ if input_source == "RTSP":
169
+
170
+ rtsp_input = st.sidebar.text_input("IP Address", "rtsp://192.168.0.1")
171
+ if st.sidebar.button("Start tracking"):
172
+
173
+ stframe = st.empty()
174
+
175
+ st.subheader("Inference Stats")
176
+ kpi1, kpi2, kpi3 = st.columns(3)
177
+
178
+ st.subheader("System Stats")
179
+ js1, js2, js3 = st.columns(3)
180
+
181
+ # Updating Inference results
182
+
183
+ with kpi1:
184
+ st.markdown("**Frame Rate**")
185
+ kpi1_text = st.markdown("0")
186
+ fps_warn = st.empty()
187
+
188
+ with kpi2:
189
+ st.markdown("**Detected objects in curret Frame**")
190
+ kpi2_text = st.markdown("0")
191
+
192
+ with kpi3:
193
+ st.markdown("**Total Detected objects**")
194
+ kpi3_text = st.markdown("0")
195
+
196
+ # Updating System stats
197
+
198
+ with js1:
199
+ st.markdown("**Memory usage**")
200
+ js1_text = st.markdown("0")
201
+
202
+ with js2:
203
+ st.markdown("**CPU Usage**")
204
+ js2_text = st.markdown("0")
205
+
206
+ with js3:
207
+ st.markdown("**GPU Memory Usage**")
208
+ js3_text = st.markdown("0")
209
+
210
+ st.subheader("Inference Overview")
211
+ inf_ov_1, inf_ov_2, inf_ov_3, inf_ov_4 = st.columns(4)
212
+
213
+ with inf_ov_1:
214
+ st.markdown("**Poor performing classes (Conf < {0})**".format(conf_thres_drift))
215
+ inf_ov_1_text = st.markdown("0")
216
+
217
+ with inf_ov_2:
218
+ st.markdown("**No. of poor peforming frames**")
219
+ inf_ov_2_text = st.markdown("0")
220
+
221
+ with inf_ov_3:
222
+ st.markdown("**Minimum FPS**")
223
+ inf_ov_3_text = st.markdown("0")
224
+
225
+ with inf_ov_4:
226
+ st.markdown("**Maximum FPS**")
227
+ inf_ov_4_text = st.markdown("0")
228
+
229
+ detect(source=rtsp_input, stframe=stframe, kpi1_text=kpi1_text, kpi2_text=kpi2_text, kpi3_text=kpi3_text, js1_text=js1_text, js2_text=js2_text, js3_text=js3_text, conf_thres=float(conf_thres), nosave=nosave, display_labels=display_labels, conf_thres_drift = float(conf_thres_drift), save_poor_frame__= save_poor_frame__, inf_ov_1_text=inf_ov_1_text, inf_ov_2_text=inf_ov_2_text, inf_ov_3_text=inf_ov_3_text, inf_ov_4_text=inf_ov_4_text, fps_warn=fps_warn, fps_drop_warn_thresh = float(fps_drop_warn_thresh))
230
+
231
+ # torch.cuda.empty_cache()
232
+
233
+ if __name__ == "__main__":
234
+ try:
235
+ main()
236
+ except SystemExit:
237
+ pass
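(Note on the local-video path above: `st.file_uploader` returns an in-memory `UploadedFile`, and `video.name` is only the bare filename, so `detect(source=video.name, ...)` works only when the same file already sits in the working directory. A hedged sketch of a more robust approach; the `persist_upload` helper is illustrative and not part of this repo:)

```python
import tempfile

def persist_upload(uploaded_file, suffix=".mp4"):
    """Write a Streamlit UploadedFile to a temp file and return its path."""
    tmp = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
    tmp.write(uploaded_file.read())
    tmp.close()
    return tmp.name

# usage inside main(): detect(source=persist_upload(video), ...)
```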
deep_list.py ADDED
@@ -0,0 +1,337 @@
1
+ import argparse
2
+ import time
3
+ from pathlib import Path
4
+ import streamlit as st
5
+ import cv2
6
+ import torch
7
+ import torch.backends.cudnn as cudnn
8
+ import os
9
+ import sys
10
+ import datetime
11
+ import matplotlib.pyplot as plt
12
+ import seaborn as sns
13
+ sys.path.insert(0, './yolov5') # make yolov5's internal modules importable without restructuring the repo
14
+ import numpy as np
15
+ from yolov5.models.common import DetectMultiBackend
16
+ from yolov5.utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
17
+ from yolov5.utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
18
+ increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
19
+ from yolov5.utils.general import set_logging
20
+ from yolov5.utils.plots import Annotator, colors, save_one_box, plot_one_box
21
+ from yolov5.utils.torch_utils import select_device, time_sync
22
+
23
+
24
+ from deep_sort_pytorch.utils.parser import get_config
25
+ from deep_sort_pytorch.deep_sort import DeepSort
26
+
27
+ from graphs import bbox_rel,draw_boxes
28
+ from collections import Counter
29
+
30
+ import psutil
31
+ import subprocess
32
+
33
+ FILE = Path(__file__).resolve()
34
+ ROOT = FILE.parents[0] # YOLOv5 root directory
35
+ if str(ROOT) not in sys.path:
36
+ sys.path.append(str(ROOT)) # add ROOT to PATH
37
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
38
+
39
+ def get_gpu_memory():
40
+ result = subprocess.check_output(
41
+ [
42
+ 'nvidia-smi', '--query-gpu=memory.used',
43
+ '--format=csv,nounits,noheader'
44
+ ], encoding='utf-8')
45
+ gpu_memory = [int(x) for x in result.strip().split('\n')]
46
+ return gpu_memory[0]
47
+
48
+ @torch.no_grad()
49
+ def detect(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
50
+ source=ROOT / 'yolov5/data/images', # file/dir/URL/glob, 0 for webcam
51
+ data=ROOT / 'yolov5/data/coco128.yaml', # dataset.yaml path
52
+ stframe=None,
53
+ #stgraph=None,
54
+ kpi1_text="",
55
+ kpi2_text="", kpi3_text="",
56
+ js1_text="",js2_text="",js3_text="",
57
+ imgsz=(640, 640), # inference size (height, width)
58
+ conf_thres=0.25, # confidence threshold
59
+ iou_thres=0.45, # NMS IOU threshold
60
+ max_det=1000, # maximum detections per image
61
+ device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
62
+ view_img=False, # show results
63
+ save_txt=False, # save results to *.txt
64
+ save_conf=False, # save confidences in --save-txt labels
65
+ save_crop=False, # save cropped prediction boxes
66
+ nosave=False, # do not save images/videos
67
+ classes=None, # filter by class: --class 0, or --class 0 2 3
68
+ agnostic_nms=False, # class-agnostic NMS
69
+ augment=False, # augmented inference
70
+ visualize=False, # visualize features
71
+ update=False, # update all models
72
+ project=ROOT / 'runs/detect', # save results to project/name
73
+ name='exp', # save results to project/name
74
+ exist_ok=False, # existing project/name ok, do not increment
75
+ line_thickness=1, # bounding box thickness (pixels)
76
+ hide_labels=False, # hide labels
77
+ hide_conf=False, # hide confidences
78
+ half=False, # use FP16 half-precision inference
79
+ dnn=False,
80
+ display_labels=False,
81
+ config_deepsort="deep_sort_pytorch/configs/deep_sort.yaml", #Deep Sort configuration
82
+ conf_thres_drift = 0.75,
83
+ save_poor_frame__ = False,
84
+ inf_ov_1_text="", inf_ov_2_text="",inf_ov_3_text="", inf_ov_4_text="",
85
+ fps_warn="",fps_drop_warn_thresh=8
86
+ ):
87
+ save_img = not nosave and not source.endswith('.txt') # save inference images
88
+ webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
89
+ ('rtsp://', 'rtmp://', 'http://', 'https://'))
90
+
91
+ ## initialize deepsort
92
+ cfg = get_config()
93
+ cfg.merge_from_file(config_deepsort)
94
+ deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
95
+ max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
96
+ nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
97
+ max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
98
+ use_cuda=True)
99
+
100
+
101
+ # Directories
102
+ save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
103
+ (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
104
+ if save_poor_frame__:
105
+ try:
106
+ os.mkdir("drift_frames")
107
+ except FileExistsError:
108
+ print("Folder exists, overwriting...")
109
+
110
+ # Initialize
111
+ set_logging()
112
+ device = select_device(device)
113
+ half &= device.type != 'cpu' # half precision only supported on CUDA
114
+
115
+ # Load model
117
+ model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data)
118
+ stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine
119
+ imgsz = check_img_size(imgsz, s=stride) # check image size
120
+
121
+ # Half
122
+ half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA
123
+ if pt or jit:
124
+ model.model.half() if half else model.model.float()
125
+
126
+ # Second-stage classifier
127
+ classify = False
128
+ if classify:
129
+ modelc = load_classifier(name='resnet101', n=2) # initialize
130
+ modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()
131
+
132
+ # Dataloader
133
+ if webcam:
134
+ #view_img = check_imshow()
135
+ cudnn.benchmark = True # set True to speed up constant image size inference
136
+ dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
137
+ bs = len(dataset) # batch_size
138
+ else:
139
+ dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
140
+ bs = 1 # batch_size
141
+ vid_path, vid_writer = [None] * bs, [None] * bs
142
+
143
+ # Run inference
144
+ t0 = time.time()
145
+
146
+ dt, seen = [0.0, 0.0, 0.0], 0
147
+ prev_time = time.time()
148
+ selected_names = names.copy()
149
+ global_graph_dict = dict()
150
+ global_drift_dict = dict()
151
+ test_drift = []
152
+ frame_num = -1
153
+ poor_perf_frame_counter=0
154
+ mapped_ = dict()
155
+ min_FPS = 10000
156
+ max_FPS = -1
157
+ for path, im, im0s, vid_cap, s in dataset:
158
+ frame_num = frame_num+1
159
+ t1 = time_sync()
160
+ im = torch.from_numpy(im).to(device)
161
+ im = im.half() if half else im.float() # uint8 to fp16/32
162
+ im /= 255 # 0 - 255 to 0.0 - 1.0
163
+ if len(im.shape) == 3:
164
+ im = im[None] # expand for batch dim
165
+ t2 = time_sync()
166
+ dt[0] += t2 - t1
167
+
168
+ # Inference
169
+ visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
170
+ pred = model(im, augment=augment, visualize=visualize)
171
+ t3 = time_sync()
172
+ dt[1] += t3 - t2
173
+
174
+ # NMS
175
+ pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
176
+ dt[2] += time_sync() - t3
177
+
178
+ # Process predictions
179
+ class_count = 0
180
+
181
+ drift_dict = dict()
182
+
183
+ for i, det in enumerate(pred): # per image
184
+ seen += 1
185
+ if webcam: # batch_size >= 1
186
+ p, im0, frame = path[i], im0s[i].copy(), dataset.count
187
+ s += f'{i}: '
188
+ else:
189
+ p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
190
+
191
+ p = Path(p) # to Path
192
+ save_path = str(save_dir / p.name) # im.jpg
193
+ txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt
194
+ s += '%gx%g ' % im.shape[2:] # print string
195
+ gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
196
+ imc = im0.copy() if save_crop else im0 # for save_crop
197
+ annotator = Annotator(im0, line_width=line_thickness, example=str(names))
198
+ if len(det):
199
+ # Rescale boxes from img_size to im0 size
200
+ det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
201
+
202
+ # Print results
203
+ names_ = []
204
+ cnt = []
205
+ for c in det[:, -1].unique():
206
+ n = (det[:, -1] == c).sum() # detections per class
207
+ s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
208
+ names_.append(names[int(c)])
209
+ cnt.append(int(n.detach().cpu().numpy()))
210
+ mapped_.update(dict(zip(names_, cnt)))
211
+
212
+ global_graph_dict = Counter(global_graph_dict) + Counter(mapped_)
213
+
214
+ bbox_xywh = []
215
+ confs = []
216
+ # Adapt detections to deep sort input format
217
+ for *xyxy, conf, cls in det:
218
+ x_c, y_c, bbox_w, bbox_h = bbox_rel(*xyxy)
219
+ obj = [x_c, y_c, bbox_w, bbox_h]
220
+ bbox_xywh.append(obj)
221
+ confs.append([conf.item()])
222
+ # print("conf : {0}, conf_t : {1}".format(conf, conf_thres))
223
+ if conf<conf_thres_drift:
224
+ if names[int(cls)] not in test_drift:
225
+ test_drift.append(names[int(cls)])
226
+ if save_poor_frame__:
227
+ cv2.imwrite("drift_frames/frame_{0}.png".format(frame_num), im0)
228
+ poor_perf_frame_counter+=1
229
+ # print(type(conf_thres))
230
+
231
+ xywhs = torch.Tensor(bbox_xywh)
232
+ confss = torch.Tensor(confs)
233
+
234
+ # Pass detections to deepsort
235
+ outputs = deepsort.update(xywhs, confss, im0)
236
+
237
+ # draw boxes for visualization
238
+ if len(outputs) > 0:
239
+ # print("Outputs :", outputs)
240
+ bbox_xyxy = outputs[:, :4]
241
+ identities = outputs[:, -1]
242
+ draw_boxes(im0, bbox_xyxy, identities)
243
+
244
+ # Write MOT compliant results to file
245
+ if save_txt and len(outputs) != 0:
246
+ for j, output in enumerate(outputs):
247
+ bbox_left = output[0]
248
+ bbox_top = output[1]
249
+ bbox_w = output[2]
250
+ bbox_h = output[3]
251
+ identity = output[-1]
252
+ with open(txt_path, 'a') as f:
253
+ f.write(('%g ' * 10 + '\n') % (frame_num, identity, bbox_left,
254
+ bbox_top, bbox_w, bbox_h, -1, -1, -1, -1)) # label format
255
+
256
+ # Write results Label
257
+ for *xyxy, conf, cls in reversed(det):
258
+ if save_txt: # Write to file
259
+ xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
260
+ line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
261
+ with open(txt_path + '.txt', 'a') as f:
262
+ f.write(('%g ' * len(line)).rstrip() % line + '\n')
263
+
264
+ if save_img or save_crop or view_img or display_labels: # Add bbox to image
265
+ c = int(cls) # integer class
266
+ label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
267
+ plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=line_thickness)
268
+ if save_crop:
269
+ save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
270
+
271
+ else:
272
+ deepsort.increment_ages()
273
+
274
+ # Stream results
275
+ if view_img:
276
+ cv2.imshow(str(p), im0)
277
+ cv2.waitKey(1) # 1 millisecond
278
+
279
+ # Save results (image with detections)
280
+ if save_img:
281
+ if dataset.mode == 'image':
282
+ cv2.imwrite(save_path, im0)
283
+ else: # 'video' or 'stream'
284
+ if vid_path != save_path: # new video
285
+ vid_path = save_path
286
+ if isinstance(vid_writer, cv2.VideoWriter):
287
+ vid_writer.release() # release previous video writer
288
+ if vid_cap: # video
289
+ fps = vid_cap.get(cv2.CAP_PROP_FPS)
290
+ w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
291
+ h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
292
+ else: # stream
293
+ fps, w, h = 30, im0.shape[1], im0.shape[0]
294
+ save_path = str(Path(save_path).with_suffix('.mp4')) # force .mp4 output without doubling the extension
295
+ vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
296
+ vid_writer.write(im0)
297
+
298
+ curr_time = time.time()
299
+ fps_ = curr_time - prev_time
300
+ fps_ = round(1/max(round(fps_, 3), 1e-3), 1) # guard against a zero frame interval
301
+ prev_time = curr_time
302
+
303
+ js1_text.write(str(psutil.virtual_memory()[2])+"%")
304
+ js2_text.write(str(psutil.cpu_percent())+'%')
305
+ try:
306
+ js3_text.write(str(get_gpu_memory())+' MB')
307
+ except:
308
+ js3_text.write(str('NA'))
309
+
310
+
311
+ kpi1_text.write(str(fps_)+' FPS')
312
+ if fps_ < fps_drop_warn_thresh:
313
+ fps_warn.warning(f"FPS dropped below {fps_drop_warn_thresh}")
314
+ kpi2_text.write(mapped_)
315
+ kpi3_text.write(global_graph_dict)
316
+
317
+ inf_ov_1_text.write(test_drift)
318
+ inf_ov_2_text.write(poor_perf_frame_counter)
319
+
320
+ if fps_<min_FPS:
321
+ inf_ov_3_text.write(fps_)
322
+ min_FPS = fps_
323
+ if fps_>max_FPS:
324
+ inf_ov_4_text.write(fps_)
325
+ max_FPS = fps_
326
+
327
+ stframe.image(im0, channels="BGR", use_column_width=True)
328
+
329
+ if save_txt or save_img:
330
+ s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
331
+ print(f"Results saved to {save_dir}{s}")
332
+
333
+ if update:
334
+ strip_optimizer(weights) # update model (to fix SourceChangeWarning)
335
+
336
+ if vid_cap:
337
+ vid_cap.release()
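(Note: `get_gpu_memory()` above shells out to `nvidia-smi` on every frame, which adds per-frame latency and fails, caught by the `except`, on machines without the NVIDIA tools. A possible alternative, assuming a CUDA-enabled PyTorch build, asks torch's allocator directly; note this reports the process's allocated memory, not the driver-wide figure `nvidia-smi` prints:)

```python
import torch

def gpu_memory_mb():
    """Process-level CUDA memory in MB, or None when no GPU is visible."""
    if torch.cuda.is_available():
        return torch.cuda.memory_allocated() // (1024 * 1024)
    return None
```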
deep_sort_pytorch/.gitignore ADDED
@@ -0,0 +1,13 @@
1
+ # Folders
2
+ __pycache__/
3
+ build/
4
+ *.egg-info
5
+
6
+
7
+ # Files
8
+ *.weights
9
+ *.t7
10
+ *.mp4
11
+ *.avi
12
+ *.so
13
+ *.txt
deep_sort_pytorch/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2020 Ziqiang
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
deep_sort_pytorch/configs/deep_sort.yaml ADDED
@@ -0,0 +1,10 @@
1
+ DEEPSORT:
2
+ REID_CKPT: "deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7"
3
+ MAX_DIST: 0.2 # max cosine distance for appearance (re-ID) matching
4
+ MIN_CONFIDENCE: 0.3 # detections below this confidence are discarded
5
+ NMS_MAX_OVERLAP: 0.5 # non-max-suppression overlap threshold on detections
6
+ MAX_IOU_DISTANCE: 0.7 # IoU gate when associating detections with tracks
7
+ MAX_AGE: 70 # frames a lost track is kept alive before deletion
8
+ N_INIT: 3 # consecutive hits required before a track is confirmed
9
+ NN_BUDGET: 100 # per-track budget of stored appearance features
10
+
deep_sort_pytorch/deep_sort/README.md ADDED
@@ -0,0 +1,3 @@
1
+ # Deep Sort
2
+
3
+ This is an implementation of Deep SORT in PyTorch.
deep_sort_pytorch/deep_sort/__init__.py ADDED
@@ -0,0 +1,21 @@
1
+ from .deep_sort import DeepSort
2
+
3
+
4
+ __all__ = ['DeepSort', 'build_tracker']
5
+
6
+
7
+ def build_tracker(cfg, use_cuda):
8
+ return DeepSort(cfg.DEEPSORT.REID_CKPT,
9
+ max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
10
+ nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
11
+ max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET, use_cuda=use_cuda)
12
+
13
+
14
+
15
+
16
+
17
+
18
+
19
+
20
+
21
+
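(For reference, this is roughly how `build_tracker` above is wired into a per-frame loop; a minimal sketch with a dummy frame and one dummy detection, where the real pipeline in `deep_list.py` feeds YOLOv5 outputs instead. With `N_INIT: 3`, IDs only appear after a track has been confirmed over consecutive frames, hence the length check:)

```python
import numpy as np
import torch
from deep_sort_pytorch.utils.parser import get_config
from deep_sort_pytorch.deep_sort import build_tracker

cfg = get_config()
cfg.merge_from_file("deep_sort_pytorch/configs/deep_sort.yaml")
tracker = build_tracker(cfg, use_cuda=torch.cuda.is_available())

frame = np.zeros((480, 640, 3), dtype=np.uint8)      # stand-in BGR frame
xywhs = torch.tensor([[320.0, 240.0, 80.0, 160.0]])  # one box: centre x/y, w, h
confss = torch.tensor([[0.9]])                        # matching confidences

outputs = tracker.update(xywhs, confss, frame)        # per-frame association
if len(outputs) > 0:
    boxes_xyxy = outputs[:, :4]                       # tracked boxes
    identities = outputs[:, -1]                       # persistent track IDs
```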
deep_sort_pytorch/deep_sort/deep/__init__.py ADDED
File without changes
deep_sort_pytorch/deep_sort/deep/checkpoint/.gitkeep ADDED
File without changes
deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df75ddef42c3d1bda67bc94b093e7ce61de7f75a89f36a8f868a428462198316
3
+ size 46034619
deep_sort_pytorch/deep_sort/deep/evaluate.py ADDED
@@ -0,0 +1,13 @@
1
+ import torch
2
+
3
+ features = torch.load("features.pth")
4
+ qf = features["qf"]
5
+ ql = features["ql"]
6
+ gf = features["gf"]
7
+ gl = features["gl"]
8
+
9
+ scores = qf.mm(gf.t())
10
+ res = scores.topk(5, dim=1)[1][:, 0] # index of the best-matching gallery feature per query (top-1)
11
+ top1correct = gl[res].eq(ql).sum().item()
12
+
13
+ print("Acc top1:{:.3f}".format(top1correct / ql.size(0)))
deep_sort_pytorch/deep_sort/deep/feature_extractor.py ADDED
@@ -0,0 +1,54 @@
1
+ import torch
2
+ import torchvision.transforms as transforms
3
+ import numpy as np
4
+ import cv2
5
+ import logging
6
+
7
+ from .model import Net
8
+
9
+
10
+ class Extractor(object):
11
+ def __init__(self, model_path, use_cuda=True):
12
+ self.net = Net(reid=True)
13
+ self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
14
+ state_dict = torch.load(model_path, map_location=torch.device(self.device))[
15
+ 'net_dict']
16
+ self.net.load_state_dict(state_dict)
17
+ logger = logging.getLogger("root.tracker")
18
+ logger.info("Loading weights from {}... Done!".format(model_path))
19
+ self.net.to(self.device)
20
+ self.size = (64, 128)
21
+ self.norm = transforms.Compose([
22
+ transforms.ToTensor(),
23
+ transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
24
+ ])
25
+
26
+ def _preprocess(self, im_crops):
27
+ """
28
+ TODO:
29
+ 1. to float with scale from 0 to 1
30
+ 2. resize to (64, 128) as Market1501 dataset did
31
+ 3. concatenate to a numpy array
32
+ 4. to torch Tensor
33
+ 5. normalize
34
+ """
35
+ def _resize(im, size):
36
+ return cv2.resize(im.astype(np.float32)/255., size)
37
+
38
+ im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(
39
+ 0) for im in im_crops], dim=0).float()
40
+ return im_batch
41
+
42
+ def __call__(self, im_crops):
43
+ im_batch = self._preprocess(im_crops)
44
+ with torch.no_grad():
45
+ im_batch = im_batch.to(self.device)
46
+ features = self.net(im_batch)
47
+ return features.cpu().numpy()
48
+
49
+
50
+ if __name__ == '__main__':
51
+ img = cv2.imread("demo.jpg")[:, :, (2, 1, 0)]
52
+ extr = Extractor("checkpoint/ckpt.t7")
53
+ feature = extr([img]) # __call__ expects a list of crops, not a single array
54
+ print(feature.shape)
deep_sort_pytorch/deep_sort/deep/model.py ADDED
@@ -0,0 +1,109 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+
6
+ class BasicBlock(nn.Module):
7
+ def __init__(self, c_in, c_out, is_downsample=False):
8
+ super(BasicBlock, self).__init__()
9
+ self.is_downsample = is_downsample
10
+ if is_downsample:
11
+ self.conv1 = nn.Conv2d(
12
+ c_in, c_out, 3, stride=2, padding=1, bias=False)
13
+ else:
14
+ self.conv1 = nn.Conv2d(
15
+ c_in, c_out, 3, stride=1, padding=1, bias=False)
16
+ self.bn1 = nn.BatchNorm2d(c_out)
17
+ self.relu = nn.ReLU(True)
18
+ self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1,
19
+ padding=1, bias=False)
20
+ self.bn2 = nn.BatchNorm2d(c_out)
21
+ if is_downsample:
22
+ self.downsample = nn.Sequential(
23
+ nn.Conv2d(c_in, c_out, 1, stride=2, bias=False),
24
+ nn.BatchNorm2d(c_out)
25
+ )
26
+ elif c_in != c_out:
27
+ self.downsample = nn.Sequential(
28
+ nn.Conv2d(c_in, c_out, 1, stride=1, bias=False),
29
+ nn.BatchNorm2d(c_out)
30
+ )
31
+ self.is_downsample = True
32
+
33
+ def forward(self, x):
34
+ y = self.conv1(x)
35
+ y = self.bn1(y)
36
+ y = self.relu(y)
37
+ y = self.conv2(y)
38
+ y = self.bn2(y)
39
+ if self.is_downsample:
40
+ x = self.downsample(x)
41
+ return F.relu(x.add(y), True)
42
+
43
+
44
+ def make_layers(c_in, c_out, repeat_times, is_downsample=False):
45
+ blocks = []
46
+ for i in range(repeat_times):
47
+ if i == 0:
48
+ blocks += [BasicBlock(c_in, c_out, is_downsample=is_downsample), ]
49
+ else:
50
+ blocks += [BasicBlock(c_out, c_out), ]
51
+ return nn.Sequential(*blocks)
52
+
53
+
54
+ class Net(nn.Module):
55
+ def __init__(self, num_classes=751, reid=False):
56
+ super(Net, self).__init__()
57
+ # 3 128 64
58
+ self.conv = nn.Sequential(
59
+ nn.Conv2d(3, 64, 3, stride=1, padding=1),
60
+ nn.BatchNorm2d(64),
61
+ nn.ReLU(inplace=True),
62
+ # nn.Conv2d(32,32,3,stride=1,padding=1),
63
+ # nn.BatchNorm2d(32),
64
+ # nn.ReLU(inplace=True),
65
+ nn.MaxPool2d(3, 2, padding=1),
66
+ )
67
+ # 64 64 32
68
+ self.layer1 = make_layers(64, 64, 2, False)
69
+ # 64 64 32
70
+ self.layer2 = make_layers(64, 128, 2, True)
71
+ # 128 32 16
72
+ self.layer3 = make_layers(128, 256, 2, True)
73
+ # 256 16 8
74
+ self.layer4 = make_layers(256, 512, 2, True)
75
+ # 512 8 4
76
+ self.avgpool = nn.AvgPool2d((8, 4), 1)
77
+ # 512 1 1
78
+ self.reid = reid
79
+ self.classifier = nn.Sequential(
80
+ nn.Linear(512, 256),
81
+ nn.BatchNorm1d(256),
82
+ nn.ReLU(inplace=True),
83
+ nn.Dropout(),
84
+ nn.Linear(256, num_classes),
85
+ )
86
+
87
+ def forward(self, x):
88
+ x = self.conv(x)
89
+ x = self.layer1(x)
90
+ x = self.layer2(x)
91
+ x = self.layer3(x)
92
+ x = self.layer4(x)
93
+ x = self.avgpool(x)
94
+ x = x.view(x.size(0), -1)
95
+ # B x 512
96
+ if self.reid:
97
+ x = x.div(x.norm(p=2, dim=1, keepdim=True))
98
+ return x
99
+ # classifier
100
+ x = self.classifier(x)
101
+ return x
102
+
103
+
104
+ if __name__ == '__main__':
105
+ net = Net()
106
+ x = torch.randn(4, 3, 128, 64)
107
+ y = net(x)
108
+ import ipdb
109
+ ipdb.set_trace()
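(A quick shape sanity check for the backbone above; batch of 4 Market-1501-sized crops, and in `reid` mode the network returns L2-normalised 512-d embeddings. The import mirrors how `test.py` and `train.py` import this module, i.e. run next to `model.py`:)

```python
import torch
from model import Net  # run alongside model.py, as test.py/train.py do

net = Net(reid=True)
x = torch.randn(4, 3, 128, 64)  # B x C x H x W
feats = net(x)
print(feats.shape)               # torch.Size([4, 512])
print(feats.norm(dim=1))         # ~1.0 per row: unit-length features
```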
deep_sort_pytorch/deep_sort/deep/original_model.py ADDED
@@ -0,0 +1,111 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+
6
+ class BasicBlock(nn.Module):
7
+ def __init__(self, c_in, c_out, is_downsample=False):
8
+ super(BasicBlock, self).__init__()
9
+ self.is_downsample = is_downsample
10
+ if is_downsample:
11
+ self.conv1 = nn.Conv2d(
12
+ c_in, c_out, 3, stride=2, padding=1, bias=False)
13
+ else:
14
+ self.conv1 = nn.Conv2d(
15
+ c_in, c_out, 3, stride=1, padding=1, bias=False)
16
+ self.bn1 = nn.BatchNorm2d(c_out)
17
+ self.relu = nn.ReLU(True)
18
+ self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1,
19
+ padding=1, bias=False)
20
+ self.bn2 = nn.BatchNorm2d(c_out)
21
+ if is_downsample:
22
+ self.downsample = nn.Sequential(
23
+ nn.Conv2d(c_in, c_out, 1, stride=2, bias=False),
24
+ nn.BatchNorm2d(c_out)
25
+ )
26
+ elif c_in != c_out:
27
+ self.downsample = nn.Sequential(
28
+ nn.Conv2d(c_in, c_out, 1, stride=1, bias=False),
29
+ nn.BatchNorm2d(c_out)
30
+ )
31
+ self.is_downsample = True
32
+
33
+ def forward(self, x):
34
+ y = self.conv1(x)
35
+ y = self.bn1(y)
36
+ y = self.relu(y)
37
+ y = self.conv2(y)
38
+ y = self.bn2(y)
39
+ if self.is_downsample:
40
+ x = self.downsample(x)
41
+ return F.relu(x.add(y), True)
42
+
43
+
44
+ def make_layers(c_in, c_out, repeat_times, is_downsample=False):
45
+ blocks = []
46
+ for i in range(repeat_times):
47
+ if i == 0:
48
+ blocks += [BasicBlock(c_in, c_out, is_downsample=is_downsample), ]
49
+ else:
50
+ blocks += [BasicBlock(c_out, c_out), ]
51
+ return nn.Sequential(*blocks)
52
+
53
+
54
+ class Net(nn.Module):
55
+ def __init__(self, num_classes=625, reid=False):
56
+ super(Net, self).__init__()
57
+ # 3 128 64
58
+ self.conv = nn.Sequential(
59
+ nn.Conv2d(3, 32, 3, stride=1, padding=1),
60
+ nn.BatchNorm2d(32),
61
+ nn.ELU(inplace=True),
62
+ nn.Conv2d(32, 32, 3, stride=1, padding=1),
63
+ nn.BatchNorm2d(32),
64
+ nn.ELU(inplace=True),
65
+ nn.MaxPool2d(3, 2, padding=1),
66
+ )
67
+ # 32 64 32
68
+ self.layer1 = make_layers(32, 32, 2, False)
69
+ # 32 64 32
70
+ self.layer2 = make_layers(32, 64, 2, True)
71
+ # 64 32 16
72
+ self.layer3 = make_layers(64, 128, 2, True)
73
+ # 128 16 8
74
+ self.dense = nn.Sequential(
75
+ nn.Dropout(p=0.6),
76
+ nn.Linear(128*16*8, 128),
77
+ nn.BatchNorm1d(128),
78
+ nn.ELU(inplace=True)
79
+ )
80
+ # B x 128 after the dense head
81
+ self.reid = reid
82
+ self.batch_norm = nn.BatchNorm1d(128)
83
+ self.classifier = nn.Sequential(
84
+ nn.Linear(128, num_classes),
85
+ )
86
+
87
+ def forward(self, x):
88
+ x = self.conv(x)
89
+ x = self.layer1(x)
90
+ x = self.layer2(x)
91
+ x = self.layer3(x)
92
+
93
+ x = x.view(x.size(0), -1)
94
+ if self.reid:
95
+ x = self.dense[0](x)
96
+ x = self.dense[1](x)
97
+ x = x.div(x.norm(p=2, dim=1, keepdim=True))
98
+ return x
99
+ x = self.dense(x)
100
+ # B x 128
101
+ # classifier
102
+ x = self.classifier(x)
103
+ return x
104
+
105
+
106
+ if __name__ == '__main__':
107
+ net = Net(reid=True)
108
+ x = torch.randn(4, 3, 128, 64)
109
+ y = net(x)
110
+ import ipdb
111
+ ipdb.set_trace()
deep_sort_pytorch/deep_sort/deep/test.py ADDED
@@ -0,0 +1,80 @@
1
+ import torch
2
+ import torch.backends.cudnn as cudnn
3
+ import torchvision
4
+
5
+ import argparse
6
+ import os
7
+
8
+ from model import Net
9
+
10
+ parser = argparse.ArgumentParser(description="Train on market1501")
11
+ parser.add_argument("--data-dir", default='data', type=str)
12
+ parser.add_argument("--no-cuda", action="store_true")
13
+ parser.add_argument("--gpu-id", default=0, type=int)
14
+ args = parser.parse_args()
15
+
16
+ # device
17
+ device = "cuda:{}".format(
18
+ args.gpu_id) if torch.cuda.is_available() and not args.no_cuda else "cpu"
19
+ if torch.cuda.is_available() and not args.no_cuda:
20
+ cudnn.benchmark = True
21
+
22
+ # data loader
23
+ root = args.data_dir
24
+ query_dir = os.path.join(root, "query")
25
+ gallery_dir = os.path.join(root, "gallery")
26
+ transform = torchvision.transforms.Compose([
27
+ torchvision.transforms.Resize((128, 64)),
28
+ torchvision.transforms.ToTensor(),
29
+ torchvision.transforms.Normalize(
30
+ [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
31
+ ])
32
+ queryloader = torch.utils.data.DataLoader(
33
+ torchvision.datasets.ImageFolder(query_dir, transform=transform),
34
+ batch_size=64, shuffle=False
35
+ )
36
+ galleryloader = torch.utils.data.DataLoader(
37
+ torchvision.datasets.ImageFolder(gallery_dir, transform=transform),
38
+ batch_size=64, shuffle=False
39
+ )
40
+
41
+ # net definition
42
+ net = Net(reid=True)
43
+ assert os.path.isfile(
44
+ "./checkpoint/ckpt.t7"), "Error: no checkpoint file found!"
45
+ print('Loading from checkpoint/ckpt.t7')
46
+ checkpoint = torch.load("./checkpoint/ckpt.t7")
47
+ net_dict = checkpoint['net_dict']
48
+ net.load_state_dict(net_dict, strict=False)
49
+ net.eval()
50
+ net.to(device)
51
+
52
+ # compute features
53
+ query_features = torch.tensor([]).float()
54
+ query_labels = torch.tensor([]).long()
55
+ gallery_features = torch.tensor([]).float()
56
+ gallery_labels = torch.tensor([]).long()
57
+
58
+ with torch.no_grad():
59
+ for idx, (inputs, labels) in enumerate(queryloader):
60
+ inputs = inputs.to(device)
61
+ features = net(inputs).cpu()
62
+ query_features = torch.cat((query_features, features), dim=0)
63
+ query_labels = torch.cat((query_labels, labels))
64
+
65
+ for idx, (inputs, labels) in enumerate(galleryloader):
66
+ inputs = inputs.to(device)
67
+ features = net(inputs).cpu()
68
+ gallery_features = torch.cat((gallery_features, features), dim=0)
69
+ gallery_labels = torch.cat((gallery_labels, labels))
70
+
71
+ gallery_labels -= 2 # offset for the two extra gallery folders (junk/distractors) so labels line up with the query set
72
+
73
+ # save features
74
+ features = {
75
+ "qf": query_features,
76
+ "ql": query_labels,
77
+ "gf": gallery_features,
78
+ "gl": gallery_labels
79
+ }
80
+ torch.save(features, "features.pth")
deep_sort_pytorch/deep_sort/deep/train.jpg ADDED
deep_sort_pytorch/deep_sort/deep/train.py ADDED
@@ -0,0 +1,206 @@
1
+ import argparse
2
+ import os
3
+ import time
4
+
5
+ import numpy as np
6
+ import matplotlib.pyplot as plt
7
+ import torch
8
+ import torch.backends.cudnn as cudnn
9
+ import torchvision
10
+
11
+ from model import Net
12
+
13
+ parser = argparse.ArgumentParser(description="Train on market1501")
14
+ parser.add_argument("--data-dir", default='data', type=str)
15
+ parser.add_argument("--no-cuda", action="store_true")
16
+ parser.add_argument("--gpu-id", default=0, type=int)
17
+ parser.add_argument("--lr", default=0.1, type=float)
18
+ parser.add_argument("--interval", '-i', default=20, type=int)
19
+ parser.add_argument('--resume', '-r', action='store_true')
20
+ args = parser.parse_args()
21
+
22
+ # device
23
+ device = "cuda:{}".format(
24
+ args.gpu_id) if torch.cuda.is_available() and not args.no_cuda else "cpu"
25
+ if torch.cuda.is_available() and not args.no_cuda:
26
+ cudnn.benchmark = True
27
+
28
+ # data loading
29
+ root = args.data_dir
30
+ train_dir = os.path.join(root, "train")
31
+ test_dir = os.path.join(root, "test")
32
+ transform_train = torchvision.transforms.Compose([
33
+ torchvision.transforms.RandomCrop((128, 64), padding=4),
34
+ torchvision.transforms.RandomHorizontalFlip(),
35
+ torchvision.transforms.ToTensor(),
36
+ torchvision.transforms.Normalize(
37
+ [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
38
+ ])
39
+ transform_test = torchvision.transforms.Compose([
40
+ torchvision.transforms.Resize((128, 64)),
41
+ torchvision.transforms.ToTensor(),
42
+ torchvision.transforms.Normalize(
43
+ [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
44
+ ])
45
+ trainloader = torch.utils.data.DataLoader(
46
+ torchvision.datasets.ImageFolder(train_dir, transform=transform_train),
47
+ batch_size=64, shuffle=True
48
+ )
49
+ testloader = torch.utils.data.DataLoader(
50
+ torchvision.datasets.ImageFolder(test_dir, transform=transform_test),
51
+ batch_size=64, shuffle=True
52
+ )
53
+ num_classes = max(len(trainloader.dataset.classes),
54
+ len(testloader.dataset.classes))
55
+
56
+ # net definition
57
+ start_epoch = 0
58
+ net = Net(num_classes=num_classes)
59
+ if args.resume:
60
+ assert os.path.isfile(
61
+ "./checkpoint/ckpt.t7"), "Error: no checkpoint file found!"
62
+ print('Loading from checkpoint/ckpt.t7')
63
+ checkpoint = torch.load("./checkpoint/ckpt.t7")
64
+ # import ipdb; ipdb.set_trace()
65
+ net_dict = checkpoint['net_dict']
66
+ net.load_state_dict(net_dict)
67
+ best_acc = checkpoint['acc']
68
+ start_epoch = checkpoint['epoch']
69
+ net.to(device)
70
+
71
+ # loss and optimizer
72
+ criterion = torch.nn.CrossEntropyLoss()
73
+ optimizer = torch.optim.SGD(
74
+ net.parameters(), args.lr, momentum=0.9, weight_decay=5e-4)
75
+ best_acc = best_acc if args.resume else 0. # keep the checkpoint accuracy when resuming
76
+
77
+ # train function for each epoch
78
+
79
+
80
+ def train(epoch):
81
+ print("\nEpoch : %d" % (epoch+1))
82
+ net.train()
83
+ training_loss = 0.
84
+ train_loss = 0.
85
+ correct = 0
86
+ total = 0
87
+ interval = args.interval
88
+ start = time.time()
89
+ for idx, (inputs, labels) in enumerate(trainloader):
90
+ # forward
91
+ inputs, labels = inputs.to(device), labels.to(device)
92
+ outputs = net(inputs)
93
+ loss = criterion(outputs, labels)
94
+
95
+ # backward
96
+ optimizer.zero_grad()
97
+ loss.backward()
98
+ optimizer.step()
99
+
100
+ # accumurating
101
+ training_loss += loss.item()
102
+ train_loss += loss.item()
103
+ correct += outputs.max(dim=1)[1].eq(labels).sum().item()
104
+ total += labels.size(0)
105
+
106
+ # print
107
+ if (idx+1) % interval == 0:
108
+ end = time.time()
109
+ print("[progress:{:.1f}%]time:{:.2f}s Loss:{:.5f} Correct:{}/{} Acc:{:.3f}%".format(
110
+ 100.*(idx+1)/len(trainloader), end-start, training_loss /
111
+ interval, correct, total, 100.*correct/total
112
+ ))
113
+ training_loss = 0.
114
+ start = time.time()
115
+
116
+ return train_loss/len(trainloader), 1. - correct/total
117
+
118
+
119
+ def test(epoch):
120
+ global best_acc
121
+ net.eval()
122
+ test_loss = 0.
123
+ correct = 0
124
+ total = 0
125
+ start = time.time()
126
+ with torch.no_grad():
127
+ for idx, (inputs, labels) in enumerate(testloader):
128
+ inputs, labels = inputs.to(device), labels.to(device)
129
+ outputs = net(inputs)
130
+ loss = criterion(outputs, labels)
131
+
132
+ test_loss += loss.item()
133
+ correct += outputs.max(dim=1)[1].eq(labels).sum().item()
134
+ total += labels.size(0)
135
+
136
+ print("Testing ...")
137
+ end = time.time()
138
+ print("[progress:{:.1f}%]time:{:.2f}s Loss:{:.5f} Correct:{}/{} Acc:{:.3f}%".format(
139
+ 100.*(idx+1)/len(testloader), end-start, test_loss /
140
+ len(testloader), correct, total, 100.*correct/total
141
+ ))
142
+
143
+ # saving checkpoint
144
+ acc = 100.*correct/total
145
+ if acc > best_acc:
146
+ best_acc = acc
147
+ print("Saving parameters to checkpoint/ckpt.t7")
148
+ checkpoint = {
149
+ 'net_dict': net.state_dict(),
150
+ 'acc': acc,
151
+ 'epoch': epoch,
152
+ }
153
+ if not os.path.isdir('checkpoint'):
154
+ os.mkdir('checkpoint')
155
+ torch.save(checkpoint, './checkpoint/ckpt.t7')
156
+
157
+ return test_loss/len(testloader), 1. - correct/total
158
+
159
+
160
+ # plot figure
161
+ x_epoch = []
162
+ record = {'train_loss': [], 'train_err': [], 'test_loss': [], 'test_err': []}
163
+ fig = plt.figure()
164
+ ax0 = fig.add_subplot(121, title="loss")
165
+ ax1 = fig.add_subplot(122, title="top1err")
166
+
167
+
168
+ def draw_curve(epoch, train_loss, train_err, test_loss, test_err):
169
+ global record
170
+ record['train_loss'].append(train_loss)
171
+ record['train_err'].append(train_err)
172
+ record['test_loss'].append(test_loss)
173
+ record['test_err'].append(test_err)
174
+
175
+ x_epoch.append(epoch)
176
+ ax0.plot(x_epoch, record['train_loss'], 'bo-', label='train')
177
+ ax0.plot(x_epoch, record['test_loss'], 'ro-', label='val')
178
+ ax1.plot(x_epoch, record['train_err'], 'bo-', label='train')
179
+ ax1.plot(x_epoch, record['test_err'], 'ro-', label='val')
180
+ if epoch == 0:
181
+ ax0.legend()
182
+ ax1.legend()
183
+ fig.savefig("train.jpg")
184
+
185
+ # lr decay
186
+
187
+
188
+ def lr_decay():
189
+ global optimizer
190
+ for params in optimizer.param_groups:
191
+ params['lr'] *= 0.1
192
+ lr = params['lr']
193
+ print("Learning rate adjusted to {}".format(lr))
194
+
195
+
196
+ def main():
197
+ for epoch in range(start_epoch, start_epoch+40):
198
+ train_loss, train_err = train(epoch)
199
+ test_loss, test_err = test(epoch)
200
+ draw_curve(epoch, train_loss, train_err, test_loss, test_err)
201
+ if (epoch+1) % 20 == 0:
202
+ lr_decay()
203
+
204
+
205
+ if __name__ == '__main__':
206
+ main()
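Typical invocation, assuming a Market-1501-style ImageFolder layout under data/train and data/test (hypothetical paths): python train.py --data-dir data --lr 0.1. Over the fixed 40-epoch run, lr_decay() fires after epochs 20 and 40, so the learning rate steps 0.1 -> 0.01 -> 0.001 (each decay multiplies by 0.1).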
deep_sort_pytorch/deep_sort/deep_sort.py ADDED
@@ -0,0 +1,113 @@
+ import numpy as np
+ import torch
+
+ from .deep.feature_extractor import Extractor
+ from .sort.nn_matching import NearestNeighborDistanceMetric
+ from .sort.detection import Detection
+ from .sort.tracker import Tracker
+
+
+ __all__ = ['DeepSort']
+
+
+ class DeepSort(object):
+     def __init__(self, model_path, max_dist=0.2, min_confidence=0.3, nms_max_overlap=1.0, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):
+         self.min_confidence = min_confidence
+         self.nms_max_overlap = nms_max_overlap
+
+         self.extractor = Extractor(model_path, use_cuda=use_cuda)
+
+         max_cosine_distance = max_dist
+         metric = NearestNeighborDistanceMetric(
+             "cosine", max_cosine_distance, nn_budget)
+         self.tracker = Tracker(
+             metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)
+
+     def update(self, bbox_xywh, confidences, ori_img):
+         self.height, self.width = ori_img.shape[:2]
+         # generate detections
+         features = self._get_features(bbox_xywh, ori_img)
+         bbox_tlwh = self._xywh_to_tlwh(bbox_xywh)
+         detections = [Detection(bbox_tlwh[i], conf, features[i]) for i, conf in enumerate(
+             confidences) if conf > self.min_confidence]
+
+         # run non-maximum suppression
+         boxes = np.array([d.tlwh for d in detections])
+         scores = np.array([d.confidence for d in detections])
+
+         # update tracker
+         self.tracker.predict()
+         self.tracker.update(detections)
+
+         # output bbox identities
+         outputs = []
+         for track in self.tracker.tracks:
+             if not track.is_confirmed() or track.time_since_update > 1:
+                 continue
+             box = track.to_tlwh()
+             x1, y1, x2, y2 = self._tlwh_to_xyxy(box)
+             track_id = track.track_id
+             # plain int: np.int was removed in NumPy >= 1.24
+             outputs.append(np.array([x1, y1, x2, y2, track_id], dtype=int))
+         if len(outputs) > 0:
+             outputs = np.stack(outputs, axis=0)
+         return outputs
+
+     """
+     Convert bbox from xc_yc_w_h to xtl_ytl_w_h.
+     Thanks [email protected] for reporting this bug!
+     """
+     @staticmethod
+     def _xywh_to_tlwh(bbox_xywh):
+         if isinstance(bbox_xywh, np.ndarray):
+             bbox_tlwh = bbox_xywh.copy()
+         elif isinstance(bbox_xywh, torch.Tensor):
+             bbox_tlwh = bbox_xywh.clone()
+         bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.
+         bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.
+         return bbox_tlwh
+
+     def _xywh_to_xyxy(self, bbox_xywh):
+         x, y, w, h = bbox_xywh
+         x1 = max(int(x - w / 2), 0)
+         x2 = min(int(x + w / 2), self.width - 1)
+         y1 = max(int(y - h / 2), 0)
+         y2 = min(int(y + h / 2), self.height - 1)
+         return x1, y1, x2, y2
+
+     def _tlwh_to_xyxy(self, bbox_tlwh):
+         """
+         Convert bbox from xtl_ytl_w_h to xmin_ymin_xmax_ymax.
+         Thanks [email protected] for reporting this bug!
+         """
+         x, y, w, h = bbox_tlwh
+         x1 = max(int(x), 0)
+         x2 = min(int(x+w), self.width - 1)
+         y1 = max(int(y), 0)
+         y2 = min(int(y+h), self.height - 1)
+         return x1, y1, x2, y2
+
+     def increment_ages(self):
+         self.tracker.increment_ages()
+
+     def _xyxy_to_tlwh(self, bbox_xyxy):
+         x1, y1, x2, y2 = bbox_xyxy
+
+         t = x1
+         l = y1
+         w = int(x2 - x1)
+         h = int(y2 - y1)
+         return t, l, w, h
+
+     def _get_features(self, bbox_xywh, ori_img):
+         im_crops = []
+         for box in bbox_xywh:
+             x1, y1, x2, y2 = self._xywh_to_xyxy(box)
+             im = ori_img[y1:y2, x1:x2]
+             im_crops.append(im)
+         if im_crops:
+             features = self.extractor(im_crops)
+         else:
+             features = np.array([])
+         return features
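A minimal per-frame usage sketch for the class above, assuming a detector that emits center-format (xc, yc, w, h) boxes and confidences for a BGR frame; the checkpoint path points at the re-ID weights shipped in this commit, the image path is hypothetical:

import cv2
import numpy as np
from deep_sort_pytorch.deep_sort.deep_sort import DeepSort

tracker = DeepSort("deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7",
                   use_cuda=False)
frame = cv2.imread("frame.jpg")                  # any BGR image
bbox_xywh = np.array([[320., 240., 80., 160.]])  # one dummy detection
confidences = np.array([0.9])
outputs = tracker.update(bbox_xywh, confidences, frame)
# outputs stays empty until a track survives n_init=3 consecutive frames
for x1, y1, x2, y2, track_id in outputs:
    print(track_id, (x1, y1, x2, y2))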
deep_sort_pytorch/deep_sort/sort/__init__.py ADDED
File without changes
deep_sort_pytorch/deep_sort/sort/detection.py ADDED
@@ -0,0 +1,49 @@
+ # vim: expandtab:ts=4:sw=4
+ import numpy as np
+
+
+ class Detection(object):
+     """
+     This class represents a bounding box detection in a single image.
+
+     Parameters
+     ----------
+     tlwh : array_like
+         Bounding box in format `(x, y, w, h)`.
+     confidence : float
+         Detector confidence score.
+     feature : array_like
+         A feature vector that describes the object contained in this image.
+
+     Attributes
+     ----------
+     tlwh : ndarray
+         Bounding box in format `(top left x, top left y, width, height)`.
+     confidence : float
+         Detector confidence score.
+     feature : ndarray | NoneType
+         A feature vector that describes the object contained in this image.
+
+     """
+
+     def __init__(self, tlwh, confidence, feature):
+         # plain float: np.float was removed in NumPy >= 1.24
+         self.tlwh = np.asarray(tlwh, dtype=float)
+         self.confidence = float(confidence)
+         self.feature = np.asarray(feature, dtype=np.float32)
+
+     def to_tlbr(self):
+         """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
+         `(top left, bottom right)`.
+         """
+         ret = self.tlwh.copy()
+         ret[2:] += ret[:2]
+         return ret
+
+     def to_xyah(self):
+         """Convert bounding box to format `(center x, center y, aspect ratio,
+         height)`, where the aspect ratio is `width / height`.
+         """
+         ret = self.tlwh.copy()
+         ret[:2] += ret[2:] / 2
+         ret[2] /= ret[3]
+         return ret
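A quick numeric check of the two conversions above (hypothetical box; run with Detection in scope):

import numpy as np

d = Detection(tlwh=[10, 20, 50, 100], confidence=0.9, feature=np.zeros(512))
print(d.to_tlbr())   # [ 10.  20.  60. 120.]
print(d.to_xyah())   # [35. 70. 0.5 100.] -> center (35, 70), a = 50/100, h = 100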
deep_sort_pytorch/deep_sort/sort/iou_matching.py ADDED
@@ -0,0 +1,82 @@
+ # vim: expandtab:ts=4:sw=4
+ from __future__ import absolute_import
+ import numpy as np
+ from . import linear_assignment
+
+
+ def iou(bbox, candidates):
+     """Compute intersection over union.
+
+     Parameters
+     ----------
+     bbox : ndarray
+         A bounding box in format `(top left x, top left y, width, height)`.
+     candidates : ndarray
+         A matrix of candidate bounding boxes (one per row) in the same format
+         as `bbox`.
+
+     Returns
+     -------
+     ndarray
+         The intersection over union in [0, 1] between the `bbox` and each
+         candidate. A higher score means a larger fraction of the `bbox` is
+         occluded by the candidate.
+
+     """
+     bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
+     candidates_tl = candidates[:, :2]
+     candidates_br = candidates[:, :2] + candidates[:, 2:]
+
+     tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
+                np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
+     br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
+                np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
+     wh = np.maximum(0., br - tl)
+
+     area_intersection = wh.prod(axis=1)
+     area_bbox = bbox[2:].prod()
+     area_candidates = candidates[:, 2:].prod(axis=1)
+     return area_intersection / (area_bbox + area_candidates - area_intersection)
+
+
+ def iou_cost(tracks, detections, track_indices=None,
+              detection_indices=None):
+     """An intersection over union distance metric.
+
+     Parameters
+     ----------
+     tracks : List[deep_sort.track.Track]
+         A list of tracks.
+     detections : List[deep_sort.detection.Detection]
+         A list of detections.
+     track_indices : Optional[List[int]]
+         A list of indices to tracks that should be matched. Defaults to
+         all `tracks`.
+     detection_indices : Optional[List[int]]
+         A list of indices to detections that should be matched. Defaults
+         to all `detections`.
+
+     Returns
+     -------
+     ndarray
+         Returns a cost matrix of shape
+         len(track_indices), len(detection_indices) where entry (i, j) is
+         `1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
+
+     """
+     if track_indices is None:
+         track_indices = np.arange(len(tracks))
+     if detection_indices is None:
+         detection_indices = np.arange(len(detections))
+
+     cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
+     for row, track_idx in enumerate(track_indices):
+         if tracks[track_idx].time_since_update > 1:
+             cost_matrix[row, :] = linear_assignment.INFTY_COST
+             continue
+
+         bbox = tracks[track_idx].to_tlwh()
+         candidates = np.asarray(
+             [detections[i].tlwh for i in detection_indices])
+         cost_matrix[row, :] = 1. - iou(bbox, candidates)
+     return cost_matrix
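A small worked example of iou above: a 10x10 box at the origin against a 10x10 candidate shifted right by 5 intersects in a 5x10 strip, so IoU = 50 / (100 + 100 - 50) = 1/3:

import numpy as np

bbox = np.array([0., 0., 10., 10.])
candidates = np.array([[5., 0., 10., 10.]])
print(iou(bbox, candidates))   # [0.33333333]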
deep_sort_pytorch/deep_sort/sort/kalman_filter.py ADDED
@@ -0,0 +1,229 @@
+ # vim: expandtab:ts=4:sw=4
+ import numpy as np
+ import scipy.linalg
+
+
+ """
+ Table for the 0.95 quantile of the chi-square distribution with N degrees of
+ freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
+ function and used as Mahalanobis gating threshold.
+ """
+ chi2inv95 = {
+     1: 3.8415,
+     2: 5.9915,
+     3: 7.8147,
+     4: 9.4877,
+     5: 11.070,
+     6: 12.592,
+     7: 14.067,
+     8: 15.507,
+     9: 16.919}
+
+
+ class KalmanFilter(object):
+     """
+     A simple Kalman filter for tracking bounding boxes in image space.
+
+     The 8-dimensional state space
+
+         x, y, a, h, vx, vy, va, vh
+
+     contains the bounding box center position (x, y), aspect ratio a, height h,
+     and their respective velocities.
+
+     Object motion follows a constant velocity model. The bounding box location
+     (x, y, a, h) is taken as direct observation of the state space (linear
+     observation model).
+
+     """
+
+     def __init__(self):
+         ndim, dt = 4, 1.
+
+         # Create Kalman filter model matrices.
+         self._motion_mat = np.eye(2 * ndim, 2 * ndim)
+         for i in range(ndim):
+             self._motion_mat[i, ndim + i] = dt
+         self._update_mat = np.eye(ndim, 2 * ndim)
+
+         # Motion and observation uncertainty are chosen relative to the current
+         # state estimate. These weights control the amount of uncertainty in
+         # the model. This is a bit hacky.
+         self._std_weight_position = 1. / 20
+         self._std_weight_velocity = 1. / 160
+
+     def initiate(self, measurement):
+         """Create track from unassociated measurement.
+
+         Parameters
+         ----------
+         measurement : ndarray
+             Bounding box coordinates (x, y, a, h) with center position (x, y),
+             aspect ratio a, and height h.
+
+         Returns
+         -------
+         (ndarray, ndarray)
+             Returns the mean vector (8 dimensional) and covariance matrix (8x8
+             dimensional) of the new track. Unobserved velocities are initialized
+             to 0 mean.
+
+         """
+         mean_pos = measurement
+         mean_vel = np.zeros_like(mean_pos)
+         mean = np.r_[mean_pos, mean_vel]
+
+         std = [
+             2 * self._std_weight_position * measurement[3],
+             2 * self._std_weight_position * measurement[3],
+             1e-2,
+             2 * self._std_weight_position * measurement[3],
+             10 * self._std_weight_velocity * measurement[3],
+             10 * self._std_weight_velocity * measurement[3],
+             1e-5,
+             10 * self._std_weight_velocity * measurement[3]]
+         covariance = np.diag(np.square(std))
+         return mean, covariance
+
+     def predict(self, mean, covariance):
+         """Run Kalman filter prediction step.
+
+         Parameters
+         ----------
+         mean : ndarray
+             The 8 dimensional mean vector of the object state at the previous
+             time step.
+         covariance : ndarray
+             The 8x8 dimensional covariance matrix of the object state at the
+             previous time step.
+
+         Returns
+         -------
+         (ndarray, ndarray)
+             Returns the mean vector and covariance matrix of the predicted
+             state. Unobserved velocities are initialized to 0 mean.
+
+         """
+         std_pos = [
+             self._std_weight_position * mean[3],
+             self._std_weight_position * mean[3],
+             1e-2,
+             self._std_weight_position * mean[3]]
+         std_vel = [
+             self._std_weight_velocity * mean[3],
+             self._std_weight_velocity * mean[3],
+             1e-5,
+             self._std_weight_velocity * mean[3]]
+         motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
+
+         mean = np.dot(self._motion_mat, mean)
+         covariance = np.linalg.multi_dot((
+             self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
+
+         return mean, covariance
+
+     def project(self, mean, covariance):
+         """Project state distribution to measurement space.
+
+         Parameters
+         ----------
+         mean : ndarray
+             The state's mean vector (8 dimensional array).
+         covariance : ndarray
+             The state's covariance matrix (8x8 dimensional).
+
+         Returns
+         -------
+         (ndarray, ndarray)
+             Returns the projected mean and covariance matrix of the given state
+             estimate.
+
+         """
+         std = [
+             self._std_weight_position * mean[3],
+             self._std_weight_position * mean[3],
+             1e-1,
+             self._std_weight_position * mean[3]]
+         innovation_cov = np.diag(np.square(std))
+
+         mean = np.dot(self._update_mat, mean)
+         covariance = np.linalg.multi_dot((
+             self._update_mat, covariance, self._update_mat.T))
+         return mean, covariance + innovation_cov
+
+     def update(self, mean, covariance, measurement):
+         """Run Kalman filter correction step.
+
+         Parameters
+         ----------
+         mean : ndarray
+             The predicted state's mean vector (8 dimensional).
+         covariance : ndarray
+             The state's covariance matrix (8x8 dimensional).
+         measurement : ndarray
+             The 4 dimensional measurement vector (x, y, a, h), where (x, y)
+             is the center position, a the aspect ratio, and h the height of the
+             bounding box.
+
+         Returns
+         -------
+         (ndarray, ndarray)
+             Returns the measurement-corrected state distribution.
+
+         """
+         projected_mean, projected_cov = self.project(mean, covariance)
+
+         chol_factor, lower = scipy.linalg.cho_factor(
+             projected_cov, lower=True, check_finite=False)
+         kalman_gain = scipy.linalg.cho_solve(
+             (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
+             check_finite=False).T
+         innovation = measurement - projected_mean
+
+         new_mean = mean + np.dot(innovation, kalman_gain.T)
+         new_covariance = covariance - np.linalg.multi_dot((
+             kalman_gain, projected_cov, kalman_gain.T))
+         return new_mean, new_covariance
+
+     def gating_distance(self, mean, covariance, measurements,
+                         only_position=False):
+         """Compute gating distance between state distribution and measurements.
+
+         A suitable distance threshold can be obtained from `chi2inv95`. If
+         `only_position` is False, the chi-square distribution has 4 degrees of
+         freedom, otherwise 2.
+
+         Parameters
+         ----------
+         mean : ndarray
+             Mean vector over the state distribution (8 dimensional).
+         covariance : ndarray
+             Covariance of the state distribution (8x8 dimensional).
+         measurements : ndarray
+             An Nx4 dimensional matrix of N measurements, each in
+             format (x, y, a, h) where (x, y) is the bounding box center
+             position, a the aspect ratio, and h the height.
+         only_position : Optional[bool]
+             If True, distance computation is done with respect to the bounding
+             box center position only.
+
+         Returns
+         -------
+         ndarray
+             Returns an array of length N, where the i-th element contains the
+             squared Mahalanobis distance between (mean, covariance) and
+             `measurements[i]`.
+
+         """
+         mean, covariance = self.project(mean, covariance)
+         if only_position:
+             mean, covariance = mean[:2], covariance[:2, :2]
+             measurements = measurements[:, :2]
+
+         cholesky_factor = np.linalg.cholesky(covariance)
+         d = measurements - mean
+         z = scipy.linalg.solve_triangular(
+             cholesky_factor, d.T, lower=True, check_finite=False,
+             overwrite_b=True)
+         squared_maha = np.sum(z * z, axis=0)
+         return squared_maha
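A minimal sketch of one initiate/predict/update cycle on the filter above, with a hypothetical (x, y, a, h) measurement:

import numpy as np

kf = KalmanFilter()
mean, cov = kf.initiate(np.array([320., 240., 0.5, 160.]))   # start a track
mean, cov = kf.predict(mean, cov)                            # advance one frame
mean, cov = kf.update(mean, cov, np.array([324., 243., 0.5, 161.]))
print(mean[:4])   # corrected (x, y, a, h), pulled toward the measurement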
deep_sort_pytorch/deep_sort/sort/linear_assignment.py ADDED
@@ -0,0 +1,192 @@
+ # vim: expandtab:ts=4:sw=4
+ from __future__ import absolute_import
+ import numpy as np
+ # from sklearn.utils.linear_assignment_ import linear_assignment
+ from scipy.optimize import linear_sum_assignment as linear_assignment
+ from . import kalman_filter
+
+
+ INFTY_COST = 1e+5
+
+
+ def min_cost_matching(
+         distance_metric, max_distance, tracks, detections, track_indices=None,
+         detection_indices=None):
+     """Solve linear assignment problem.
+
+     Parameters
+     ----------
+     distance_metric : Callable[[List[Track], List[Detection], List[int], List[int]], ndarray]
+         The distance metric is given a list of tracks and detections as well as
+         a list of N track indices and M detection indices. The metric should
+         return the NxM dimensional cost matrix, where element (i, j) is the
+         association cost between the i-th track in the given track indices and
+         the j-th detection in the given detection_indices.
+     max_distance : float
+         Gating threshold. Associations with cost larger than this value are
+         disregarded.
+     tracks : List[track.Track]
+         A list of predicted tracks at the current time step.
+     detections : List[detection.Detection]
+         A list of detections at the current time step.
+     track_indices : List[int]
+         List of track indices that maps rows in `cost_matrix` to tracks in
+         `tracks` (see description above).
+     detection_indices : List[int]
+         List of detection indices that maps columns in `cost_matrix` to
+         detections in `detections` (see description above).
+
+     Returns
+     -------
+     (List[(int, int)], List[int], List[int])
+         Returns a tuple with the following three entries:
+         * A list of matched track and detection indices.
+         * A list of unmatched track indices.
+         * A list of unmatched detection indices.
+
+     """
+     if track_indices is None:
+         track_indices = np.arange(len(tracks))
+     if detection_indices is None:
+         detection_indices = np.arange(len(detections))
+
+     if len(detection_indices) == 0 or len(track_indices) == 0:
+         return [], track_indices, detection_indices  # Nothing to match.
+
+     cost_matrix = distance_metric(
+         tracks, detections, track_indices, detection_indices)
+     cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
+
+     row_indices, col_indices = linear_assignment(cost_matrix)
+
+     matches, unmatched_tracks, unmatched_detections = [], [], []
+     for col, detection_idx in enumerate(detection_indices):
+         if col not in col_indices:
+             unmatched_detections.append(detection_idx)
+     for row, track_idx in enumerate(track_indices):
+         if row not in row_indices:
+             unmatched_tracks.append(track_idx)
+     for row, col in zip(row_indices, col_indices):
+         track_idx = track_indices[row]
+         detection_idx = detection_indices[col]
+         if cost_matrix[row, col] > max_distance:
+             unmatched_tracks.append(track_idx)
+             unmatched_detections.append(detection_idx)
+         else:
+             matches.append((track_idx, detection_idx))
+     return matches, unmatched_tracks, unmatched_detections
+
+
+ def matching_cascade(
+         distance_metric, max_distance, cascade_depth, tracks, detections,
+         track_indices=None, detection_indices=None):
+     """Run matching cascade.
+
+     Parameters
+     ----------
+     distance_metric : Callable[[List[Track], List[Detection], List[int], List[int]], ndarray]
+         The distance metric is given a list of tracks and detections as well as
+         a list of N track indices and M detection indices. The metric should
+         return the NxM dimensional cost matrix, where element (i, j) is the
+         association cost between the i-th track in the given track indices and
+         the j-th detection in the given detection indices.
+     max_distance : float
+         Gating threshold. Associations with cost larger than this value are
+         disregarded.
+     cascade_depth: int
+         The cascade depth, should be set to the maximum track age.
+     tracks : List[track.Track]
+         A list of predicted tracks at the current time step.
+     detections : List[detection.Detection]
+         A list of detections at the current time step.
+     track_indices : Optional[List[int]]
+         List of track indices that maps rows in `cost_matrix` to tracks in
+         `tracks` (see description above). Defaults to all tracks.
+     detection_indices : Optional[List[int]]
+         List of detection indices that maps columns in `cost_matrix` to
+         detections in `detections` (see description above). Defaults to all
+         detections.
+
+     Returns
+     -------
+     (List[(int, int)], List[int], List[int])
+         Returns a tuple with the following three entries:
+         * A list of matched track and detection indices.
+         * A list of unmatched track indices.
+         * A list of unmatched detection indices.
+
+     """
+     if track_indices is None:
+         track_indices = list(range(len(tracks)))
+     if detection_indices is None:
+         detection_indices = list(range(len(detections)))
+
+     unmatched_detections = detection_indices
+     matches = []
+     for level in range(cascade_depth):
+         if len(unmatched_detections) == 0:  # No detections left
+             break
+
+         track_indices_l = [
+             k for k in track_indices
+             if tracks[k].time_since_update == 1 + level
+         ]
+         if len(track_indices_l) == 0:  # Nothing to match at this level
+             continue
+
+         matches_l, _, unmatched_detections = \
+             min_cost_matching(
+                 distance_metric, max_distance, tracks, detections,
+                 track_indices_l, unmatched_detections)
+         matches += matches_l
+     unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
+     return matches, unmatched_tracks, unmatched_detections
+
+
+ def gate_cost_matrix(
+         kf, cost_matrix, tracks, detections, track_indices, detection_indices,
+         gated_cost=INFTY_COST, only_position=False):
+     """Invalidate infeasible entries in cost matrix based on the state
+     distributions obtained by Kalman filtering.
+
+     Parameters
+     ----------
+     kf : The Kalman filter.
+     cost_matrix : ndarray
+         The NxM dimensional cost matrix, where N is the number of track indices
+         and M is the number of detection indices, such that entry (i, j) is the
+         association cost between `tracks[track_indices[i]]` and
+         `detections[detection_indices[j]]`.
+     tracks : List[track.Track]
+         A list of predicted tracks at the current time step.
+     detections : List[detection.Detection]
+         A list of detections at the current time step.
+     track_indices : List[int]
+         List of track indices that maps rows in `cost_matrix` to tracks in
+         `tracks` (see description above).
+     detection_indices : List[int]
+         List of detection indices that maps columns in `cost_matrix` to
+         detections in `detections` (see description above).
+     gated_cost : Optional[float]
+         Entries in the cost matrix corresponding to infeasible associations are
+         set to this value. Defaults to a very large value.
+     only_position : Optional[bool]
+         If True, only the x, y position of the state distribution is considered
+         during gating. Defaults to False.
+
+     Returns
+     -------
+     ndarray
+         Returns the modified cost matrix.
+
+     """
+     gating_dim = 2 if only_position else 4
+     gating_threshold = kalman_filter.chi2inv95[gating_dim]
+     measurements = np.asarray(
+         [detections[i].to_xyah() for i in detection_indices])
+     for row, track_idx in enumerate(track_indices):
+         track = tracks[track_idx]
+         gating_distance = kf.gating_distance(
+             track.mean, track.covariance, measurements, only_position)
+         cost_matrix[row, gating_distance > gating_threshold] = gated_cost
+     return cost_matrix
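Since this file swaps sklearn's removed linear_assignment_ for SciPy's linear_sum_assignment (see the commented import above), a quick sanity check of the solver on a toy cost matrix:

import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([[0.1, 0.9],
                 [0.8, 0.2]])
rows, cols = linear_sum_assignment(cost)
print(list(zip(rows, cols)))   # [(0, 0), (1, 1)] -- total cost 0.3 is minimal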
deep_sort_pytorch/deep_sort/sort/nn_matching.py ADDED
@@ -0,0 +1,176 @@
+ # vim: expandtab:ts=4:sw=4
+ import numpy as np
+
+
+ def _pdist(a, b):
+     """Compute pair-wise squared distance between points in `a` and `b`.
+
+     Parameters
+     ----------
+     a : array_like
+         An NxM matrix of N samples of dimensionality M.
+     b : array_like
+         An LxM matrix of L samples of dimensionality M.
+
+     Returns
+     -------
+     ndarray
+         Returns a matrix of size len(a), len(b) such that element (i, j)
+         contains the squared distance between `a[i]` and `b[j]`.
+
+     """
+     a, b = np.asarray(a), np.asarray(b)
+     if len(a) == 0 or len(b) == 0:
+         return np.zeros((len(a), len(b)))
+     a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1)
+     r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :]
+     r2 = np.clip(r2, 0., float(np.inf))
+     return r2
+
+
+ def _cosine_distance(a, b, data_is_normalized=False):
+     """Compute pair-wise cosine distance between points in `a` and `b`.
+
+     Parameters
+     ----------
+     a : array_like
+         An NxM matrix of N samples of dimensionality M.
+     b : array_like
+         An LxM matrix of L samples of dimensionality M.
+     data_is_normalized : Optional[bool]
+         If True, assumes rows in a and b are unit length vectors.
+         Otherwise, a and b are explicitly normalized to length 1.
+
+     Returns
+     -------
+     ndarray
+         Returns a matrix of size len(a), len(b) such that element (i, j)
+         contains the cosine distance between `a[i]` and `b[j]`.
+
+     """
+     if not data_is_normalized:
+         a = np.asarray(a) / np.linalg.norm(a, axis=1, keepdims=True)
+         b = np.asarray(b) / np.linalg.norm(b, axis=1, keepdims=True)
+     return 1. - np.dot(a, b.T)
+
+
+ def _nn_euclidean_distance(x, y):
+     """ Helper function for nearest neighbor distance metric (Euclidean).
+
+     Parameters
+     ----------
+     x : ndarray
+         A matrix of N row-vectors (sample points).
+     y : ndarray
+         A matrix of M row-vectors (query points).
+
+     Returns
+     -------
+     ndarray
+         A vector of length M that contains for each entry in `y` the
+         smallest Euclidean distance to a sample in `x`.
+
+     """
+     distances = _pdist(x, y)
+     return np.maximum(0.0, distances.min(axis=0))
+
+
+ def _nn_cosine_distance(x, y):
+     """ Helper function for nearest neighbor distance metric (cosine).
+
+     Parameters
+     ----------
+     x : ndarray
+         A matrix of N row-vectors (sample points).
+     y : ndarray
+         A matrix of M row-vectors (query points).
+
+     Returns
+     -------
+     ndarray
+         A vector of length M that contains for each entry in `y` the
+         smallest cosine distance to a sample in `x`.
+
+     """
+     distances = _cosine_distance(x, y)
+     return distances.min(axis=0)
+
+
+ class NearestNeighborDistanceMetric(object):
+     """
+     A nearest neighbor distance metric that, for each target, returns
+     the closest distance to any sample that has been observed so far.
+
+     Parameters
+     ----------
+     metric : str
+         Either "euclidean" or "cosine".
+     matching_threshold: float
+         The matching threshold. Samples with larger distance are considered an
+         invalid match.
+     budget : Optional[int]
+         If not None, fix samples per class to at most this number. Removes
+         the oldest samples when the budget is reached.
+
+     Attributes
+     ----------
+     samples : Dict[int -> List[ndarray]]
+         A dictionary that maps from target identities to the list of samples
+         that have been observed so far.
+
+     """
+
+     def __init__(self, metric, matching_threshold, budget=None):
+
+         if metric == "euclidean":
+             self._metric = _nn_euclidean_distance
+         elif metric == "cosine":
+             self._metric = _nn_cosine_distance
+         else:
+             raise ValueError(
+                 "Invalid metric; must be either 'euclidean' or 'cosine'")
+         self.matching_threshold = matching_threshold
+         self.budget = budget
+         self.samples = {}
+
+     def partial_fit(self, features, targets, active_targets):
+         """Update the distance metric with new data.
+
+         Parameters
+         ----------
+         features : ndarray
+             An NxM matrix of N features of dimensionality M.
+         targets : ndarray
+             An integer array of associated target identities.
+         active_targets : List[int]
+             A list of targets that are currently present in the scene.
+
+         """
+         for feature, target in zip(features, targets):
+             self.samples.setdefault(target, []).append(feature)
+             if self.budget is not None:
+                 self.samples[target] = self.samples[target][-self.budget:]
+         self.samples = {k: self.samples[k] for k in active_targets}
+
+     def distance(self, features, targets):
+         """Compute distance between features and targets.
+
+         Parameters
+         ----------
+         features : ndarray
+             An NxM matrix of N features of dimensionality M.
+         targets : List[int]
+             A list of targets to match the given `features` against.
+
+         Returns
+         -------
+         ndarray
+             Returns a cost matrix of shape len(targets), len(features), where
+             element (i, j) contains the closest squared distance between
+             `targets[i]` and `features[j]`.
+
+         """
+         cost_matrix = np.zeros((len(targets), len(features)))
+         for i, target in enumerate(targets):
+             cost_matrix[i, :] = self._metric(self.samples[target], features)
+         return cost_matrix
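A minimal update/query cycle for the metric above, with toy 2-D unit vectors as features:

import numpy as np

metric = NearestNeighborDistanceMetric("cosine", matching_threshold=0.2)
feats = np.array([[1., 0.], [0., 1.]])
metric.partial_fit(feats, targets=np.array([1, 2]), active_targets=[1, 2])
query = np.array([[1., 0.]])
print(metric.distance(query, targets=[1, 2]))   # [[0.], [1.]] -- target 1 matches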
deep_sort_pytorch/deep_sort/sort/preprocessing.py ADDED
@@ -0,0 +1,73 @@
+ # vim: expandtab:ts=4:sw=4
+ import numpy as np
+ import cv2
+
+
+ def non_max_suppression(boxes, max_bbox_overlap, scores=None):
+     """Suppress overlapping detections.
+
+     Original code from [1]_ has been adapted to include confidence score.
+
+     .. [1] http://www.pyimagesearch.com/2015/02/16/
+            faster-non-maximum-suppression-python/
+
+     Examples
+     --------
+
+         >>> boxes = [d.roi for d in detections]
+         >>> scores = [d.confidence for d in detections]
+         >>> indices = non_max_suppression(boxes, max_bbox_overlap, scores)
+         >>> detections = [detections[i] for i in indices]
+
+     Parameters
+     ----------
+     boxes : ndarray
+         Array of ROIs (x, y, width, height).
+     max_bbox_overlap : float
+         ROIs that overlap more than this value are suppressed.
+     scores : Optional[array_like]
+         Detector confidence score.
+
+     Returns
+     -------
+     List[int]
+         Returns indices of detections that have survived non-maxima suppression.
+
+     """
+     if len(boxes) == 0:
+         return []
+
+     boxes = boxes.astype(float)  # np.float was removed in NumPy >= 1.24
+     pick = []
+
+     x1 = boxes[:, 0]
+     y1 = boxes[:, 1]
+     x2 = boxes[:, 2] + boxes[:, 0]
+     y2 = boxes[:, 3] + boxes[:, 1]
+
+     area = (x2 - x1 + 1) * (y2 - y1 + 1)
+     if scores is not None:
+         idxs = np.argsort(scores)
+     else:
+         idxs = np.argsort(y2)
+
+     while len(idxs) > 0:
+         last = len(idxs) - 1
+         i = idxs[last]
+         pick.append(i)
+
+         xx1 = np.maximum(x1[i], x1[idxs[:last]])
+         yy1 = np.maximum(y1[i], y1[idxs[:last]])
+         xx2 = np.minimum(x2[i], x2[idxs[:last]])
+         yy2 = np.minimum(y2[i], y2[idxs[:last]])
+
+         w = np.maximum(0, xx2 - xx1 + 1)
+         h = np.maximum(0, yy2 - yy1 + 1)
+
+         overlap = (w * h) / area[idxs[:last]]
+
+         idxs = np.delete(
+             idxs, np.concatenate(
+                 ([last], np.where(overlap > max_bbox_overlap)[0])))
+
+     return pick
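A small worked run of the suppression above: two heavily overlapping boxes plus one lone box, with max_bbox_overlap=0.5, keeps the higher-scoring box of the pair and the lone box:

import numpy as np

boxes = np.array([[0, 0, 10, 10],
                  [1, 1, 10, 10],
                  [50, 50, 10, 10]])
scores = np.array([0.9, 0.8, 0.7])
print(non_max_suppression(boxes, 0.5, scores))   # [0, 2]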
deep_sort_pytorch/deep_sort/sort/track.py ADDED
@@ -0,0 +1,169 @@
+ # vim: expandtab:ts=4:sw=4
+
+
+ class TrackState:
+     """
+     Enumeration type for the single target track state. Newly created tracks are
+     classified as `tentative` until enough evidence has been collected. Then,
+     the track state is changed to `confirmed`. Tracks that are no longer alive
+     are classified as `deleted` to mark them for removal from the set of active
+     tracks.
+
+     """
+
+     Tentative = 1
+     Confirmed = 2
+     Deleted = 3
+
+
+ class Track:
+     """
+     A single target track with state space `(x, y, a, h)` and associated
+     velocities, where `(x, y)` is the center of the bounding box, `a` is the
+     aspect ratio and `h` is the height.
+
+     Parameters
+     ----------
+     mean : ndarray
+         Mean vector of the initial state distribution.
+     covariance : ndarray
+         Covariance matrix of the initial state distribution.
+     track_id : int
+         A unique track identifier.
+     n_init : int
+         Number of consecutive detections before the track is confirmed. The
+         track state is set to `Deleted` if a miss occurs within the first
+         `n_init` frames.
+     max_age : int
+         The maximum number of consecutive misses before the track state is
+         set to `Deleted`.
+     feature : Optional[ndarray]
+         Feature vector of the detection this track originates from. If not None,
+         this feature is added to the `features` cache.
+
+     Attributes
+     ----------
+     mean : ndarray
+         Mean vector of the initial state distribution.
+     covariance : ndarray
+         Covariance matrix of the initial state distribution.
+     track_id : int
+         A unique track identifier.
+     hits : int
+         Total number of measurement updates.
+     age : int
+         Total number of frames since first occurrence.
+     time_since_update : int
+         Total number of frames since last measurement update.
+     state : TrackState
+         The current track state.
+     features : List[ndarray]
+         A cache of features. On each measurement update, the associated feature
+         vector is added to this list.
+
+     """
+
+     def __init__(self, mean, covariance, track_id, n_init, max_age,
+                  feature=None):
+         self.mean = mean
+         self.covariance = covariance
+         self.track_id = track_id
+         self.hits = 1
+         self.age = 1
+         self.time_since_update = 0
+
+         self.state = TrackState.Tentative
+         self.features = []
+         if feature is not None:
+             self.features.append(feature)
+
+         self._n_init = n_init
+         self._max_age = max_age
+
+     def to_tlwh(self):
+         """Get current position in bounding box format `(top left x, top left y,
+         width, height)`.
+
+         Returns
+         -------
+         ndarray
+             The bounding box.
+
+         """
+         ret = self.mean[:4].copy()
+         ret[2] *= ret[3]
+         ret[:2] -= ret[2:] / 2
+         return ret
+
+     def to_tlbr(self):
+         """Get current position in bounding box format `(min x, min y, max x,
+         max y)`.
+
+         Returns
+         -------
+         ndarray
+             The bounding box.
+
+         """
+         ret = self.to_tlwh()
+         ret[2:] = ret[:2] + ret[2:]
+         return ret
+
+     def increment_age(self):
+         self.age += 1
+         self.time_since_update += 1
+
+     def predict(self, kf):
+         """Propagate the state distribution to the current time step using a
+         Kalman filter prediction step.
+
+         Parameters
+         ----------
+         kf : kalman_filter.KalmanFilter
+             The Kalman filter.
+
+         """
+         self.mean, self.covariance = kf.predict(self.mean, self.covariance)
+         self.increment_age()
+
+     def update(self, kf, detection):
+         """Perform Kalman filter measurement update step and update the feature
+         cache.
+
+         Parameters
+         ----------
+         kf : kalman_filter.KalmanFilter
+             The Kalman filter.
+         detection : Detection
+             The associated detection.
+
+         """
+         self.mean, self.covariance = kf.update(
+             self.mean, self.covariance, detection.to_xyah())
+         self.features.append(detection.feature)
+
+         self.hits += 1
+         self.time_since_update = 0
+         if self.state == TrackState.Tentative and self.hits >= self._n_init:
+             self.state = TrackState.Confirmed
+
+     def mark_missed(self):
+         """Mark this track as missed (no association at the current time step).
+         """
+         if self.state == TrackState.Tentative:
+             self.state = TrackState.Deleted
+         elif self.time_since_update > self._max_age:
+             self.state = TrackState.Deleted
+
+     def is_tentative(self):
+         """Returns True if this track is tentative (unconfirmed).
+         """
+         return self.state == TrackState.Tentative
+
+     def is_confirmed(self):
+         """Returns True if this track is confirmed."""
+         return self.state == TrackState.Confirmed
+
+     def is_deleted(self):
+         """Returns True if this track is dead and should be deleted."""
+         return self.state == TrackState.Deleted
deep_sort_pytorch/deep_sort/sort/tracker.py ADDED
@@ -0,0 +1,143 @@
+ # vim: expandtab:ts=4:sw=4
+ from __future__ import absolute_import
+ import numpy as np
+ from . import kalman_filter
+ from . import linear_assignment
+ from . import iou_matching
+ from .track import Track
+
+
+ class Tracker:
+     """
+     This is the multi-target tracker.
+
+     Parameters
+     ----------
+     metric : nn_matching.NearestNeighborDistanceMetric
+         A distance metric for measurement-to-track association.
+     max_age : int
+         Maximum number of consecutive misses before a track is deleted.
+     n_init : int
+         Number of consecutive detections before the track is confirmed. The
+         track state is set to `Deleted` if a miss occurs within the first
+         `n_init` frames.
+
+     Attributes
+     ----------
+     metric : nn_matching.NearestNeighborDistanceMetric
+         The distance metric used for measurement to track association.
+     max_age : int
+         Maximum number of consecutive misses before a track is deleted.
+     n_init : int
+         Number of frames that a track remains in initialization phase.
+     kf : kalman_filter.KalmanFilter
+         A Kalman filter to filter target trajectories in image space.
+     tracks : List[Track]
+         The list of active tracks at the current time step.
+
+     """
+
+     def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3):
+         self.metric = metric
+         self.max_iou_distance = max_iou_distance
+         self.max_age = max_age
+         self.n_init = n_init
+
+         self.kf = kalman_filter.KalmanFilter()
+         self.tracks = []
+         self._next_id = 1
+
+     def predict(self):
+         """Propagate track state distributions one time step forward.
+
+         This function should be called once every time step, before `update`.
+         """
+         for track in self.tracks:
+             track.predict(self.kf)
+
+     def increment_ages(self):
+         for track in self.tracks:
+             track.increment_age()
+             track.mark_missed()
+
+     def update(self, detections):
+         """Perform measurement update and track management.
+
+         Parameters
+         ----------
+         detections : List[deep_sort.detection.Detection]
+             A list of detections at the current time step.
+
+         """
+         # Run matching cascade.
+         matches, unmatched_tracks, unmatched_detections = \
+             self._match(detections)
+
+         # Update track set.
+         for track_idx, detection_idx in matches:
+             self.tracks[track_idx].update(
+                 self.kf, detections[detection_idx])
+         for track_idx in unmatched_tracks:
+             self.tracks[track_idx].mark_missed()
+         for detection_idx in unmatched_detections:
+             self._initiate_track(detections[detection_idx])
+         self.tracks = [t for t in self.tracks if not t.is_deleted()]
+
+         # Update distance metric.
+         active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
+         features, targets = [], []
+         for track in self.tracks:
+             if not track.is_confirmed():
+                 continue
+             features += track.features
+             targets += [track.track_id for _ in track.features]
+             track.features = []
+         self.metric.partial_fit(
+             np.asarray(features), np.asarray(targets), active_targets)
+
+     def _match(self, detections):
+
+         def gated_metric(tracks, dets, track_indices, detection_indices):
+             features = np.array([dets[i].feature for i in detection_indices])
+             targets = np.array([tracks[i].track_id for i in track_indices])
+             cost_matrix = self.metric.distance(features, targets)
+             cost_matrix = linear_assignment.gate_cost_matrix(
+                 self.kf, cost_matrix, tracks, dets, track_indices,
+                 detection_indices)
+
+             return cost_matrix
+
+         # Split track set into confirmed and unconfirmed tracks.
+         confirmed_tracks = [
+             i for i, t in enumerate(self.tracks) if t.is_confirmed()]
+         unconfirmed_tracks = [
+             i for i, t in enumerate(self.tracks) if not t.is_confirmed()]
+
+         # Associate confirmed tracks using appearance features.
+         matches_a, unmatched_tracks_a, unmatched_detections = \
+             linear_assignment.matching_cascade(
+                 gated_metric, self.metric.matching_threshold, self.max_age,
+                 self.tracks, detections, confirmed_tracks)
+
+         # Associate remaining tracks together with unconfirmed tracks using IOU.
+         iou_track_candidates = unconfirmed_tracks + [
+             k for k in unmatched_tracks_a if
+             self.tracks[k].time_since_update == 1]
+         unmatched_tracks_a = [
+             k for k in unmatched_tracks_a if
+             self.tracks[k].time_since_update != 1]
+         matches_b, unmatched_tracks_b, unmatched_detections = \
+             linear_assignment.min_cost_matching(
+                 iou_matching.iou_cost, self.max_iou_distance, self.tracks,
+                 detections, iou_track_candidates, unmatched_detections)
+
+         matches = matches_a + matches_b
+         unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
+         return matches, unmatched_tracks, unmatched_detections
+
+     def _initiate_track(self, detection):
+         mean, covariance = self.kf.initiate(detection.to_xyah())
+         self.tracks.append(Track(
+             mean, covariance, self._next_id, self.n_init, self.max_age,
+             detection.feature))
+         self._next_id += 1
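A minimal end-to-end sketch of the tracker API above, feeding one hypothetical detection with a dummy appearance feature:

import numpy as np
from deep_sort_pytorch.deep_sort.sort.nn_matching import NearestNeighborDistanceMetric
from deep_sort_pytorch.deep_sort.sort.detection import Detection
from deep_sort_pytorch.deep_sort.sort.tracker import Tracker

metric = NearestNeighborDistanceMetric("cosine", matching_threshold=0.2, budget=100)
tracker = Tracker(metric)
detections = [Detection([10., 20., 50., 100.], 0.9, np.random.rand(512))]
tracker.predict()            # always predict before update, once per frame
tracker.update(detections)
for track in tracker.tracks:
    print(track.track_id, track.is_confirmed(), track.to_tlwh())  # tentative on frame 1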
deep_sort_pytorch/utils/__init__.py ADDED
File without changes
deep_sort_pytorch/utils/asserts.py ADDED
@@ -0,0 +1,13 @@
+ from os import environ
+
+
+ def assert_in(file, files_to_check):
+     if file not in files_to_check:
+         raise AssertionError("{} does not exist in the list".format(str(file)))
+     return True
+
+
+ def assert_in_env(check_list: list):
+     for item in check_list:
+         assert_in(item, environ.keys())
+     return True
deep_sort_pytorch/utils/draw.py ADDED
@@ -0,0 +1,36 @@
+ import numpy as np
+ import cv2
+
+ palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
+
+
+ def compute_color_for_labels(label):
+     """
+     Simple function that adds fixed color depending on the class
+     """
+     color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
+     return tuple(color)
+
+
+ def draw_boxes(img, bbox, identities=None, offset=(0, 0)):
+     for i, box in enumerate(bbox):
+         x1, y1, x2, y2 = [int(i) for i in box]
+         x1 += offset[0]
+         x2 += offset[0]
+         y1 += offset[1]
+         y2 += offset[1]
+         # box text and bar
+         id = int(identities[i]) if identities is not None else 0
+         color = compute_color_for_labels(id)
+         label = '{}{:d}'.format("", id)
+         t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]
+         cv2.rectangle(img, (x1, y1), (x2, y2), color, 3)
+         cv2.rectangle(img, (x1, y1), (x1 + t_size[0] + 3, y1 + t_size[1] + 4), color, -1)
+         cv2.putText(img, label, (x1, y1 + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 2)
+     return img
+
+
+ if __name__ == '__main__':
+     for i in range(82):
+         print(compute_color_for_labels(i))
deep_sort_pytorch/utils/evaluation.py ADDED
@@ -0,0 +1,103 @@
+ import os
+ import numpy as np
+ import copy
+ import motmetrics as mm
+ mm.lap.default_solver = 'lap'
+ from utils.io import read_results, unzip_objs
+
+
+ class Evaluator(object):
+
+     def __init__(self, data_root, seq_name, data_type):
+         self.data_root = data_root
+         self.seq_name = seq_name
+         self.data_type = data_type
+
+         self.load_annotations()
+         self.reset_accumulator()
+
+     def load_annotations(self):
+         assert self.data_type == 'mot'
+
+         gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
+         self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
+         self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)
+
+     def reset_accumulator(self):
+         self.acc = mm.MOTAccumulator(auto_id=True)
+
+     def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
+         # results
+         trk_tlwhs = np.copy(trk_tlwhs)
+         trk_ids = np.copy(trk_ids)
+
+         # gts
+         gt_objs = self.gt_frame_dict.get(frame_id, [])
+         gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]
+
+         # ignore boxes
+         ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
+         ignore_tlwhs = unzip_objs(ignore_objs)[0]
+
+         # remove ignored results
+         keep = np.ones(len(trk_tlwhs), dtype=bool)
+         iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
+         if len(iou_distance) > 0:
+             match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
+             match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
+             match_ious = iou_distance[match_is, match_js]
+
+             match_js = np.asarray(match_js, dtype=int)
+             match_js = match_js[np.logical_not(np.isnan(match_ious))]
+             keep[match_js] = False
+             trk_tlwhs = trk_tlwhs[keep]
+             trk_ids = trk_ids[keep]
+
+         # get distance matrix
+         iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
+
+         # acc
+         self.acc.update(gt_ids, trk_ids, iou_distance)
+
+         if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):
+             events = self.acc.last_mot_events  # only supported by https://github.com/longcw/py-motmetrics
+         else:
+             events = None
+         return events
+
+     def eval_file(self, filename):
+         self.reset_accumulator()
+
+         result_frame_dict = read_results(filename, self.data_type, is_gt=False)
+         frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))
+         for frame_id in frames:
+             trk_objs = result_frame_dict.get(frame_id, [])
+             trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
+             self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)
+
+         return self.acc
+
+     @staticmethod
+     def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
+         names = copy.deepcopy(names)
+         if metrics is None:
+             metrics = mm.metrics.motchallenge_metrics
+         metrics = copy.deepcopy(metrics)
+
+         mh = mm.metrics.create()
+         summary = mh.compute_many(
+             accs,
+             metrics=metrics,
+             names=names,
+             generate_overall=True
+         )
+
+         return summary
+
+     @staticmethod
+     def save_summary(summary, filename):
+         import pandas as pd
+         writer = pd.ExcelWriter(filename)
+         summary.to_excel(writer)
+         writer.save()
deep_sort_pytorch/utils/io.py ADDED
@@ -0,0 +1,133 @@
+ import os
+ from typing import Dict
+ import numpy as np
+
+ # from utils.log import get_logger
+
+
+ def write_results(filename, results, data_type):
+     if data_type == 'mot':
+         save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n'
+     elif data_type == 'kitti':
+         save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
+     else:
+         raise ValueError(data_type)
+
+     with open(filename, 'w') as f:
+         for frame_id, tlwhs, track_ids in results:
+             if data_type == 'kitti':
+                 frame_id -= 1
+             for tlwh, track_id in zip(tlwhs, track_ids):
+                 if track_id < 0:
+                     continue
+                 x1, y1, w, h = tlwh
+                 x2, y2 = x1 + w, y1 + h
+                 line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
+                 f.write(line)
+
+
+ # def write_results(filename, results_dict: Dict, data_type: str):
+ #     if not filename:
+ #         return
+ #     path = os.path.dirname(filename)
+ #     if not os.path.exists(path):
+ #         os.makedirs(path)
+
+ #     if data_type in ('mot', 'mcmot', 'lab'):
+ #         save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
+ #     elif data_type == 'kitti':
+ #         save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
+ #     else:
+ #         raise ValueError(data_type)
+
+ #     with open(filename, 'w') as f:
+ #         for frame_id, frame_data in results_dict.items():
+ #             if data_type == 'kitti':
+ #                 frame_id -= 1
+ #             for tlwh, track_id in frame_data:
+ #                 if track_id < 0:
+ #                     continue
+ #                 x1, y1, w, h = tlwh
+ #                 x2, y2 = x1 + w, y1 + h
+ #                 line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0)
+ #                 f.write(line)
+ #     logger.info('Save results to {}'.format(filename))
+
+
+ def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
+     if data_type in ('mot', 'lab'):
+         read_fun = read_mot_results
+     else:
+         raise ValueError('Unknown data type: {}'.format(data_type))
+
+     return read_fun(filename, is_gt, is_ignore)
+
+
+ """
+ labels={'ped', ...              % 1
+         'person_on_vhcl', ...   % 2
+         'car', ...              % 3
+         'bicycle', ...          % 4
+         'mbike', ...            % 5
+         'non_mot_vhcl', ...     % 6
+         'static_person', ...    % 7
+         'distractor', ...       % 8
+         'occluder', ...         % 9
+         'occluder_on_grnd', ... % 10
+         'occluder_full', ...    % 11
+         'reflection', ...       % 12
+         'crowd' ...             % 13
+ };
+ """
+
+
+ def read_mot_results(filename, is_gt, is_ignore):
+     valid_labels = {1}
+     ignore_labels = {2, 7, 8, 12}
+     results_dict = dict()
+     if os.path.isfile(filename):
+         with open(filename, 'r') as f:
+             for line in f.readlines():
+                 linelist = line.split(',')
+                 if len(linelist) < 7:
+                     continue
+                 fid = int(linelist[0])
+                 if fid < 1:
+                     continue
+                 results_dict.setdefault(fid, list())
+
+                 if is_gt:
+                     if 'MOT16-' in filename or 'MOT17-' in filename:
+                         label = int(float(linelist[7]))
+                         mark = int(float(linelist[6]))
+                         if mark == 0 or label not in valid_labels:
+                             continue
+                     score = 1
+                 elif is_ignore:
+                     if 'MOT16-' in filename or 'MOT17-' in filename:
+                         label = int(float(linelist[7]))
+                         vis_ratio = float(linelist[8])
+                         if label not in ignore_labels and vis_ratio >= 0:
+                             continue
+                     else:
+                         continue
+                     score = 1
+                 else:
+                     score = float(linelist[6])
+
+                 tlwh = tuple(map(float, linelist[2:6]))
+                 target_id = int(linelist[1])
+
+                 results_dict[fid].append((tlwh, target_id, score))
+
+     return results_dict
+
+
+ def unzip_objs(objs):
+     if len(objs) > 0:
+         tlwhs, ids, scores = zip(*objs)
+     else:
+         tlwhs, ids, scores = [], [], []
+     tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
+
+     return tlwhs, ids, scores
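
A quick usage sketch for the MOT reader/writer above (the file name and track values here are invented; the -1.0 score comes from the writer's placeholder columns):

    from deep_sort_pytorch.utils.io import write_results, read_results

    results = [
        (1, [(10.0, 20.0, 50.0, 80.0)], [1]),  # frame 1: one track as (tlwh, track_id)
        (2, [(12.0, 22.0, 50.0, 80.0)], [1]),  # frame 2: same track, shifted
    ]
    write_results('demo_results.txt', results, data_type='mot')
    loaded = read_results('demo_results.txt', data_type='mot')
    print(loaded[1])  # [((10.0, 20.0, 50.0, 80.0), 1, -1.0)]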
deep_sort_pytorch/utils/json_logger.py ADDED
@@ -0,0 +1,383 @@
+ """
+ References:
+     https://medium.com/analytics-vidhya/creating-a-custom-logging-mechanism-for-real-time-object-detection-using-tdd-4ca2cfcd0a2f
+ """
+ import json
+ from os import makedirs
+ from os.path import exists, join
+ from datetime import datetime
+
+
+ class JsonMeta(object):
+     HOURS = 3
+     MINUTES = 59
+     SECONDS = 59
+     PATH_TO_SAVE = 'LOGS'
+     DEFAULT_FILE_NAME = 'remaining'
+
+
+ class BaseJsonLogger(object):
+     """
+     Base class that serializes its own __dict__; attributes that are
+     list instances are serialized element-wise as well.
+     """
+
+     def dic(self):
+         # returns a dict representation of this object and any nested loggables
+         out = {}
+         for k, v in self.__dict__.items():
+             if hasattr(v, 'dic'):
+                 out[k] = v.dic()
+             elif isinstance(v, list):
+                 out[k] = self.list(v)
+             else:
+                 out[k] = v
+         return out
+
+     @staticmethod
+     def list(values):
+         # applies the dic method to items in the list
+         return [v.dic() if hasattr(v, 'dic') else v for v in values]
+
+
+ class Label(BaseJsonLogger):
+     """
+     Each bounding box can carry several categories with confidences; the Label
+     class keeps track of that information.
+     """
+
+     def __init__(self, category: str, confidence: float):
+         self.category = category
+         self.confidence = confidence
+
+
+ class Bbox(BaseJsonLogger):
+     """
+     Stores the information of a single bounding box; used by Frame.
+     Attributes:
+         labels (list): list of Label objects.
+         top (int):
+         left (int):
+         width (int):
+         height (int):
+
+     Args:
+         bbox_id (int):
+         top (int):
+         left (int):
+         width (int):
+         height (int):
+
+     References:
+         See the Label class for details.
+     """
+
+     def __init__(self, bbox_id, top, left, width, height):
+         self.labels = []
+         self.bbox_id = bbox_id
+         self.top = top
+         self.left = left
+         self.width = width
+         self.height = height
+
+     def add_label(self, category, confidence):
+         # appends a label; the top_k quota is enforced by BboxToJsonLogger
+         self.labels.append(Label(category, confidence))
+
+     def labels_full(self, value):
+         return len(self.labels) == value
+
+
+ class Frame(BaseJsonLogger):
+     """
+     Stores the information of a single frame; used by BboxToJsonLogger.
+     Attributes:
+         timestamp (float): the elapsed time of the captured frame
+         frame_id (int): the frame number of the captured video
+         bboxes (list of Bbox objects): stores the list of bbox objects.
+
+     References:
+         See the Bbox class for details.
+
+     Args:
+         timestamp (float):
+         frame_id (int):
+     """
+
+     def __init__(self, frame_id: int, timestamp: float = None):
+         self.frame_id = frame_id
+         self.timestamp = timestamp
+         self.bboxes = []
+
+     def add_bbox(self, bbox_id: int, top: int, left: int, width: int, height: int):
+         bboxes_ids = [bbox.bbox_id for bbox in self.bboxes]
+         if bbox_id not in bboxes_ids:
+             self.bboxes.append(Bbox(bbox_id, top, left, width, height))
+         else:
+             raise ValueError("Frame with id: {} already has a Bbox with id: {}".format(self.frame_id, bbox_id))
+
+     def add_label_to_bbox(self, bbox_id: int, category: str, confidence: float):
+         bboxes = {bbox.bbox_id: bbox for bbox in self.bboxes}  # fixed: Bbox has no `id` attribute
+         if bbox_id in bboxes.keys():
+             res = bboxes.get(bbox_id)
+             res.add_label(category, confidence)
+         else:
+             raise ValueError('the bbox with id: {} does not exist!'.format(bbox_id))
+
+
+ class BboxToJsonLogger(BaseJsonLogger):
+     """
+     This class automates the task of logging detections to JSON. An example is
+     shown below to illustrate the structure of the output file:
+     Example:
+         {
+           "video_details": {
+             "frame_width": 1920,
+             "frame_height": 1080,
+             "frame_rate": 20,
+             "video_name": "/home/gpu/codes/MSD/pedestrian_2/project/public/camera1.avi"
+           },
+           "frames": [
+             {
+               "frame_id": 329,
+               "timestamp": 3365.1254,
+               "bboxes": [
+                 {
+                   "labels": [
+                     {
+                       "category": "pedestrian",
+                       "confidence": 0.9
+                     }
+                   ],
+                   "bbox_id": 0,
+                   "top": 1257,
+                   "left": 138,
+                   "width": 68,
+                   "height": 109
+                 }
+               ]
+             }
+           ]
+         }
+
+     Attributes:
+         frames (dict): maps each frame_id to its Frame object.
+         video_details (dict): information about the video file.
+         top_k_labels (int): the allowed number of labels per bbox.
+         start_time (datetime object): used to schedule JSON output by time.
+
+     Args:
+         top_k_labels (int): the allowed number of labels per bbox.
+     """
+
+     def __init__(self, top_k_labels: int = 1):
+         self.frames = {}
+         self.video_details = dict(frame_width=None, frame_height=None, frame_rate=None,
+                                   video_name=None)  # fixed: duplicated assignment removed
+         self.top_k_labels = top_k_labels
+         self.start_time = datetime.now()
+
+     def set_top_k(self, value):
+         self.top_k_labels = value
+
+     def frame_exists(self, frame_id: int) -> bool:
+         """
+         Args:
+             frame_id (int):
+
+         Returns:
+             bool: true if frame_id is recognized
+         """
+         return frame_id in self.frames.keys()
+
+     def add_frame(self, frame_id: int, timestamp: float = None) -> None:
+         """
+         Args:
+             frame_id (int):
+             timestamp (float): opencv captured frame time property
+
+         Raises:
+             ValueError: if frame_id already exists in the frames attribute
+
+         Returns:
+             None
+         """
+         if not self.frame_exists(frame_id):
+             self.frames[frame_id] = Frame(frame_id, timestamp)
+         else:
+             raise ValueError("Frame id: {} already exists".format(frame_id))
+
+     def bbox_exists(self, frame_id: int, bbox_id: int) -> bool:
+         """
+         Args:
+             frame_id:
+             bbox_id:
+
+         Returns:
+             bool: true if the bbox exists in the frame's bboxes list
+         """
+         bboxes = []
+         if self.frame_exists(frame_id=frame_id):
+             bboxes = [bbox.bbox_id for bbox in self.frames[frame_id].bboxes]
+         return bbox_id in bboxes
+
+     def find_bbox(self, frame_id: int, bbox_id: int):
+         """
+         Args:
+             frame_id:
+             bbox_id:
+
+         Returns:
+             Bbox: the matching bbox object
+
+         Raises:
+             ValueError: if bbox_id does not exist in the bbox list of the given frame.
+         """
+         if not self.bbox_exists(frame_id, bbox_id):
+             raise ValueError("frame with id: {} does not contain bbox with id: {}".format(frame_id, bbox_id))
+         bboxes = {bbox.bbox_id: bbox for bbox in self.frames[frame_id].bboxes}
+         return bboxes.get(bbox_id)
+
+     def add_bbox_to_frame(self, frame_id: int, bbox_id: int, top: int, left: int, width: int, height: int) -> None:
+         """
+         Args:
+             frame_id (int):
+             bbox_id (int):
+             top (int):
+             left (int):
+             width (int):
+             height (int):
+
+         Returns:
+             None
+
+         Raises:
+             ValueError: if bbox_id already exists in the frame with frame_id
+             ValueError: if frame_id does not exist in the frames attribute
+         """
+         if self.frame_exists(frame_id):
+             frame = self.frames[frame_id]
+             if not self.bbox_exists(frame_id, bbox_id):
+                 frame.add_bbox(bbox_id, top, left, width, height)
+             else:
+                 raise ValueError(
+                     "frame with frame_id: {} already contains the bbox with id: {} ".format(frame_id, bbox_id))
+         else:
+             raise ValueError("frame with frame_id: {} does not exist".format(frame_id))
+
+     def add_label_to_bbox(self, frame_id: int, bbox_id: int, category: str, confidence: float):
+         """
+         Args:
+             frame_id:
+             bbox_id:
+             category:
+             confidence: the confidence value returned from yolo detection
+
+         Returns:
+             None
+
+         Raises:
+             ValueError: if the labels quota (top_k_labels) is exceeded.
+         """
+         bbox = self.find_bbox(frame_id, bbox_id)
+         if not bbox.labels_full(self.top_k_labels):
+             bbox.add_label(category, confidence)
+         else:
+             raise ValueError("labels in frame_id: {}, bbox_id: {} are full".format(frame_id, bbox_id))
+
+     def add_video_details(self, frame_width: int = None, frame_height: int = None, frame_rate: int = None,
+                           video_name: str = None):
+         self.video_details['frame_width'] = frame_width
+         self.video_details['frame_height'] = frame_height
+         self.video_details['frame_rate'] = frame_rate
+         self.video_details['video_name'] = video_name
+
+     def output(self):
+         output = {'video_details': self.video_details}
+         result = list(self.frames.values())
+         output['frames'] = [item.dic() for item in result]
+         return output
+
+     def json_output(self, output_name):
+         """
+         Args:
+             output_name:
+
+         Returns:
+             None
+
+         Notes:
+             It creates the json output with `output_name` name.
+         """
+         if not output_name.endswith('.json'):
+             output_name += '.json'
+         with open(output_name, 'w') as file:
+             json.dump(self.output(), file)  # the with-block closes the file; no explicit close needed
+
+     def set_start(self):
+         self.start_time = datetime.now()
+
+     def schedule_output_by_time(self, output_dir=JsonMeta.PATH_TO_SAVE, hours: int = 0, minutes: int = 0,
+                                 seconds: int = 60) -> None:
+         """
+         Notes:
+             Creates the folder and then periodically stores the jsons at that address.
+
+         Args:
+             output_dir (str): the directory where output files will be stored
+             hours (int):
+             minutes (int):
+             seconds (int):
+
+         Returns:
+             None
+         """
+         end = datetime.now()
+         interval = 0
+         interval += abs(min([hours, JsonMeta.HOURS]) * 3600)
+         interval += abs(min([minutes, JsonMeta.MINUTES]) * 60)
+         interval += abs(min([seconds, JsonMeta.SECONDS]))
+         diff = (end - self.start_time).seconds
+
+         if diff > interval:
+             output_name = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '.json'
+             if not exists(output_dir):
+                 makedirs(output_dir)
+             output = join(output_dir, output_name)
+             self.json_output(output_name=output)
+             self.frames = {}
+             self.start_time = datetime.now()
+
+     def schedule_output_by_frames(self, frames_quota, frame_counter, output_dir=JsonMeta.PATH_TO_SAVE):
+         """
+         Placeholder: intended to save output every `frames_quota` frames (not implemented).
+         :param frames_quota:
+         :param frame_counter:
+         :param output_dir:
+         :return:
+         """
+         pass
+
+     def flush(self, output_dir):
+         """
+         Notes:
+             Writes out any remaining frames, e.g. when exiting the OpenCV capture loop.
+
+         Args:
+             output_dir:
+
+         Returns:
+             None
+         """
+         filename = self.start_time.strftime('%Y-%m-%d %H-%M-%S') + '-remaining.json'
+         output = join(output_dir, filename)
+         self.json_output(output_name=output)
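
A minimal sketch of the logging flow above (the frame/bbox values are invented for illustration):

    from deep_sort_pytorch.utils.json_logger import BboxToJsonLogger

    jl = BboxToJsonLogger(top_k_labels=1)
    jl.add_video_details(frame_width=1920, frame_height=1080, frame_rate=20, video_name='demo.avi')
    jl.add_frame(frame_id=1, timestamp=0.05)
    jl.add_bbox_to_frame(frame_id=1, bbox_id=0, top=100, left=50, width=60, height=120)
    jl.add_label_to_bbox(frame_id=1, bbox_id=0, category='pedestrian', confidence=0.9)
    jl.json_output('demo_tracks')  # writes demo_tracks.json in the structure shown in the class docstring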
deep_sort_pytorch/utils/log.py ADDED
@@ -0,0 +1,17 @@
+ import logging
+
+
+ def get_logger(name='root'):
+     formatter = logging.Formatter(
+         # fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s')
+         fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
+
+     handler = logging.StreamHandler()
+     handler.setFormatter(formatter)
+
+     logger = logging.getLogger(name)
+     logger.setLevel(logging.INFO)
+     logger.addHandler(handler)
+     return logger
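
For reference, the factory above is used like this (the timestamp in the sample output is illustrative):

    from deep_sort_pytorch.utils.log import get_logger

    log = get_logger('deep_sort')
    log.info('tracker initialized')  # e.g. 2023-01-01 12:00:00 [INFO]: tracker initialized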
deep_sort_pytorch/utils/parser.py ADDED
@@ -0,0 +1,39 @@
+ import os
+ import yaml
+ from easydict import EasyDict as edict
+
+
+ class YamlParser(edict):
+     """
+     A YAML parser based on EasyDict.
+     """
+
+     def __init__(self, cfg_dict=None, config_file=None):
+         if cfg_dict is None:
+             cfg_dict = {}
+
+         if config_file is not None:
+             assert os.path.isfile(config_file)
+             with open(config_file, 'r') as fo:
+                 cfg_dict.update(yaml.safe_load(fo.read()))  # fixed: safe_load, consistent with merge_from_file
+
+         super(YamlParser, self).__init__(cfg_dict)
+
+     def merge_from_file(self, config_file):
+         with open(config_file, 'r') as fo:
+             self.update(yaml.safe_load(fo.read()))
+
+     def merge_from_dict(self, config_dict):
+         self.update(config_dict)
+
+
+ def get_config(config_file=None):
+     return YamlParser(config_file=config_file)
+
+
+ if __name__ == "__main__":
+     cfg = YamlParser(config_file="../configs/yolov3.yaml")
+     cfg.merge_from_file("../configs/deep_sort.yaml")
+
+     import ipdb
+     ipdb.set_trace()
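
A sketch of how the parser is consumed downstream (attribute access comes from EasyDict; the keys mirror configs/deep_sort.yaml):

    from deep_sort_pytorch.utils.parser import get_config

    cfg = get_config()
    cfg.merge_from_file('deep_sort_pytorch/configs/deep_sort.yaml')
    print(cfg.DEEPSORT.MAX_AGE)  # nested YAML keys become attributes, e.g. cfg.DEEPSORT.MAX_AGE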
deep_sort_pytorch/utils/tools.py ADDED
@@ -0,0 +1,39 @@
+ from functools import wraps
+ from time import time
+
+
+ def is_video(ext: str):
+     """
+     Returns true if ext ends with one of the
+     allowed extensions for video files.
+
+     Args:
+         ext:
+
+     Returns:
+
+     """
+     allowed_exts = ('.mp4', '.webm', '.ogg', '.avi', '.wmv', '.mkv', '.3gp')
+     return any((ext.endswith(x) for x in allowed_exts))
+
+
+ def tik_tok(func):
+     """
+     Decorator that keeps track of the time taken by each call.
+     Args:
+         func:
+
+     Returns:
+
+     """
+     @wraps(func)
+     def _time_it(*args, **kwargs):
+         start = time()
+         try:
+             return func(*args, **kwargs)
+         finally:
+             end_ = time()
+             print("time: {:.03f}s, fps: {:.03f}".format(end_ - start, 1 / (end_ - start)))
+
+     return _time_it
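
A quick illustration of the timing decorator (the sleep is only a stand-in for per-frame work):

    import time
    from deep_sort_pytorch.utils.tools import tik_tok, is_video

    @tik_tok
    def process_frame():
        time.sleep(0.05)

    process_frame()              # prints roughly: time: 0.050s, fps: 20.000
    print(is_video('clip.mp4'))  # True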
detect_sort.py ADDED
@@ -0,0 +1,256 @@
+ import argparse
+ import time
+ from pathlib import Path
+
+ import cv2
+ import torch
+ import torch.backends.cudnn as cudnn
+ import os
+ import sys
+ sys.path.insert(0, './yolov5')  # path for internal modules without changing the base
+
+ from yolov5.models.common import DetectMultiBackend
+ from yolov5.utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
+ from yolov5.utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
+                                   increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
+ from yolov5.utils.general import set_logging
+ from yolov5.utils.plots import Annotator, colors, save_one_box, plot_one_box
+ from yolov5.utils.torch_utils import select_device, time_sync
+
+
+ from deep_sort_pytorch.utils.parser import get_config
+ from deep_sort_pytorch.deep_sort import DeepSort
+
+ from graphs import bbox_rel, draw_boxes
+
+ FILE = Path(__file__).resolve()
+ ROOT = FILE.parents[0]  # YOLOv5 root directory
+ if str(ROOT) not in sys.path:
+     sys.path.append(str(ROOT))  # add ROOT to PATH
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+
+ @torch.no_grad()
+ def detect(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)
+            source=ROOT / 'yolov5/data/images',  # file/dir/URL/glob, 0 for webcam
+            data=ROOT / 'yolov5/data/coco128.yaml',  # dataset.yaml path
+            imgsz=(640, 640),  # inference size (height, width)
+            conf_thres=0.25,  # confidence threshold
+            iou_thres=0.45,  # NMS IOU threshold
+            max_det=1000,  # maximum detections per image
+            device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+            view_img=False,  # show results
+            save_txt=False,  # save results to *.txt
+            save_conf=False,  # save confidences in --save-txt labels
+            save_crop=False,  # save cropped prediction boxes
+            nosave=False,  # do not save images/videos
+            classes=None,  # filter by class: --class 0, or --class 0 2 3
+            agnostic_nms=False,  # class-agnostic NMS
+            augment=False,  # augmented inference
+            visualize=False,  # visualize features
+            update=False,  # update all models
+            project=ROOT / 'runs/detect',  # save results to project/name
+            name='exp',  # save results to project/name
+            exist_ok=False,  # existing project/name ok, do not increment
+            line_thickness=3,  # bounding box thickness (pixels)
+            hide_labels=False,  # hide labels
+            hide_conf=False,  # hide confidences
+            half=False,  # use FP16 half-precision inference
+            dnn=False,
+            config_deepsort="deep_sort_pytorch/configs/deep_sort.yaml"  # Deep SORT configuration
+            ):
+     source = str(source)  # fixed: the string checks below require str, not Path
+     save_img = not nosave and not source.endswith('.txt')  # save inference images
+     webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
+         ('rtsp://', 'rtmp://', 'http://', 'https://'))
+
+     # initialize deepsort
+     cfg = get_config()
+     cfg.merge_from_file(config_deepsort)  # fixed: `opt` is not defined inside this function
+     deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
+                         max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
+                         nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
+                         max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
+                         use_cuda=True)
+
+     # Directories
+     save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+     (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+     # Initialize
+     set_logging()
+     device = select_device(device)
+     half &= device.type != 'cpu'  # half precision only supported on CUDA
+
+     # Load model (fixed: duplicate select_device call removed)
+     model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data)
+     stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine
+     imgsz = check_img_size(imgsz, s=stride)  # check image size
+
+     # Half
+     half &= (pt or jit or onnx or engine) and device.type != 'cpu'  # FP16 supported on limited backends with CUDA
+     if pt or jit:
+         model.model.half() if half else model.model.float()
+
+     # Second-stage classifier (disabled; enabling it also requires importing load_classifier)
+     classify = False
+     if classify:
+         modelc = load_classifier(name='resnet101', n=2)  # initialize
+         modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
+         modelc.to(device).eval()
+
+     # Dataloader
+     if webcam:
+         view_img = check_imshow()
+         cudnn.benchmark = True  # set True to speed up constant image size inference
+         dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
+         bs = len(dataset)  # batch_size
+     else:
+         dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
+         bs = 1  # batch_size
+     vid_path, vid_writer = [None] * bs, [None] * bs
+
+     # Get names and colors
+     # names = model.module.names if hasattr(model, 'module') else model.names
+
+     # Run inference
+     if device.type != 'cpu':
+         model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters())))  # run once (imgsz is (h, w))
+     t0 = time.time()
+
+     dt, seen = [0.0, 0.0, 0.0], 0
+     for path, im, im0s, vid_cap, s in dataset:
+         t1 = time_sync()
+         im = torch.from_numpy(im).to(device)
+         im = im.half() if half else im.float()  # uint8 to fp16/32
+         im /= 255  # 0 - 255 to 0.0 - 1.0
+         if len(im.shape) == 3:
+             im = im[None]  # expand for batch dim
+         t2 = time_sync()
+         dt[0] += t2 - t1
+
+         # Inference
+         visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
+         pred = model(im, augment=augment, visualize=visualize)
+         t3 = time_sync()
+         dt[1] += t3 - t2
+
+         # NMS
+         pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
+         dt[2] += time_sync() - t3
+
+         # Second-stage classifier (optional)
+         # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+
+         # Process predictions
+         for i, det in enumerate(pred):  # per image
+             seen += 1
+             if webcam:  # batch_size >= 1
+                 p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                 s += f'{i}: '
+             else:
+                 p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+             p = Path(p)  # to Path
+             save_path = str(save_dir / p.name)  # im.jpg
+             txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
+             s += '%gx%g ' % im.shape[2:]  # print string
+             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+             imc = im0.copy() if save_crop else im0  # for save_crop
+             annotator = Annotator(im0, line_width=line_thickness, example=str(names))
+             if len(det):
+                 # Rescale boxes from img_size to im0 size
+                 det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
+
+                 # Print results
+                 for c in det[:, -1].unique():
+                     n = (det[:, -1] == c).sum()  # detections per class
+                     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                 bbox_xywh = []
+                 confs = []
+                 # Adapt detections to deep sort input format
+                 for *xyxy, conf, cls in det:
+                     x_c, y_c, bbox_w, bbox_h = bbox_rel(*xyxy)
+                     obj = [x_c, y_c, bbox_w, bbox_h]
+                     bbox_xywh.append(obj)
+                     confs.append([conf.item()])
+
+                 xywhs = torch.Tensor(bbox_xywh)
+                 confss = torch.Tensor(confs)
+
+                 # Pass detections to deepsort
+                 outputs = deepsort.update(xywhs, confss, im0)
+
+                 # draw boxes for visualization
+                 if len(outputs) > 0:
+                     bbox_xyxy = outputs[:, :4]
+                     identities = outputs[:, -1]
+                     draw_boxes(im0, bbox_xyxy, identities)
+
+                 # Write MOT compliant results to file
+                 if save_txt and len(outputs) != 0:
+                     for j, output in enumerate(outputs):
+                         bbox_left = output[0]
+                         bbox_top = output[1]
+                         bbox_w = output[2]
+                         bbox_h = output[3]
+                         identity = output[-1]
+                         with open(txt_path, 'a') as f:
+                             f.write(('%g ' * 10 + '\n') % (frame, identity, bbox_left,
+                                                            bbox_top, bbox_w, bbox_h, -1, -1, -1, -1))  # fixed: frame_idx was undefined
+
+                 # Write results Label
+                 for *xyxy, conf, cls in reversed(det):
+                     if save_txt:  # Write to file
+                         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+                         line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
+                         with open(txt_path + '.txt', 'a') as f:
+                             f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                     if save_img or save_crop or view_img:  # Add bbox to image
+                         c = int(cls)  # integer class
+                         label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
+                         plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=line_thickness)
+                         if save_crop:
+                             save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
+
+             else:
+                 deepsort.increment_ages()
+
+             # Print time (inference only; fixed: t2 - t1 was the preprocessing time)
+             print(f'{s}Done. ({t3 - t2:.3f}s)')
+
+             # Stream results
+             if view_img:
+                 cv2.imshow(str(p), im0)
+                 cv2.waitKey(1)  # 1 millisecond
+
+             # Save results (image with detections)
+             if save_img:
+                 if dataset.mode == 'image':
+                     cv2.imwrite(save_path, im0)
+                 else:  # 'video' or 'stream'
+                     if vid_path[i] != save_path:  # new video (fixed: writer state indexed per stream)
+                         vid_path[i] = save_path
+                         if isinstance(vid_writer[i], cv2.VideoWriter):
+                             vid_writer[i].release()  # release previous video writer
+                         if vid_cap:  # video
+                             fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                             w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                             h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                         else:  # stream
+                             fps, w, h = 30, im0.shape[1], im0.shape[0]
+                             save_path += '.mp4'
+                         vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                     vid_writer[i].write(im0)
+
+     if save_txt or save_img:
+         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+         print(f"Results saved to {save_dir}{s}")
+
+     if update:
+         strip_optimizer(weights)  # update model (to fix SourceChangeWarning)
+
+     print(f'Done. ({time.time() - t0:.3f}s)')
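
A sketch of invoking the entry point above (the class filter and paths are illustrative; the function is normally driven by an argparse wrapper not shown in this diff):

    from detect_sort import detect

    detect(weights='yolov5s.pt',
           source='Shibuya_Crossing_FullHD.mp4',  # any video/stream the YOLOv5 loaders accept
           classes=[0],                           # COCO class 0 = person, so only people are tracked
           save_txt=True,                         # also dump MOT-style rows per frame
           config_deepsort='deep_sort_pytorch/configs/deep_sort.yaml')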
drift_frames/frame_0.png ADDED
drift_frames/frame_1.png ADDED
drift_frames/frame_10.png ADDED

Git LFS Details

  • SHA256: c6bd7e673aa1d36e4846594295007b7d38f0beef8af216c5347d33112c62bd5a
  • Pointer size: 132 Bytes
  • Size of remote file: 1.26 MB
drift_frames/frame_100.png ADDED
drift_frames/frame_101.png ADDED
drift_frames/frame_102.png ADDED
drift_frames/frame_103.png ADDED