matthewrt committed on
Commit 7b36907 · 1 Parent(s): 09f6845

Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .dockerignore +213 -0
  2. .gitattributes +4 -35
  3. .github/ISSUE_TEMPLATE/bug-report.md +55 -0
  4. .github/ISSUE_TEMPLATE/feature-request.md +27 -0
  5. .github/ISSUE_TEMPLATE/question.md +13 -0
  6. .github/dependabot.yml +12 -0
  7. .github/workflows/ci-testing.yml +80 -0
  8. .github/workflows/codeql-analysis.yml +54 -0
  9. .github/workflows/greetings.yml +56 -0
  10. .github/workflows/rebase.yml +21 -0
  11. .github/workflows/stale.yml +18 -0
  12. .gitignore +249 -0
  13. Dockerfile +60 -0
  14. LICENSE +674 -0
  15. README.md +37 -7
  16. app.py +39 -0
  17. data/coco128.yaml +28 -0
  18. data/crowdhuman.yaml +8 -0
  19. data/hyp.finetune.yaml +38 -0
  20. data/hyp.scratch.yaml +33 -0
  21. data/images/bus.jpg +0 -0
  22. data/images/zidane.jpg +0 -0
  23. data/scripts/get_coco.sh +27 -0
  24. data/scripts/get_voc.sh +139 -0
  25. data/voc.yaml +21 -0
  26. detect.py +200 -0
  27. hubconf.py +146 -0
  28. models/__init__.py +0 -0
  29. models/__pycache__/__init__.cpython-310.pyc +0 -0
  30. models/__pycache__/common.cpython-310.pyc +0 -0
  31. models/__pycache__/experimental.cpython-310.pyc +0 -0
  32. models/__pycache__/yolo.cpython-310.pyc +0 -0
  33. models/common.py +308 -0
  34. models/experimental.py +133 -0
  35. models/export.py +100 -0
  36. models/hub/anchors.yaml +58 -0
  37. models/hub/yolov3-spp.yaml +51 -0
  38. models/hub/yolov3-tiny.yaml +41 -0
  39. models/hub/yolov3.yaml +51 -0
  40. models/hub/yolov5-fpn.yaml +42 -0
  41. models/hub/yolov5-p2.yaml +54 -0
  42. models/hub/yolov5-p6.yaml +56 -0
  43. models/hub/yolov5-p7.yaml +67 -0
  44. models/hub/yolov5-panet.yaml +48 -0
  45. models/hub/yolov5l6.yaml +60 -0
  46. models/hub/yolov5m6.yaml +60 -0
  47. models/hub/yolov5s6.yaml +60 -0
  48. models/hub/yolov5x6.yaml +60 -0
  49. models/yolo.py +272 -0
  50. models/yolov5l.yaml +48 -0
.dockerignore ADDED
@@ -0,0 +1,213 @@
+ # Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
+ #.git
+ .cache
+ .idea
+ output
+ coco
+ storage.googleapis.com
+
+ data/samples/*
+ **/results*.txt
+ *.jpg
+
+ # Neural Network weights -----------------------------------------------------------------------------------------------
+ **/*.pth
+ **/*.onnx
+ **/*.mlmodel
+ **/*.torchscript
+
+
+ # Below Copied From .gitignore -----------------------------------------------------------------------------------------
+ # Below Copied From .gitignore -----------------------------------------------------------------------------------------
+
+
+ # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ env/
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ wandb/
+ .installed.cfg
+ *.egg
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ .hypothesis/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # dotenv
+ .env
+
+ # virtualenv
+ .venv*
+ venv*/
+ ENV*/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+
+
+ # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
+
+ # General
+ .DS_Store
+ .AppleDouble
+ .LSOverride
+
+ # Icon must end with two \r
+ Icon
+ Icon?
+
+ # Thumbnails
+ ._*
+
+ # Files that might appear in the root of a volume
+ .DocumentRevisions-V100
+ .fseventsd
+ .Spotlight-V100
+ .TemporaryItems
+ .Trashes
+ .VolumeIcon.icns
+ .com.apple.timemachine.donotpresent
+
+ # Directories potentially created on remote AFP share
+ .AppleDB
+ .AppleDesktop
+ Network Trash Folder
+ Temporary Items
+ .apdisk
+
+
+ # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
+ # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+ # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+ # User-specific stuff:
+ .idea/*
+ .idea/**/workspace.xml
+ .idea/**/tasks.xml
+ .idea/dictionaries
+ .html  # Bokeh Plots
+ .pg  # TensorFlow Frozen Graphs
+ .avi  # videos
+
+ # Sensitive or high-churn files:
+ .idea/**/dataSources/
+ .idea/**/dataSources.ids
+ .idea/**/dataSources.local.xml
+ .idea/**/sqlDataSources.xml
+ .idea/**/dynamic.xml
+ .idea/**/uiDesigner.xml
+
+ # Gradle:
+ .idea/**/gradle.xml
+ .idea/**/libraries
+
+ # CMake
+ cmake-build-debug/
+ cmake-build-release/
+
+ # Mongo Explorer plugin:
+ .idea/**/mongoSettings.xml
+
+ ## File-based project format:
+ *.iws
+
+ ## Plugin-specific files:
+
+ # IntelliJ
+ out/
+
+ # mpeltonen/sbt-idea plugin
+ .idea_modules/
+
+ # JIRA plugin
+ atlassian-ide-plugin.xml
+
+ # Cursive Clojure plugin
+ .idea/replstate.xml
+
+ # Crashlytics plugin (for Android Studio and IntelliJ)
+ com_crashlytics_export_strings.xml
+ crashlytics.properties
+ crashlytics-build.properties
+ fabric.properties
.gitattributes CHANGED
@@ -1,35 +1,4 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # this drops notebooks from GitHub language stats
+ *.ipynb linguist-vendored
+ runs/detect/exp14/Rishikesh_Satsangs_Aakash_Ganga_Audience_Long_shot_from_the_back-scaled.jpeg filter=lfs diff=lfs merge=lfs -text
+ weights/crowdhuman_yolov5m.pt filter=lfs diff=lfs merge=lfs -text
.github/ISSUE_TEMPLATE/bug-report.md ADDED
@@ -0,0 +1,55 @@
+ ---
+ name: "🐛 Bug report"
+ about: Create a report to help us improve
+ title: ''
+ labels: bug
+ assignees: ''
+
+ ---
+
+ Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, otherwise it is non-actionable, and we cannot help you:
+ - **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo
+ - **Common dataset**: coco.yaml or coco128.yaml
+ - **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments
+
+ If this is a custom dataset/training question you **must include** your `train*.jpg`, `test*.jpg` and `results.png` figures, or we cannot help you. You can generate these with `utils.plot_results()`.
+
+
+ ## 🐛 Bug
+ A clear and concise description of what the bug is.
+
+
+ ## To Reproduce (REQUIRED)
+
+ Input:
+ ```
+ import torch
+
+ a = torch.tensor([5])
+ c = a / 0
+ ```
+
+ Output:
+ ```
+ Traceback (most recent call last):
+   File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code
+     exec(code_obj, self.user_global_ns, self.user_ns)
+   File "<ipython-input-5-be04c762b799>", line 5, in <module>
+     c = a / 0
+ RuntimeError: ZeroDivisionError
+ ```
+
+
+ ## Expected behavior
+ A clear and concise description of what you expected to happen.
+
+
+ ## Environment
+ If applicable, add screenshots to help explain your problem.
+
+ - OS: [e.g. Ubuntu]
+ - GPU [e.g. 2080 Ti]
+
+
+ ## Additional context
+ Add any other context about the problem here.
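The template above asks reporters to generate their training figures with `utils.plot_results()`. A minimal usage sketch follows; the module path (`utils.plots`) and the `save_dir` argument are assumptions carried over from upstream YOLOv5 of this era, not confirmed by this diff:

```python
# Hypothetical sketch -- module path and save_dir argument are assumptions
# based on upstream YOLOv5; adjust to this repo's actual layout.
from utils.plots import plot_results

# Read results.txt from a training run directory and write results.png beside it
plot_results(save_dir='runs/train/exp')
```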
.github/ISSUE_TEMPLATE/feature-request.md ADDED
@@ -0,0 +1,27 @@
+ ---
+ name: "🚀 Feature request"
+ about: Suggest an idea for this project
+ title: ''
+ labels: enhancement
+ assignees: ''
+
+ ---
+
+ ## 🚀 Feature
+ <!-- A clear and concise description of the feature proposal -->
+
+ ## Motivation
+
+ <!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->
+
+ ## Pitch
+
+ <!-- A clear and concise description of what you want to happen. -->
+
+ ## Alternatives
+
+ <!-- A clear and concise description of any alternative solutions or features you've considered, if any. -->
+
+ ## Additional context
+
+ <!-- Add any other context or screenshots about the feature request here. -->
.github/ISSUE_TEMPLATE/question.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ name: "❓Question"
+ about: Ask a general question
+ title: ''
+ labels: question
+ assignees: ''
+
+ ---
+
+ ## ❔Question
+
+
+ ## Additional context
.github/dependabot.yml ADDED
@@ -0,0 +1,12 @@
+ version: 2
+ updates:
+   - package-ecosystem: pip
+     directory: "/"
+     schedule:
+       interval: weekly
+       time: "04:00"
+     open-pull-requests-limit: 10
+     reviewers:
+       - glenn-jocher
+     labels:
+       - dependencies
.github/workflows/ci-testing.yml ADDED
@@ -0,0 +1,80 @@
+ name: CI CPU testing
+
+ on:  # https://help.github.com/en/actions/reference/events-that-trigger-workflows
+   push:
+     branches: [ master ]
+   pull_request:
+     # The branches below must be a subset of the branches above
+     branches: [ master ]
+   schedule:
+     - cron: '0 0 * * *'  # Runs at 00:00 UTC every day
+
+ jobs:
+   cpu-tests:
+
+     runs-on: ${{ matrix.os }}
+     strategy:
+       fail-fast: false
+       matrix:
+         os: [ubuntu-latest, macos-latest, windows-latest]
+         python-version: [3.8]
+         model: ['yolov5s']  # models to test
+
+     # Timeout: https://stackoverflow.com/a/59076067/4521646
+     timeout-minutes: 50
+     steps:
+       - uses: actions/checkout@v2
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v2
+         with:
+           python-version: ${{ matrix.python-version }}
+
+       # Note: This uses an internal pip API and may not always work
+       # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow
+       - name: Get pip cache
+         id: pip-cache
+         run: |
+           python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)"
+
+       - name: Cache pip
+         uses: actions/cache@v1
+         with:
+           path: ${{ steps.pip-cache.outputs.dir }}
+           key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }}
+           restore-keys: |
+             ${{ runner.os }}-${{ matrix.python-version }}-pip-
+
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html
+           pip install -q onnx
+           python --version
+           pip --version
+           pip list
+         shell: bash
+
+       - name: Download data
+         run: |
+           # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
+           # unzip -q tmp.zip -d ../
+           # rm tmp.zip
+
+       - name: Tests workflow
+         run: |
+           # export PYTHONPATH="$PWD"  # to run '$ python *.py' files in subdirectories
+           di=cpu  # inference device
+
+           # train
+           python train.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di
+           # detect
+           python detect.py --weights weights/${{ matrix.model }}.pt --device $di
+           python detect.py --weights runs/train/exp/weights/last.pt --device $di
+           # test
+           python test.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --device $di
+           python test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di
+
+           python hubconf.py  # hub
+           python models/yolo.py --cfg models/${{ matrix.model }}.yaml  # inspect
+           python models/export.py --img 128 --batch 1 --weights weights/${{ matrix.model }}.pt  # export
+         shell: bash
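The workflow above smoke-tests the hub entry point with a bare `python hubconf.py`. For reference, a hedged sketch of what the same entry point looks like when consumed through `torch.hub`, using the `yolov5s` model from the CI matrix and an image added in this commit (the `pretrained` keyword is an assumption carried over from upstream YOLOv5's hubconf):

```python
# Sketch of exercising the torch.hub entry point that CI smoke-tests.
import torch

# 'yolov5s' matches the CI matrix model; weights download on first use
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

results = model('data/images/zidane.jpg')  # image shipped in this commit
results.print()  # per-image detection summary to stdout
```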
.github/workflows/codeql-analysis.yml ADDED
@@ -0,0 +1,54 @@
+ # This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
+ # https://github.com/github/codeql-action
+
+ name: "CodeQL"
+
+ on:
+   schedule:
+     - cron: '0 0 1 * *'  # Runs at 00:00 UTC on the 1st of every month
+
+ jobs:
+   analyze:
+     name: Analyze
+     runs-on: ubuntu-latest
+
+     strategy:
+       fail-fast: false
+       matrix:
+         language: [ 'python' ]
+         # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+         # Learn more:
+         # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v2
+
+       # Initializes the CodeQL tools for scanning.
+       - name: Initialize CodeQL
+         uses: github/codeql-action/init@v1
+         with:
+           languages: ${{ matrix.language }}
+           # If you wish to specify custom queries, you can do so here or in a config file.
+           # By default, queries listed here will override any specified in a config file.
+           # Prefix the list here with "+" to use these queries and those in the config file.
+           # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+       # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+       # If this step fails, then you should remove it and run the build manually (see below)
+       - name: Autobuild
+         uses: github/codeql-action/autobuild@v1
+
+       # ℹ️ Command-line programs to run using the OS shell.
+       # 📚 https://git.io/JvXDl
+
+       # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+       #    and modify them (or add more) to build your code if your project
+       #    uses a compiled language
+
+       #- run: |
+       #    make bootstrap
+       #    make release
+
+       - name: Perform CodeQL Analysis
+         uses: github/codeql-action/analyze@v1
.github/workflows/greetings.yml ADDED
@@ -0,0 +1,56 @@
+ name: Greetings
+
+ on: [pull_request_target, issues]
+
+ jobs:
+   greeting:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/first-interaction@v1
+         with:
+           repo-token: ${{ secrets.GITHUB_TOKEN }}
+           pr-message: |
+             👋 Hello @${{ github.actor }}, thank you for submitting a 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to:
+             - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch:
+             ```bash
+             git remote add upstream https://github.com/ultralytics/yolov5.git
+             git fetch upstream
+             git checkout feature  # <----- replace 'feature' with local branch name
+             git rebase upstream/master
+             git push -u origin -f
+             ```
+             - ✅ Verify all Continuous Integration (CI) **checks are passing**.
+             - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee
+
+           issue-message: |
+             👋 Hello @${{ github.actor }}, thank you for your interest in 🚀 YOLOv5! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607).
+
+             If this is a 🐛 Bug Report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we cannot help you.
+
+             If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available.
+
+             For business inquiries or professional support requests please visit https://www.ultralytics.com or email Glenn Jocher at [email protected].
+
+             ## Requirements
+
+             Python 3.8 or later with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed, including `torch>=1.7`. To install run:
+             ```bash
+             $ pip install -r requirements.txt
+             ```
+
+             ## Environments
+
+             YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
+
+             - **Google Colab and Kaggle** notebooks with free GPU: <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
+             - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
+             - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
+             - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) <a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
+
+
+             ## Status
+
+             ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)
+
+             If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.
+
.github/workflows/rebase.yml ADDED
@@ -0,0 +1,21 @@
+ name: Automatic Rebase
+ # https://github.com/marketplace/actions/automatic-rebase
+
+ on:
+   issue_comment:
+     types: [created]
+
+ jobs:
+   rebase:
+     name: Rebase
+     if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase')
+     runs-on: ubuntu-latest
+     steps:
+       - name: Checkout the latest code
+         uses: actions/checkout@v2
+         with:
+           fetch-depth: 0
+       - name: Automatic Rebase
+         uses: cirrus-actions/[email protected]
+         env:
+           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/stale.yml ADDED
@@ -0,0 +1,18 @@
+ name: Close stale issues
+ on:
+   schedule:
+     - cron: "0 0 * * *"
+
+ jobs:
+   stale:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/stale@v3
+         with:
+           repo-token: ${{ secrets.GITHUB_TOKEN }}
+           stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.'
+           stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.'
+           days-before-stale: 30
+           days-before-close: 5
+           exempt-issue-labels: 'documentation,tutorial'
+           operations-per-run: 100  # The maximum number of operations per run, used to control rate limiting.
.gitignore ADDED
@@ -0,0 +1,249 @@
+ # Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
+ *.jpg
+ *.jpeg
+ *.png
+ *.bmp
+ *.tif
+ *.tiff
+ *.heic
+ *.JPG
+ *.JPEG
+ *.PNG
+ *.BMP
+ *.TIF
+ *.TIFF
+ *.HEIC
+ *.mp4
+ *.mov
+ *.MOV
+ *.avi
+ *.data
+ *.json
+
+ *.cfg
+ !cfg/yolov3*.cfg
+
+ storage.googleapis.com
+ data/*
+ !data/images/zidane.jpg
+ !data/images/bus.jpg
+ !data/coco.names
+ !data/coco_paper.names
+ !data/coco.data
+ !data/coco_*.data
+ !data/coco_*.txt
+ !data/trainvalno5k.shapes
+ !data/*.sh
+
+ pycocotools/*
+ results*.txt
+ gcp_test*.sh
+
+ # Datasets -------------------------------------------------------------------------------------------------------------
+ coco/
+ coco128/
+ VOC/
+
+ # MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
+ *.m~
+ *.mat
+ !targets*.mat
+
+ # Neural Network weights -----------------------------------------------------------------------------------------------
+ *.onnx
+ *.mlmodel
+ *.torchscript
+ darknet53.conv.74
+ yolov3-tiny.conv.15
+
+ # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ env/
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ wandb/
+ .installed.cfg
+ *.egg
+
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ .hypothesis/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # dotenv
+ .env
+
+ # virtualenv
+ .venv*
+ venv*/
+ ENV*/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+
+
+ # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
+
+ # General
+ .DS_Store
+ .AppleDouble
+ .LSOverride
+
+ # Icon must end with two \r
+ Icon
+ Icon?
+
+ # Thumbnails
+ ._*
+
+ # Files that might appear in the root of a volume
+ .DocumentRevisions-V100
+ .fseventsd
+ .Spotlight-V100
+ .TemporaryItems
+ .Trashes
+ .VolumeIcon.icns
+ .com.apple.timemachine.donotpresent
+
+ # Directories potentially created on remote AFP share
+ .AppleDB
+ .AppleDesktop
+ Network Trash Folder
+ Temporary Items
+ .apdisk
+
+
+ # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
+ # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+ # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+ # User-specific stuff:
+ .idea/*
+ .idea/**/workspace.xml
+ .idea/**/tasks.xml
+ .idea/dictionaries
+ .html  # Bokeh Plots
+ .pg  # TensorFlow Frozen Graphs
+ .avi  # videos
+
+ # Sensitive or high-churn files:
+ .idea/**/dataSources/
+ .idea/**/dataSources.ids
+ .idea/**/dataSources.local.xml
+ .idea/**/sqlDataSources.xml
+ .idea/**/dynamic.xml
+ .idea/**/uiDesigner.xml
+
+ # Gradle:
+ .idea/**/gradle.xml
+ .idea/**/libraries
+
+ # CMake
+ cmake-build-debug/
+ cmake-build-release/
+
+ # Mongo Explorer plugin:
+ .idea/**/mongoSettings.xml
+
+ ## File-based project format:
+ *.iws
+
+ ## Plugin-specific files:
+
+ # IntelliJ
+ out/
+
+ # mpeltonen/sbt-idea plugin
+ .idea_modules/
+
+ # JIRA plugin
+ atlassian-ide-plugin.xml
+
+ # Cursive Clojure plugin
+ .idea/replstate.xml
+
+ # Crashlytics plugin (for Android Studio and IntelliJ)
+ com_crashlytics_export_strings.xml
+ crashlytics.properties
+ crashlytics-build.properties
+ fabric.properties
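The negation patterns above (`!data/images/zidane.jpg`, etc.) re-include specific files that the broad `*.jpg` and `data/*` rules would otherwise exclude. A simplified sketch of that last-match-wins logic, using `fnmatch` as a rough stand-in for git's richer pattern syntax (anchoring, directory-only rules and `**` semantics are not modeled):

```python
# Simplified model of gitignore matching with "!" negation; fnmatch is only an
# approximation of git's pattern rules, used here to show last-match-wins.
from fnmatch import fnmatch

PATTERNS = ['*.jpg', 'data/*', '!data/images/zidane.jpg', '!data/images/bus.jpg']

def is_ignored(path: str) -> bool:
    ignored = False
    for pat in PATTERNS:  # later patterns override earlier ones, as in git
        negated = pat.startswith('!')
        if fnmatch(path, pat.lstrip('!')):
            ignored = not negated
    return ignored

assert is_ignored('runs/detect/exp/result.jpg')   # excluded by *.jpg
assert not is_ignored('data/images/zidane.jpg')   # re-included by "!" rule
```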
Dockerfile ADDED
@@ -0,0 +1,60 @@
+ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
+ FROM nvcr.io/nvidia/pytorch:20.12-py3
+
+ # Install linux packages
+ RUN apt update && apt install -y zip screen libgl1-mesa-glx
+
+ # RUN apt-get install vim
+
+ # Install python dependencies
+ RUN python -m pip install --upgrade pip
+ COPY requirements.txt .
+ RUN pip install -r requirements.txt gsutil
+
+ # Create working directory
+ RUN mkdir -p /usr/src/app
+ WORKDIR /usr/src/app
+
+ # Copy contents
+ COPY . /usr/src/app
+
+ RUN git config --global --add safe.directory /usr/src/app
+
+ RUN git config --global credential.helper store
+
+ # Copy weights
+ #RUN python3 -c "from models import *; \
+ #attempt_download('weights/yolov5s.pt'); \
+ #attempt_download('weights/yolov5m.pt'); \
+ #attempt_download('weights/yolov5l.pt')"
+
+
+ # --------------------------------------------------- Extras Below ---------------------------------------------------
+
+ # Build and Push
+ # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t
+ # for v in {300..303}; do t=ultralytics/coco:v$v && sudo docker build -t $t . && sudo docker push $t; done
+
+ # Pull and Run
+ # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
+
+ # Pull and Run with local directory access
+ # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/coco:/usr/src/coco $t
+
+ # Kill all
+ # sudo docker kill $(sudo docker ps -q)
+
+ # Kill all image-based
+ # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)
+
+ # Bash into running container
+ # sudo docker exec -it 5a9b5863d93d bash
+
+ # Bash into stopped container
+ # id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash
+
+ # Send weights to GCP
+ # python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt
+
+ # Clean up
+ # docker system prune -a --volumes
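The commented-out "Copy weights" step above bakes pretrained weights into the image via `attempt_download`. A sketch of running the same step by hand outside the build, mirroring the Dockerfile's own `python3 -c` one-liner; it assumes, as that comment does, that `attempt_download` is re-exported by the `models` package:

```python
# Sketch of the commented-out "Copy weights" step, run outside the image.
# Assumes attempt_download is importable via models, as in the Dockerfile.
from models import *  # noqa: F401,F403 -- brings attempt_download into scope

for w in ('yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt'):
    attempt_download(f'weights/{w}')  # fetch from the GitHub release if missing
```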
LICENSE ADDED
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+ software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ the GNU General Public License is intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users. We, the Free Software Foundation, use the
+ GNU General Public License for most of our software; it applies also to
+ any other work released this way by its authors. You can apply it to
+ your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+ these rights or asking you to surrender the rights. Therefore, you have
+ certain responsibilities if you distribute copies of the software, or if
+ you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+ gratis or for a fee, you must pass on to the recipients the same
+ freedoms that you received. You must make sure that they, too, receive
+ or can get the source code. And you must show them these terms so they
+ know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+ (1) assert copyright on the software, and (2) offer you this License
+ giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+ that there is no warranty for this free software. For both users' and
+ authors' sake, the GPL requires that modified versions be marked as
+ changed, so that their problems will not be attributed erroneously to
+ authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+ modified versions of the software inside them, although the manufacturer
+ can do so. This is fundamentally incompatible with the aim of
+ protecting users' freedom to change the software. The systematic
+ pattern of such abuse occurs in the area of products for individuals to
+ use, which is precisely where it is most unacceptable. Therefore, we
+ have designed this version of the GPL to prohibit the practice for those
+ products. If such problems arise substantially in other domains, we
+ stand ready to extend this provision to those domains in future versions
+ of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+ States should not allow patents to restrict development and use of
+ software on general-purpose computers, but in those that do, we wish to
+ avoid the special danger that patents applied to a free program could
+ make it effectively proprietary. To prevent this, the GPL assures that
+ patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+ modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+ works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+ License. Each licensee is addressed as "you". "Licensees" and
+ "recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+ in a fashion requiring copyright permission, other than the making of an
+ exact copy. The resulting work is called a "modified version" of the
+ earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+ on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+ permission, would make you directly or secondarily liable for
+ infringement under applicable copyright law, except executing it on a
+ computer or modifying a private copy. Propagation includes copying,
+ distribution (with or without modification), making available to the
+ public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+ parties to make or receive copies. Mere interaction with a user through
+ a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+ to the extent that it includes a convenient and prominently visible
+ feature that (1) displays an appropriate copyright notice, and (2)
+ tells the user that there is no warranty for the work (except to the
+ extent that warranties are provided), that licensees may convey the
+ work under this License, and how to view a copy of this License. If
+ the interface presents a list of user commands or options, such as a
+ menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+ for making modifications to it. "Object code" means any non-source
+ form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+ standard defined by a recognized standards body, or, in the case of
+ interfaces specified for a particular programming language, one that
+ is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+ than the work as a whole, that (a) is included in the normal form of
+ packaging a Major Component, but which is not part of that Major
+ Component, and (b) serves only to enable use of the work with that
+ Major Component, or to implement a Standard Interface for which an
+ implementation is available to the public in source code form. A
+ "Major Component", in this context, means a major essential component
+ (kernel, window system, and so on) of the specific operating system
+ (if any) on which the executable work runs, or a compiler used to
+ produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+ the source code needed to generate, install, and (for an executable
+ work) run the object code and to modify the work, including scripts to
+ control those activities. However, it does not include the work's
+ System Libraries, or general-purpose tools or generally available free
+ programs which are used unmodified in performing those activities but
+ which are not part of the work. For example, Corresponding Source
+ includes interface definition files associated with source files for
+ the work, and the source code for shared libraries and dynamically
+ linked subprograms that the work is specifically designed to require,
+ such as by intimate data communication or control flow between those
+ subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+ can regenerate automatically from other parts of the Corresponding
+ Source.
+
+ The Corresponding Source for a work in source code form is that
+ same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+ copyright on the Program, and are irrevocable provided the stated
+ conditions are met. This License explicitly affirms your unlimited
+ permission to run the unmodified Program. The output from running a
+ covered work is covered by this License only if the output, given its
+ content, constitutes a covered work. This License acknowledges your
+ rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+ convey, without conditions so long as your license otherwise remains
+ in force. You may convey covered works to others for the sole purpose
+ of having them make modifications exclusively for you, or provide you
+ with facilities for running those works, provided that you comply with
+ the terms of this License in conveying all material for which you do
+ not control copyright. Those thus making or running the covered works
+ for you must do so exclusively on your behalf, under your direction
+ and control, on terms that prohibit them from making any copies of
+ your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+ the conditions stated below. Sublicensing is not allowed; section 10
+ makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+ measure under any applicable law fulfilling obligations under article
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+ similar laws prohibiting or restricting circumvention of such
+ measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+ circumvention of technological measures to the extent such circumvention
+ is effected by exercising rights under this License with respect to
+ the covered work, and you disclaim any intention to limit operation or
+ modification of the work as a means of enforcing, against the work's
+ users, your or third parties' legal rights to forbid circumvention of
+ technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+ receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice;
+ keep intact all notices stating that this License and any
+ non-permissive terms added in accord with section 7 apply to the code;
+ keep intact all notices of the absence of any warranty; and give all
+ recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+ and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+ produce it from the Program, in the form of source code under the
+ terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+ works, which are not by their nature extensions of the covered work,
+ and which are not combined with it such as to form a larger program,
+ in or on a volume of a storage or distribution medium, is called an
+ "aggregate" if the compilation and its resulting copyright are not
+ used to limit the access or legal rights of the compilation's users
+ beyond what the individual works permit. Inclusion of a covered work
+ in an aggregate does not cause this License to apply to the other
+ parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+ of sections 4 and 5, provided that you also convey the
+ machine-readable Corresponding Source under the terms of this License,
+ in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+ from the Corresponding Source as a System Library, need not be
+ included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+ tangible personal property which is normally used for personal, family,
+ or household purposes, or (2) anything designed or sold for incorporation
+ into a dwelling. In determining whether a product is a consumer product,
+ doubtful cases shall be resolved in favor of coverage. For a particular
+ product received by a particular user, "normally used" refers to a
+ typical or common use of that class of product, regardless of the status
+ of the particular user or of the way in which the particular user
+ actually uses, or expects or is expected to use, the product. A product
+ is a consumer product regardless of whether the product has substantial
+ commercial, industrial or non-consumer uses, unless such uses represent
+ the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+ procedures, authorization keys, or other information required to install
+ and execute modified versions of a covered work in that User Product from
+ a modified version of its Corresponding Source. The information must
+ suffice to ensure that the continued functioning of the modified object
+ code is in no case prevented or interfered with solely because
+ modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+ specifically for use in, a User Product, and the conveying occurs as
+ part of a transaction in which the right of possession and use of the
+ User Product is transferred to the recipient in perpetuity or for a
+ fixed term (regardless of how the transaction is characterized), the
+ Corresponding Source conveyed under this section must be accompanied
+ by the Installation Information. But this requirement does not apply
+ if neither you nor any third party retains the ability to install
+ modified object code on the User Product (for example, the work has
+ been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+ requirement to continue to provide support service, warranty, or updates
+ for a work that has been modified or installed by the recipient, or for
+ the User Product in which it has been modified or installed. Access to a
+ network may be denied when the modification itself materially and
+ adversely affects the operation of the network or violates the rules and
+ protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+ in accord with this section must be in a format that is publicly
+ documented (and with an implementation available to the public in
+ source code form), and must require no special password or key for
+ unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+ License by making exceptions from one or more of its conditions.
+ Additional permissions that are applicable to the entire Program shall
+ be treated as though they were included in this License, to the extent
+ that they are valid under applicable law. If additional permissions
+ apply only to part of the Program, that part may be used separately
+ under those permissions, but the entire Program remains governed by
+ this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+ remove any additional permissions from that copy, or from any part of
+ it. (Additional permissions may be written to require their own
+ removal in certain cases when you modify the work.) You may place
+ additional permissions on material, added by you to a covered work,
+ for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+ add to a covered work, you may (if authorized by the copyright holders of
+ that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+ restrictions" within the meaning of section 10. If the Program as you
+ received it, or any part of it, contains a notice stating that it is
+ governed by this License along with a term that is a further
+ restriction, you may remove that term. If a license document contains
+ a further restriction but permits relicensing or conveying under this
+ License, you may add to a covered work material governed by the terms
+ of that license document, provided that the further restriction does
+ not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+ must place, in the relevant source files, a statement of the
+ additional terms that apply to those files, or a notice indicating
+ where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+ form of a separately written license, or stated as exceptions;
+ the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+ provided under this License. Any attempt otherwise to propagate or
+ modify it is void, and will automatically terminate your rights under
+ this License (including any patent licenses granted under the third
+ paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+ license from a particular copyright holder is reinstated (a)
+ provisionally, unless and until the copyright holder explicitly and
+ finally terminates your license, and (b) permanently, if the copyright
+ holder fails to notify you of the violation by some reasonable means
+ prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+ licenses of parties who have received copies or rights from you under
+ this License. If your rights have been terminated and not permanently
+ reinstated, you do not qualify to receive new licenses for the same
+ material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+ run a copy of the Program. Ancillary propagation of a covered work
+ occurring solely as a consequence of using peer-to-peer transmission
+ to receive a copy likewise does not require acceptance. However,
+ nothing other than this License grants you permission to propagate or
+ modify any covered work. These actions infringe copyright if you do
+ not accept this License. Therefore, by modifying or propagating a
+ covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+ receives a license from the original licensors, to run, modify and
+ propagate that work, subject to this License. You are not responsible
+ for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+ organization, or substantially all assets of one, or subdividing an
+ organization, or merging organizations. If propagation of a covered
+ work results from an entity transaction, each party to that
+ transaction who receives a copy of the work also receives whatever
+ licenses to the work the party's predecessor in interest had or could
+ give under the previous paragraph, plus a right to possession of the
+ Corresponding Source of the work from the predecessor in interest, if
+ the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+ rights granted or affirmed under this License. For example, you may
+ not impose a license fee, royalty, or other charge for exercise of
+ rights granted under this License, and you may not initiate litigation
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ any patent claim is infringed by making, using, selling, offering for
+ sale, or importing the Program or any portion of it.
470
+
471
+ 11. Patents.
472
+
473
+ A "contributor" is a copyright holder who authorizes use under this
474
+ License of the Program or a work on which the Program is based. The
475
+ work thus licensed is called the contributor's "contributor version".
476
+
477
+ A contributor's "essential patent claims" are all patent claims
478
+ owned or controlled by the contributor, whether already acquired or
479
+ hereafter acquired, that would be infringed by some manner, permitted
480
+ by this License, of making, using, or selling its contributor version,
481
+ but do not include claims that would be infringed only as a
482
+ consequence of further modification of the contributor version. For
483
+ purposes of this definition, "control" includes the right to grant
484
+ patent sublicenses in a manner consistent with the requirements of
485
+ this License.
486
+
487
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
488
+ patent license under the contributor's essential patent claims, to
489
+ make, use, sell, offer for sale, import and otherwise run, modify and
490
+ propagate the contents of its contributor version.
491
+
492
+ In the following three paragraphs, a "patent license" is any express
493
+ agreement or commitment, however denominated, not to enforce a patent
494
+ (such as an express permission to practice a patent or covenant not to
495
+ sue for patent infringement). To "grant" such a patent license to a
496
+ party means to make such an agreement or commitment not to enforce a
497
+ patent against the party.
498
+
499
+ If you convey a covered work, knowingly relying on a patent license,
500
+ and the Corresponding Source of the work is not available for anyone
501
+ to copy, free of charge and under the terms of this License, through a
502
+ publicly available network server or other readily accessible means,
503
+ then you must either (1) cause the Corresponding Source to be so
504
+ available, or (2) arrange to deprive yourself of the benefit of the
505
+ patent license for this particular work, or (3) arrange, in a manner
506
+ consistent with the requirements of this License, to extend the patent
507
+ license to downstream recipients. "Knowingly relying" means you have
508
+ actual knowledge that, but for the patent license, your conveying the
509
+ covered work in a country, or your recipient's use of the covered work
510
+ in a country, would infringe one or more identifiable patents in that
511
+ country that you have reason to believe are valid.
512
+
513
+ If, pursuant to or in connection with a single transaction or
514
+ arrangement, you convey, or propagate by procuring conveyance of, a
515
+ covered work, and grant a patent license to some of the parties
516
+ receiving the covered work authorizing them to use, propagate, modify
517
+ or convey a specific copy of the covered work, then the patent license
518
+ you grant is automatically extended to all recipients of the covered
519
+ work and works based on it.
520
+
521
+ A patent license is "discriminatory" if it does not include within
522
+ the scope of its coverage, prohibits the exercise of, or is
523
+ conditioned on the non-exercise of one or more of the rights that are
524
+ specifically granted under this License. You may not convey a covered
525
+ work if you are a party to an arrangement with a third party that is
526
+ in the business of distributing software, under which you make payment
527
+ to the third party based on the extent of your activity of conveying
528
+ the work, and under which the third party grants, to any of the
529
+ parties who would receive the covered work from you, a discriminatory
530
+ patent license (a) in connection with copies of the covered work
531
+ conveyed by you (or copies made from those copies), or (b) primarily
532
+ for and in connection with specific products or compilations that
533
+ contain the covered work, unless you entered into that arrangement,
534
+ or that patent license was granted, prior to 28 March 2007.
535
+
536
+ Nothing in this License shall be construed as excluding or limiting
537
+ any implied license or other defenses to infringement that may
538
+ otherwise be available to you under applicable patent law.
539
+
540
+ 12. No Surrender of Others' Freedom.
541
+
542
+ If conditions are imposed on you (whether by court order, agreement or
543
+ otherwise) that contradict the conditions of this License, they do not
544
+ excuse you from the conditions of this License. If you cannot convey a
545
+ covered work so as to satisfy simultaneously your obligations under this
546
+ License and any other pertinent obligations, then as a consequence you may
547
+ not convey it at all. For example, if you agree to terms that obligate you
548
+ to collect a royalty for further conveying from those to whom you convey
549
+ the Program, the only way you could satisfy both those terms and this
550
+ License would be to refrain entirely from conveying the Program.
551
+
552
+ 13. Use with the GNU Affero General Public License.
553
+
554
+ Notwithstanding any other provision of this License, you have
555
+ permission to link or combine any covered work with a work licensed
556
+ under version 3 of the GNU Affero General Public License into a single
557
+ combined work, and to convey the resulting work. The terms of this
558
+ License will continue to apply to the part which is the covered work,
559
+ but the special requirements of the GNU Affero General Public License,
560
+ section 13, concerning interaction through a network will apply to the
561
+ combination as such.
562
+
563
+ 14. Revised Versions of this License.
564
+
565
+ The Free Software Foundation may publish revised and/or new versions of
566
+ the GNU General Public License from time to time. Such new versions will
567
+ be similar in spirit to the present version, but may differ in detail to
568
+ address new problems or concerns.
569
+
570
+ Each version is given a distinguishing version number. If the
571
+ Program specifies that a certain numbered version of the GNU General
572
+ Public License "or any later version" applies to it, you have the
573
+ option of following the terms and conditions either of that numbered
574
+ version or of any later version published by the Free Software
575
+ Foundation. If the Program does not specify a version number of the
576
+ GNU General Public License, you may choose any version ever published
577
+ by the Free Software Foundation.
578
+
579
+ If the Program specifies that a proxy can decide which future
580
+ versions of the GNU General Public License can be used, that proxy's
581
+ public statement of acceptance of a version permanently authorizes you
582
+ to choose that version for the Program.
583
+
584
+ Later license versions may give you additional or different
585
+ permissions. However, no additional obligations are imposed on any
586
+ author or copyright holder as a result of your choosing to follow a
587
+ later version.
588
+
589
+ 15. Disclaimer of Warranty.
590
+
591
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599
+
600
+ 16. Limitation of Liability.
601
+
602
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610
+ SUCH DAMAGES.
611
+
612
+ 17. Interpretation of Sections 15 and 16.
613
+
614
+ If the disclaimer of warranty and limitation of liability provided
615
+ above cannot be given local legal effect according to their terms,
616
+ reviewing courts shall apply local law that most closely approximates
617
+ an absolute waiver of all civil liability in connection with the
618
+ Program, unless a warranty or assumption of liability accompanies a
619
+ copy of the Program in return for a fee.
620
+
621
+ END OF TERMS AND CONDITIONS
622
+
623
+ How to Apply These Terms to Your New Programs
624
+
625
+ If you develop a new program, and you want it to be of the greatest
626
+ possible use to the public, the best way to achieve this is to make it
627
+ free software which everyone can redistribute and change under these terms.
628
+
629
+ To do so, attach the following notices to the program. It is safest
630
+ to attach them to the start of each source file to most effectively
631
+ state the exclusion of warranty; and each file should have at least
632
+ the "copyright" line and a pointer to where the full notice is found.
633
+
634
+ <one line to give the program's name and a brief idea of what it does.>
635
+ Copyright (C) <year> <name of author>
636
+
637
+ This program is free software: you can redistribute it and/or modify
638
+ it under the terms of the GNU General Public License as published by
639
+ the Free Software Foundation, either version 3 of the License, or
640
+ (at your option) any later version.
641
+
642
+ This program is distributed in the hope that it will be useful,
643
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
644
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645
+ GNU General Public License for more details.
646
+
647
+ You should have received a copy of the GNU General Public License
648
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
649
+
650
+ Also add information on how to contact you by electronic and paper mail.
651
+
652
+ If the program does terminal interaction, make it output a short
653
+ notice like this when it starts in an interactive mode:
654
+
655
+ <program> Copyright (C) <year> <name of author>
656
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657
+ This is free software, and you are welcome to redistribute it
658
+ under certain conditions; type `show c' for details.
659
+
660
+ The hypothetical commands `show w' and `show c' should show the appropriate
661
+ parts of the General Public License. Of course, your program's commands
662
+ might be different; for a GUI interface, you would use an "about box".
663
+
664
+ You should also get your employer (if you work as a programmer) or school,
665
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
666
+ For more information on this, and how to apply and follow the GNU GPL, see
667
+ <http://www.gnu.org/licenses/>.
668
+
669
+ The GNU General Public License does not permit incorporating your program
670
+ into proprietary programs. If your program is a subroutine library, you
671
+ may consider it more useful to permit linking proprietary applications with
672
+ the library. If this is what you want to do, use the GNU Lesser General
673
+ Public License instead of this License. But first, please read
674
+ <http://www.gnu.org/philosophy/why-not-lgpl.html>.
README.md CHANGED
@@ -1,12 +1,42 @@
1
  ---
2
- title: People Counting
3
- emoji: 👀
4
- colorFrom: green
5
- colorTo: green
6
  sdk: gradio
7
  sdk_version: 3.41.2
8
- app_file: app.py
9
- pinned: false
10
  ---
 
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
1
  ---
2
+ title: people-counting
3
+ app_file: app.py
4
  sdk: gradio
5
  sdk_version: 3.41.2
6
  ---
7
+ ## Head & Person Detection Model
8
+
9
+ ### Download the model trained on CrowdHuman using the YOLOv5(m) architecture
10
+ Download Link: [YOLOv5m-crowd-human](https://drive.google.com/file/d/1gglIwqxaH2iTvy6lZlXuAcMpd_U0GCUb/view?usp=sharing)
11
+
12
+
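+ If you prefer scripting the download, a minimal sketch using the `gdown` package works too (an assumption — `gdown` is not part of this repo's requirements, and any Google Drive downloader will do):
+
+ ```python
+ # Sketch: fetch the CrowdHuman checkpoint from Google Drive (pip install gdown).
+ import gdown
+
+ url = "https://drive.google.com/uc?id=1gglIwqxaH2iTvy6lZlXuAcMpd_U0GCUb"
+ gdown.download(url, "crowdhuman_yolov5m.pt", quiet=False)
+ ```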
13
+ <br/>
14
+
15
+ **Output (CrowdHuman model)**
16
+
17
+ ![image](https://drive.google.com/uc?export=view&id=1ZOhDBRXj-Ra0vPL7iG6lrxCWAFhJTAti)
18
+
19
+ <br/>
20
+
21
+
22
+
23
+ ## Test (All Classes)
24
+
25
+ ```bash
26
+ python3 detect.py --weights crowdhuman_yolov5m.pt --source _test/ --view-img
28
+ ```
29
+
30
+
31
+ ## Test (Only Person Class)
32
+
33
+ ```bash
34
+ python3 detect.py --weights crowdhuman_yolov5m.pt --source _test/ --view-img --person
35
+ ```
36
+
37
+
38
+ ## Test (Only Heads)
39
 
40
+ ```bash
41
+ python3 detect.py --weights crowdhuman_yolov5m.pt --source _test/ --view-img --heads
42
+ ```
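+
+
+ ## Programmatic Use
+
+ The bundled `hubconf.py` exposes a `custom()` entry point, so the same checkpoint can be used without the CLI. A minimal sketch, assuming the weights sit in the repo root and a torch version that supports `source='local'`:
+
+ ```python
+ import torch
+
+ # Load the CrowdHuman checkpoint through this repo's hubconf.py.
+ model = torch.hub.load('.', 'custom', path_or_model='crowdhuman_yolov5m.pt', source='local')
+ results = model('data/images/bus.jpg')  # autoShape accepts file paths, URLs, PIL and numpy images
+ results.print()  # e.g. "2 persons, 2 heads"
+ ```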
app.py ADDED
@@ -0,0 +1,39 @@
1
+ import gradio as gr
2
+ import subprocess
3
+ import os
4
+
5
+ def crowd_counting(image):
6
+ # Save the uploaded image (create the folder first so the save cannot fail on a missing path)
+ os.makedirs("test", exist_ok=True)
+ image_path = "test/uploaded.jpg"
+ image.save(image_path)
9
+
10
+ # Run the crowd counting model in a subprocess (detect.py defines the flag as --heads)
+ command = "python3 detect.py --weights weights/crowdhuman_yolov5m.pt --source {} --heads --project runs/output --exist-ok".format(image_path)
14
+ subprocess.run(command, shell=True)
15
+
16
+ # Read the total_boxes from the file
17
+ total_boxes_path = "runs/output/output.txt"
18
+ with open(total_boxes_path, "r") as f:
19
+ total_boxes = f.read()
20
+
21
+ # Get the output image
22
+ output_image = "runs/output/output.jpg"
23
+
24
+ # Return the output image and total_boxes
25
+ return output_image, total_boxes
26
+
27
+ # Define the input and output interfaces
28
+ inputs = gr.inputs.Image(type="pil", label="Input Image")
29
+ outputs = [gr.outputs.Image(type="pil", label="Output Image"), gr.outputs.Textbox(label="Total (Head) Count")]
30
+
31
+ # Define the title and description
32
+ title = "Crowd Counting"
33
+ description = "<div style='text-align: center;'>This is a crowd counting application that uses a deep learning model to count the number of heads in an image.<br>Made by HTX (Q3) </div>"
34
+
35
+ # Create the Gradio interface without the flag button
36
+ gradio_interface = gr.Interface(fn=crowd_counting, inputs=inputs, outputs=outputs, title=title, description=description, allow_flagging="never")
37
+
38
+ # Run the Gradio interface
39
+ gradio_interface.launch()
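
A note on usage: once the interface is running (locally via `python3 app.py`, or on the Space), it can also be queried programmatically. A sketch using the separately installed `gradio_client` package — the package, local URL and endpoint name are assumptions, not part of this repo:

```python
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")  # local default; substitute the Space URL otherwise
output_image, head_count = client.predict(
    "data/images/bus.jpg",  # input image passed as a file path
    api_name="/predict",    # default endpoint name for a single gr.Interface
)
print(head_count, output_image)
```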
data/coco128.yaml ADDED
@@ -0,0 +1,28 @@
1
+ # COCO 2017 dataset http://cocodataset.org - first 128 training images
2
+ # Train command: python train.py --data coco128.yaml
3
+ # Default dataset location is next to /yolov5:
4
+ # /parent_folder
5
+ # /coco128
6
+ # /yolov5
7
+
8
+
9
+ # download command/URL (optional)
10
+ download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
11
+
12
+ # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
13
+ train: ../coco128/images/train2017/ # 128 images
14
+ val: ../coco128/images/train2017/ # 128 images
15
+
16
+ # number of classes
17
+ nc: 80
18
+
19
+ # class names
20
+ names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
21
+ 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
22
+ 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
23
+ 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
24
+ 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
25
+ 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
26
+ 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
27
+ 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
28
+ 'hair drier', 'toothbrush' ]
data/crowdhuman.yaml ADDED
@@ -0,0 +1,8 @@
1
+ train: train.txt
2
+ val: val.txt
3
+ test: test.txt
4
+
5
+ nc: 2
6
+
7
+ # class names
8
+ names: ['person', 'head']
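
For reference, `train.txt`/`val.txt`/`test.txt` list one image path per line, and each image has a matching label file with one `class x_center y_center width height` row per box, all normalized to [0, 1] — the same format the VOC converter below emits. A minimal sketch reading one such file (the path is hypothetical):

```python
# Sketch: parse a YOLO-format label file; for this dataset class 0 = person, 1 = head.
with open("labels/example.txt") as f:
    for line in f:
        cls, x, y, w, h = line.split()
        print(int(cls), float(x), float(y), float(w), float(h))
```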
data/hyp.finetune.yaml ADDED
@@ -0,0 +1,38 @@
1
+ # Hyperparameters for VOC finetuning
2
+ # python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50
3
+ # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
4
+
5
+
6
+ # Hyperparameter Evolution Results
7
+ # Generations: 306
8
+ # P R mAP.5 mAP.5:.95 box obj cls
9
+ # Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146
10
+
11
+ lr0: 0.0032
12
+ lrf: 0.12
13
+ momentum: 0.843
14
+ weight_decay: 0.00036
15
+ warmup_epochs: 2.0
16
+ warmup_momentum: 0.5
17
+ warmup_bias_lr: 0.05
18
+ box: 0.0296
19
+ cls: 0.243
20
+ cls_pw: 0.631
21
+ obj: 0.301
22
+ obj_pw: 0.911
23
+ iou_t: 0.2
24
+ anchor_t: 2.91
25
+ # anchors: 3.63
26
+ fl_gamma: 0.0
27
+ hsv_h: 0.0138
28
+ hsv_s: 0.664
29
+ hsv_v: 0.464
30
+ degrees: 0.373
31
+ translate: 0.245
32
+ scale: 0.898
33
+ shear: 0.602
34
+ perspective: 0.0
35
+ flipud: 0.00856
36
+ fliplr: 0.5
37
+ mosaic: 1.0
38
+ mixup: 0.243
data/hyp.scratch.yaml ADDED
@@ -0,0 +1,33 @@
1
+ # Hyperparameters for COCO training from scratch
2
+ # python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300
3
+ # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
4
+
5
+
6
+ lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
7
+ lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf)
8
+ momentum: 0.937 # SGD momentum/Adam beta1
9
+ weight_decay: 0.0005 # optimizer weight decay 5e-4
10
+ warmup_epochs: 3.0 # warmup epochs (fractions ok)
11
+ warmup_momentum: 0.8 # warmup initial momentum
12
+ warmup_bias_lr: 0.1 # warmup initial bias lr
13
+ box: 0.05 # box loss gain
14
+ cls: 0.5 # cls loss gain
15
+ cls_pw: 1.0 # cls BCELoss positive_weight
16
+ obj: 1.0 # obj loss gain (scale with pixels)
17
+ obj_pw: 1.0 # obj BCELoss positive_weight
18
+ iou_t: 0.20 # IoU training threshold
19
+ anchor_t: 4.0 # anchor-multiple threshold
20
+ # anchors: 3 # anchors per output layer (0 to ignore)
21
+ fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
22
+ hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
23
+ hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
24
+ hsv_v: 0.4 # image HSV-Value augmentation (fraction)
25
+ degrees: 0.0 # image rotation (+/- deg)
26
+ translate: 0.1 # image translation (+/- fraction)
27
+ scale: 0.5 # image scale (+/- gain)
28
+ shear: 0.0 # image shear (+/- deg)
29
+ perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
30
+ flipud: 0.0 # image flip up-down (probability)
31
+ fliplr: 0.5 # image flip left-right (probability)
32
+ mosaic: 1.0 # image mosaic (probability)
33
+ mixup: 0.0 # image mixup (probability)
data/images/bus.jpg ADDED
data/images/zidane.jpg ADDED
data/scripts/get_coco.sh ADDED
@@ -0,0 +1,27 @@
1
+ #!/bin/bash
2
+ # COCO 2017 dataset http://cocodataset.org
3
+ # Download command: bash data/scripts/get_coco.sh
4
+ # Train command: python train.py --data coco.yaml
5
+ # Default dataset location is next to /yolov5:
6
+ # /parent_folder
7
+ # /coco
8
+ # /yolov5
9
+
10
+ # Download/unzip labels
11
+ d='../' # unzip directory
12
+ url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
13
+ f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB
14
+ echo 'Downloading' $url$f ' ...'
15
+ curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
16
+
17
+ # Download/unzip images
18
+ d='../coco/images' # unzip directory
19
+ url=http://images.cocodataset.org/zips/
20
+ f1='train2017.zip' # 19G, 118k images
21
+ f2='val2017.zip' # 1G, 5k images
22
+ f3='test2017.zip' # 7G, 41k images (optional)
23
+ for f in $f1 $f2; do
24
+ echo 'Downloading' $url$f '...'
25
+ curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
26
+ done
27
+ wait # finish background tasks
data/scripts/get_voc.sh ADDED
@@ -0,0 +1,139 @@
1
+ #!/bin/bash
2
+ # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
3
+ # Download command: bash data/scripts/get_voc.sh
4
+ # Train command: python train.py --data voc.yaml
5
+ # Default dataset location is next to /yolov5:
6
+ # /parent_folder
7
+ # /VOC
8
+ # /yolov5
9
+
10
+ start=$(date +%s)
11
+ mkdir -p ../tmp
12
+ cd ../tmp/
13
+
14
+ # Download/unzip images and labels
15
+ d='.' # unzip directory
16
+ url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
17
+ f1=VOCtrainval_06-Nov-2007.zip # 446MB, 5012 images
18
+ f2=VOCtest_06-Nov-2007.zip # 438MB, 4953 images
19
+ f3=VOCtrainval_11-May-2012.zip # 1.95GB, 17126 images
20
+ for f in $f3 $f2 $f1; do
21
+ echo 'Downloading' $url$f '...'
22
+ curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
23
+ done
24
+ wait # finish background tasks
25
+
26
+ end=$(date +%s)
27
+ runtime=$((end - start))
28
+ echo "Completed in" $runtime "seconds"
29
+
30
+ echo "Splitting dataset..."
31
+ python3 - "$@" <<END
32
+ import xml.etree.ElementTree as ET
33
+ import pickle
34
+ import os
35
+ from os import listdir, getcwd
36
+ from os.path import join
37
+
38
+ sets=[('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test')]
39
+
40
+ classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
41
+
42
+
43
+ def convert(size, box):
44
+ dw = 1./(size[0])
45
+ dh = 1./(size[1])
46
+ x = (box[0] + box[1])/2.0 - 1
47
+ y = (box[2] + box[3])/2.0 - 1
48
+ w = box[1] - box[0]
49
+ h = box[3] - box[2]
50
+ x = x*dw
51
+ w = w*dw
52
+ y = y*dh
53
+ h = h*dh
54
+ return (x,y,w,h)
55
+
56
+ def convert_annotation(year, image_id):
57
+ in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml'%(year, image_id))
58
+ out_file = open('VOCdevkit/VOC%s/labels/%s.txt'%(year, image_id), 'w')
59
+ tree=ET.parse(in_file)
60
+ root = tree.getroot()
61
+ size = root.find('size')
62
+ w = int(size.find('width').text)
63
+ h = int(size.find('height').text)
64
+
65
+ for obj in root.iter('object'):
66
+ difficult = obj.find('difficult').text
67
+ cls = obj.find('name').text
68
+ if cls not in classes or int(difficult)==1:
69
+ continue
70
+ cls_id = classes.index(cls)
71
+ xmlbox = obj.find('bndbox')
72
+ b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
73
+ bb = convert((w,h), b)
74
+ out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
75
+
76
+ wd = getcwd()
77
+
78
+ for year, image_set in sets:
79
+ if not os.path.exists('VOCdevkit/VOC%s/labels/'%(year)):
80
+ os.makedirs('VOCdevkit/VOC%s/labels/'%(year))
81
+ image_ids = open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt'%(year, image_set)).read().strip().split()
82
+ list_file = open('%s_%s.txt'%(year, image_set), 'w')
83
+ for image_id in image_ids:
84
+ list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg\n'%(wd, year, image_id))
85
+ convert_annotation(year, image_id)
86
+ list_file.close()
87
+
88
+ END
89
+
90
+ cat 2007_train.txt 2007_val.txt 2012_train.txt 2012_val.txt >train.txt
91
+ cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt
92
+
93
+ python3 - "$@" <<END
94
+
95
+ import shutil
96
+ import os
97
+ os.system('mkdir ../VOC/')
98
+ os.system('mkdir ../VOC/images')
99
+ os.system('mkdir ../VOC/images/train')
100
+ os.system('mkdir ../VOC/images/val')
101
+
102
+ os.system('mkdir ../VOC/labels')
103
+ os.system('mkdir ../VOC/labels/train')
104
+ os.system('mkdir ../VOC/labels/val')
105
+
106
+ import os
107
+ print(os.path.exists('../tmp/train.txt'))
108
+ f = open('../tmp/train.txt', 'r')
109
+ lines = f.readlines()
110
+
111
+ for line in lines:
112
+ line = "/".join(line.split('/')[-5:]).strip()
113
+ if (os.path.exists("../" + line)):
114
+ os.system("cp ../"+ line + " ../VOC/images/train")
115
+
116
+ line = line.replace('JPEGImages', 'labels')
117
+ line = line.replace('jpg', 'txt')
118
+ if (os.path.exists("../" + line)):
119
+ os.system("cp ../"+ line + " ../VOC/labels/train")
120
+
121
+
122
+ print(os.path.exists('../tmp/2007_test.txt'))
123
+ f = open('../tmp/2007_test.txt', 'r')
124
+ lines = f.readlines()
125
+
126
+ for line in lines:
127
+ line = "/".join(line.split('/')[-5:]).strip()
128
+ if (os.path.exists("../" + line)):
129
+ os.system("cp ../"+ line + " ../VOC/images/val")
130
+
131
+ line = line.replace('JPEGImages', 'labels')
132
+ line = line.replace('jpg', 'txt')
133
+ if (os.path.exists("../" + line)):
134
+ os.system("cp ../"+ line + " ../VOC/labels/val")
135
+
136
+ END
137
+
138
+ rm -rf ../tmp # remove temporary directory
139
+ echo "VOC download done."
data/voc.yaml ADDED
@@ -0,0 +1,21 @@
1
+ # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
2
+ # Train command: python train.py --data voc.yaml
3
+ # Default dataset location is next to /yolov5:
4
+ # /parent_folder
5
+ # /VOC
6
+ # /yolov5
7
+
8
+
9
+ # download command/URL (optional)
10
+ download: bash data/scripts/get_voc.sh
11
+
12
+ # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
13
+ train: ../VOC/images/train/ # 16551 images
14
+ val: ../VOC/images/val/ # 4952 images
15
+
16
+ # number of classes
17
+ nc: 20
18
+
19
+ # class names
20
+ names: [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
21
+ 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ]
detect.py ADDED
@@ -0,0 +1,200 @@
1
+ import argparse
2
+ import time
3
+ from pathlib import Path
4
+
5
+ import cv2
6
+ import torch
7
+ import torch.backends.cudnn as cudnn
8
+ from numpy import random
9
+
10
+ from models.experimental import attempt_load
11
+ from utils.datasets import LoadStreams, LoadImages
12
+ from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
13
+ scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
14
+ from utils.plots import plot_one_box
15
+ from utils.torch_utils import select_device, load_classifier, time_synchronized
16
+ import os
17
+
18
+
19
+ import torch.nn as nn
20
+
21
+
22
+
23
+
24
+ def detect(save_img=False):
25
+ source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
26
+ webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
27
+ ('rtsp://', 'rtmp://', 'http://'))
28
+
29
+ # Directories
30
+ save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
31
+ (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
32
+
33
+ # Initialize
34
+ set_logging()
35
+ device = select_device(opt.device)
36
+ half = device.type != 'cpu' # half precision only supported on CUDA
37
+
38
+ # Load model
39
+ model = attempt_load(weights, map_location=device) # load FP32 model
40
+ for m in model.modules():
41
+ if isinstance(m, nn.Upsample):
42
+ m.recompute_scale_factor = None
43
+ stride = int(model.stride.max()) # model stride
44
+ imgsz = check_img_size(imgsz, s=stride) # check img_size
45
+ if half:
46
+ model.half() # to FP16
47
+
48
+ # Second-stage classifier
49
+ classify = False
50
+ if classify:
51
+ modelc = load_classifier(name='resnet101', n=2) # initialize
52
+ modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']) # load weights
+ modelc.to(device).eval() # load_state_dict returns key-matching info, not the module, so move/eval separately
53
+
54
+ # Set Dataloader
55
+ vid_path, vid_writer = None, None
56
+ if webcam:
57
+ view_img = check_imshow()
58
+ cudnn.benchmark = True # set True to speed up constant image size inference
59
+ dataset = LoadStreams(source, img_size=imgsz, stride=stride)
60
+ else:
61
+ save_img = True
62
+ dataset = LoadImages(source, img_size=imgsz, stride=stride)
63
+
64
+ # Get names and colors
65
+ names = model.module.names if hasattr(model, 'module') else model.names
66
+ colors = [[255, 0, 0], [0, 255, 0]]
67
+
68
+ # Run inference
69
+ if device.type != 'cpu':
70
+ model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
71
+ t0 = time.time()
72
+ for path, img, im0s, vid_cap in dataset:
73
+ img = torch.from_numpy(img).to(device)
74
+ img = img.half() if half else img.float() # uint8 to fp16/32
75
+ img /= 255.0 # 0 - 255 to 0.0 - 1.0
76
+ if img.ndimension() == 3:
77
+ img = img.unsqueeze(0)
78
+
79
+ # Inference
80
+ t1 = time_synchronized()
81
+ pred = model(img, augment=opt.augment)[0]
82
+
83
+ # Apply NMS
84
+ pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
85
+ t2 = time_synchronized()
86
+
87
+ # Apply Classifier
88
+ if classify:
89
+ pred = apply_classifier(pred, modelc, img, im0s)
90
+
91
+ # Process detections
92
+ for i, det in enumerate(pred): # detections per image
93
+ if webcam: # batch_size >= 1
94
+ p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
95
+ else:
96
+ p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
97
+
98
+ p = Path(p) # to Path
99
+ save_path = str(save_dir / "output.jpg") # img.jpg
100
+ txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
101
+ s += '%gx%g ' % img.shape[2:] # print string
102
+ gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
103
+ if len(det):
104
+ # Rescale boxes from img_size to im0 size
105
+ det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
106
+
107
+ total_heads = 0
108
+ # Print results
109
+ for c in det[:, -1].unique():
110
+ n = (det[:, -1] == c).sum() # detections per class
111
+ s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
112
+
113
+ # Write results
114
+ for *xyxy, conf, cls in reversed(det):
115
+ if save_txt: # Write to file
116
+ xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
117
+ line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
118
+ with open(txt_path + '.txt', 'a') as f:
119
+ f.write(('%g ' * len(line)).rstrip() % line + '\n')
120
+
121
+ if save_img or view_img: # Add bbox to image
122
+ label = f'{names[int(cls)]} {conf:.2f}'
123
+ if opt.heads or opt.person:
124
+ if 'head' in label and opt.heads:
125
+ plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=50)
126
+ total_heads += 1
127
+ if 'person' in label and opt.person:
128
+ plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=50)
129
+ else:
130
+ plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=10)
131
+
132
+ print("HERE")
133
+ print(total_heads)
134
+
135
+ with open(str(save_dir / 'output.txt'), "w") as file: # resolves to runs/output/output.txt when called from app.py
136
+ file.write(str(total_heads)) # persist the head count for app.py to read back
137
+
138
+ # Print time (inference + NMS)
139
+ print(f'{s}Done. ({t2 - t1:.3f}s)')
140
+
141
+ # Stream results
142
+ if view_img:
143
+ cv2.imshow(str(p), im0)
144
+ cv2.waitKey(0) # wait indefinitely for a key press
145
+
146
+ # Save results (image with detections)
147
+ if save_img:
148
+ if dataset.mode == 'image':
149
+ cv2.imwrite(save_path, im0)
150
+ else: # 'video'
151
+ if vid_path != save_path: # new video
152
+ vid_path = save_path
153
+ if isinstance(vid_writer, cv2.VideoWriter):
154
+ vid_writer.release() # release previous video writer
155
+
156
+ fourcc = 'mp4v' # output video codec
157
+ fps = vid_cap.get(cv2.CAP_PROP_FPS)
158
+ w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
159
+ h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
160
+ vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
161
+ vid_writer.write(im0)
162
+
163
+ if save_txt or save_img:
164
+
165
+ print(f"Results saved to {save_dir}{s}")
166
+
167
+ print(f'Done. ({time.time() - t0:.3f}s)')
168
+
169
+
170
+ if __name__ == '__main__':
171
+ parser = argparse.ArgumentParser()
172
+ parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
173
+ parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam
174
+ parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
175
+ parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
176
+ parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
177
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
178
+ parser.add_argument('--view-img', action='store_true', help='display results')
179
+ parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
180
+ parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
181
+ parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
182
+ parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
183
+ parser.add_argument('--augment', action='store_true', help='augmented inference')
184
+ parser.add_argument('--update', action='store_true', help='update all models')
185
+ parser.add_argument('--project', default='runs/detect', help='save results to project/name')
186
+ parser.add_argument('--name', default='', help='save results to project/name')
187
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
188
+ parser.add_argument('--person', action='store_true', help='displays only person')
189
+ parser.add_argument('--heads', action='store_true', help='displays only heads')
190
+ opt = parser.parse_args()
191
+ print(opt)
192
+ check_requirements()
193
+
194
+ with torch.no_grad():
195
+ if opt.update: # update all models (to fix SourceChangeWarning)
196
+ for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
197
+ detect()
198
+ strip_optimizer(opt.weights)
199
+ else:
200
+ detect()
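
As modified in this fork, `detect()` writes the annotated frame to `<save_dir>/output.jpg` and the head count to `<save_dir>/output.txt`; with the flags `app.py` passes (`--project runs/output --exist-ok`) those land at fixed paths the Space reads back. A minimal sketch of consuming them after a run:

```python
# Sketch: read back the artifacts detect.py produces for app.py.
from pathlib import Path

out = Path("runs/output")
print(out.joinpath("output.txt").read_text())  # head count as a string
print(out.joinpath("output.jpg").exists())     # True once the annotated image is written
```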
hubconf.py ADDED
@@ -0,0 +1,146 @@
1
+ """File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/
2
+
3
+ Usage:
4
+ import torch
5
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80)
6
+ """
7
+
8
+ from pathlib import Path
9
+
10
+ import torch
11
+
12
+ from models.yolo import Model
13
+ from utils.general import set_logging
14
+ from utils.google_utils import attempt_download
15
+
16
+ dependencies = ['torch', 'yaml']
17
+ set_logging()
18
+
19
+
20
+ def create(name, pretrained, channels, classes, autoshape):
21
+ """Creates a specified YOLOv5 model
22
+
23
+ Arguments:
24
+ name (str): name of model, i.e. 'yolov5s'
25
+ pretrained (bool): load pretrained weights into the model
26
+ channels (int): number of input channels
27
+ classes (int): number of model classes
28
+
29
+ Returns:
30
+ pytorch model
31
+ """
32
+ config = Path(__file__).parent / 'models' / f'{name}.yaml' # model.yaml path
33
+ try:
34
+ model = Model(config, channels, classes)
35
+ if pretrained:
36
+ fname = f'{name}.pt' # checkpoint filename
37
+ attempt_download(fname) # download if not found locally
38
+ ckpt = torch.load(fname, map_location=torch.device('cpu')) # load
39
+ state_dict = ckpt['model'].float().state_dict() # to FP32
40
+ state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape} # filter
41
+ model.load_state_dict(state_dict, strict=False) # load
42
+ if len(ckpt['model'].names) == classes:
43
+ model.names = ckpt['model'].names # set class names attribute
44
+ if autoshape:
45
+ model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
46
+ return model
47
+
48
+ except Exception as e:
49
+ help_url = 'https://github.com/ultralytics/yolov5/issues/36'
50
+ s = 'Cache may be out of date, try force_reload=True. See %s for help.' % help_url
51
+ raise Exception(s) from e
52
+
53
+
54
+ def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True):
55
+ """YOLOv5-small model from https://github.com/ultralytics/yolov5
56
+
57
+ Arguments:
58
+ pretrained (bool): load pretrained weights into the model, default=False
59
+ channels (int): number of input channels, default=3
60
+ classes (int): number of model classes, default=80
61
+
62
+ Returns:
63
+ pytorch model
64
+ """
65
+ return create('yolov5s', pretrained, channels, classes, autoshape)
66
+
67
+
68
+ def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True):
69
+ """YOLOv5-medium model from https://github.com/ultralytics/yolov5
70
+
71
+ Arguments:
72
+ pretrained (bool): load pretrained weights into the model, default=False
73
+ channels (int): number of input channels, default=3
74
+ classes (int): number of model classes, default=80
75
+
76
+ Returns:
77
+ pytorch model
78
+ """
79
+ return create('yolov5m', pretrained, channels, classes, autoshape)
80
+
81
+
82
+ def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True):
83
+ """YOLOv5-large model from https://github.com/ultralytics/yolov5
84
+
85
+ Arguments:
86
+ pretrained (bool): load pretrained weights into the model, default=False
87
+ channels (int): number of input channels, default=3
88
+ classes (int): number of model classes, default=80
89
+
90
+ Returns:
91
+ pytorch model
92
+ """
93
+ return create('yolov5l', pretrained, channels, classes, autoshape)
94
+
95
+
96
+ def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True):
97
+ """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5
98
+
99
+ Arguments:
100
+ pretrained (bool): load pretrained weights into the model, default=False
101
+ channels (int): number of input channels, default=3
102
+ classes (int): number of model classes, default=80
103
+
104
+ Returns:
105
+ pytorch model
106
+ """
107
+ return create('yolov5x', pretrained, channels, classes, autoshape)
108
+
109
+
110
+ def custom(path_or_model='path/to/model.pt', autoshape=True):
111
+ """YOLOv5-custom model from https://github.com/ultralytics/yolov5
112
+
113
+ Arguments (3 options):
114
+ path_or_model (str): 'path/to/model.pt'
115
+ path_or_model (dict): torch.load('path/to/model.pt')
116
+ path_or_model (nn.Module): torch.load('path/to/model.pt')['model']
117
+
118
+ Returns:
119
+ pytorch model
120
+ """
121
+ model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint
122
+ if isinstance(model, dict):
123
+ model = model['model'] # load model
124
+
125
+ hub_model = Model(model.yaml).to(next(model.parameters()).device) # create
126
+ hub_model.load_state_dict(model.float().state_dict()) # load state_dict
127
+ hub_model.names = model.names # class names
128
+ return hub_model.autoshape() if autoshape else hub_model
129
+
130
+
131
+ if __name__ == '__main__':
132
+ model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example
133
+ # model = custom(path_or_model='path/to/model.pt') # custom example
134
+
135
+ # Verify inference
136
+ import numpy as np
137
+ from PIL import Image
138
+
139
+ imgs = [Image.open('data/images/bus.jpg'), # PIL
140
+ 'data/images/zidane.jpg', # filename
141
+ 'https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg', # URI
142
+ np.zeros((640, 480, 3))] # numpy
143
+
144
+ results = model(imgs) # batched inference
145
+ results.print()
146
+ results.save()
models/__init__.py ADDED
File without changes
models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (143 Bytes).
models/__pycache__/common.cpython-310.pyc ADDED
Binary file (14.1 kB).
models/__pycache__/experimental.cpython-310.pyc ADDED
Binary file (5.64 kB).
models/__pycache__/yolo.cpython-310.pyc ADDED
Binary file (9.93 kB).
models/common.py ADDED
@@ -0,0 +1,308 @@
1
+ # This file contains modules common to various models
2
+
3
+ import math
4
+ from pathlib import Path
5
+
6
+ import numpy as np
7
+ import requests
8
+ import torch
9
+ import torch.nn as nn
10
+ from PIL import Image
11
+
12
+ from utils.datasets import letterbox
13
+ from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
14
+ from utils.plots import color_list, plot_one_box
15
+
16
+
17
+ def autopad(k, p=None): # kernel, padding
18
+ # Pad to 'same'
19
+ if p is None:
20
+ p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
21
+ return p
22
+
23
+
24
+ def DWConv(c1, c2, k=1, s=1, act=True):
25
+ # Depthwise convolution
26
+ return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
27
+
28
+
29
+ class Conv(nn.Module):
30
+ # Standard convolution
31
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
32
+ super(Conv, self).__init__()
33
+ self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
34
+ self.bn = nn.BatchNorm2d(c2)
35
+ self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
36
+
37
+ def forward(self, x):
38
+ return self.act(self.bn(self.conv(x)))
39
+
40
+ def fuseforward(self, x):
41
+ return self.act(self.conv(x))
42
+
43
+
44
+ class Bottleneck(nn.Module):
45
+ # Standard bottleneck
46
+ def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
47
+ super(Bottleneck, self).__init__()
48
+ c_ = int(c2 * e) # hidden channels
49
+ self.cv1 = Conv(c1, c_, 1, 1)
50
+ self.cv2 = Conv(c_, c2, 3, 1, g=g)
51
+ self.add = shortcut and c1 == c2
52
+
53
+ def forward(self, x):
54
+ return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
55
+
56
+
57
+ class BottleneckCSP(nn.Module):
58
+ # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
59
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
60
+ super(BottleneckCSP, self).__init__()
61
+ c_ = int(c2 * e) # hidden channels
62
+ self.cv1 = Conv(c1, c_, 1, 1)
63
+ self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
64
+ self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
65
+ self.cv4 = Conv(2 * c_, c2, 1, 1)
66
+ self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
67
+ self.act = nn.LeakyReLU(0.1, inplace=True)
68
+ self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
69
+
70
+ def forward(self, x):
71
+ y1 = self.cv3(self.m(self.cv1(x)))
72
+ y2 = self.cv2(x)
73
+ return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
74
+
75
+
76
+ class C3(nn.Module):
77
+ # CSP Bottleneck with 3 convolutions
78
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
79
+ super(C3, self).__init__()
80
+ c_ = int(c2 * e) # hidden channels
81
+ self.cv1 = Conv(c1, c_, 1, 1)
82
+ self.cv2 = Conv(c1, c_, 1, 1)
83
+ self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
84
+ self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
85
+ # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
86
+
87
+ def forward(self, x):
88
+ return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
89
+
90
+
91
+ class SPP(nn.Module):
92
+ # Spatial pyramid pooling layer used in YOLOv3-SPP
93
+ def __init__(self, c1, c2, k=(5, 9, 13)):
94
+ super(SPP, self).__init__()
95
+ c_ = c1 // 2 # hidden channels
96
+ self.cv1 = Conv(c1, c_, 1, 1)
97
+ self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
98
+ self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
99
+
100
+ def forward(self, x):
101
+ x = self.cv1(x)
102
+ return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
103
+
104
+
105
+ class Focus(nn.Module):
106
+ # Focus wh information into c-space
107
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
108
+ super(Focus, self).__init__()
109
+ self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
110
+ # self.contract = Contract(gain=2)
111
+
112
+ def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
113
+ return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
114
+ # return self.conv(self.contract(x))
115
+
116
+
117
+ class Contract(nn.Module):
118
+ # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
119
+ def __init__(self, gain=2):
120
+ super().__init__()
121
+ self.gain = gain
122
+
123
+ def forward(self, x):
124
+ N, C, H, W = x.size() # assert H % s == 0 and W % s == 0, 'Indivisible gain'
125
+ s = self.gain
126
+ x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2)
127
+ x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
128
+ return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40)
129
+
130
+
131
+ class Expand(nn.Module):
132
+ # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
133
+ def __init__(self, gain=2):
134
+ super().__init__()
135
+ self.gain = gain
136
+
137
+ def forward(self, x):
138
+ N, C, H, W = x.size() # assert C % s ** 2 == 0, 'Indivisible gain'
139
+ s = self.gain
140
+ x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80)
141
+ x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
142
+ return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160)
143
+
144
+
145
+ class Concat(nn.Module):
146
+ # Concatenate a list of tensors along dimension
147
+ def __init__(self, dimension=1):
148
+ super(Concat, self).__init__()
149
+ self.d = dimension
150
+
151
+ def forward(self, x):
152
+ return torch.cat(x, self.d)
153
+
154
+
155
+ class NMS(nn.Module):
156
+ # Non-Maximum Suppression (NMS) module
157
+ conf = 0.25 # confidence threshold
158
+ iou = 0.45 # IoU threshold
159
+ classes = None # (optional list) filter by class
160
+
161
+ def __init__(self):
162
+ super(NMS, self).__init__()
163
+
164
+ def forward(self, x):
165
+ return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)
166
+
167
+
168
+ class autoShape(nn.Module):
169
+ # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
170
+ img_size = 640 # inference size (pixels)
171
+ conf = 0.25 # NMS confidence threshold
172
+ iou = 0.45 # NMS IoU threshold
173
+ classes = None # (optional list) filter by class
174
+
175
+ def __init__(self, model):
176
+ super(autoShape, self).__init__()
177
+ self.model = model.eval()
178
+
179
+ def autoshape(self):
180
+ print('autoShape already enabled, skipping... ') # model already converted to model.autoshape()
181
+ return self
182
+
183
+ def forward(self, imgs, size=640, augment=False, profile=False):
184
+ # Inference from various sources. For height=720, width=1280, RGB images example inputs are:
185
+ # filename: imgs = 'data/samples/zidane.jpg'
186
+ # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
187
+ # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(720,1280,3)
188
+ # PIL: = Image.open('image.jpg') # HWC x(720,1280,3)
189
+ # numpy: = np.zeros((720,1280,3)) # HWC
190
+         # torch: = torch.zeros(16,3,720,1280) # BCHW
+         # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
+
+         p = next(self.model.parameters())  # for device and type
+         if isinstance(imgs, torch.Tensor):  # torch
+             return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference
+
+         # Pre-process
+         n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])  # number of images, list of images
+         shape0, shape1, files = [], [], []  # image and inference shapes, filenames
+         for i, im in enumerate(imgs):
+             if isinstance(im, str):  # filename or uri
+                 im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im  # open
+                 im.filename = f  # for uri
+             files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg')
+             im = np.array(im)  # to numpy
+             if im.shape[0] < 5:  # image in CHW
+                 im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
+             im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
+             s = im.shape[:2]  # HWC
+             shape0.append(s)  # image shape
+             g = (size / max(s))  # gain
+             shape1.append([y * g for y in s])
+             imgs[i] = im  # update
+         shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
+         x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
+         x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
+         x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
+         x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32
+
+         # Inference
+         with torch.no_grad():
+             y = self.model(x, augment, profile)[0]  # forward
+         y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS
+
+         # Post-process
+         for i in range(n):
+             scale_coords(shape1, y[i][:, :4], shape0[i])
+
+         return Detections(imgs, y, files, self.names)
+
+
+ class Detections:
+     # detections class for YOLOv5 inference results
+     def __init__(self, imgs, pred, files, names=None):
+         super(Detections, self).__init__()
+         d = pred[0].device  # device
+         gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs]  # normalizations
+         self.imgs = imgs  # list of images as numpy arrays
+         self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
+         self.names = names  # class names
+         self.files = files  # image filenames
+         self.xyxy = pred  # xyxy pixels
+         self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
+         self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
+         self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
+         self.n = len(self.pred)
+
+     def display(self, pprint=False, show=False, save=False, render=False, save_dir=''):
+         colors = color_list()
+         for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
+             s = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '  # renamed from 'str' to avoid shadowing the builtin
+             if pred is not None:
+                 for c in pred[:, -1].unique():
+                     n = (pred[:, -1] == c).sum()  # detections per class
+                     s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
+                 if show or save or render:
+                     for *box, conf, cls in pred:  # xyxy, confidence, class
+                         label = f'{self.names[int(cls)]} {conf:.2f}'
+                         plot_one_box(box, img, label=label, color=colors[int(cls) % 10])
+             img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img  # from np
+             if pprint:
+                 print(s.rstrip(', '))
+             if show:
+                 img.show(self.files[i])  # show
+             if save:
+                 f = Path(save_dir) / self.files[i]
+                 img.save(f)  # save
+                 print(f"{'Saving' * (i == 0)} {f},", end='' if i < self.n - 1 else ' done.\n')
+             if render:
+                 self.imgs[i] = np.asarray(img)
+
+     def print(self):
+         self.display(pprint=True)  # print results
+
+     def show(self):
+         self.display(show=True)  # show results
+
+     def save(self, save_dir='results/'):
+         Path(save_dir).mkdir(exist_ok=True)
+         self.display(save=True, save_dir=save_dir)  # save results
+
+     def render(self):
+         self.display(render=True)  # render results
+         return self.imgs
+
+     def __len__(self):
+         return self.n
+
+     def tolist(self):
+         # return a list of Detections objects, i.e. 'for result in results.tolist():'
+         x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.names) for i in range(self.n)]  # pass files to match __init__
+         for d in x:
+             for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
+                 setattr(d, k, getattr(d, k)[0])  # pop out of list
+         return x
+
+
+ class Classify(nn.Module):
+     # Classification head, i.e. x(b,c1,20,20) to x(b,c2)
+     def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
+         super(Classify, self).__init__()
+         self.aap = nn.AdaptiveAvgPool2d(1)  # to x(b,c1,1,1)
+         self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)  # to x(b,c2,1,1)
+         self.flat = nn.Flatten()
+
+     def forward(self, x):
+         z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1)  # cat if list
+         return self.flat(self.conv(z))  # flatten to x(b,c2)
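A minimal usage sketch for the autoShape/Detections pipeline above (hedged: it assumes a trained checkpoint such as yolov5s.pt is available locally; the image path comes from this repo's data/images folder):

    import torch
    from models.experimental import attempt_load

    model = attempt_load('yolov5s.pt', map_location=torch.device('cpu')).autoshape()  # wrap with autoShape
    results = model('data/images/zidane.jpg', size=640)  # str/PIL/ndarray/Tensor inputs all accepted
    results.print()           # per-image summary via Detections.display(pprint=True)
    results.save('results/')  # annotated images
    boxes = results.xyxy[0]   # (n, 6) tensor: x1, y1, x2, y2, conf, cls for image 0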
models/experimental.py ADDED
@@ -0,0 +1,133 @@
+ # This file contains experimental modules
+
+ import numpy as np
+ import torch
+ import torch.nn as nn
+
+ from models.common import Conv, DWConv
+ from utils.google_utils import attempt_download
+
+
+ class CrossConv(nn.Module):
+     # Cross Convolution Downsample
+     def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
+         # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
+         super(CrossConv, self).__init__()
+         c_ = int(c2 * e)  # hidden channels
+         self.cv1 = Conv(c1, c_, (1, k), (1, s))
+         self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
+         self.add = shortcut and c1 == c2
+
+     def forward(self, x):
+         return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
+
+
+ class Sum(nn.Module):
+     # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
+     def __init__(self, n, weight=False):  # n: number of inputs
+         super(Sum, self).__init__()
+         self.weight = weight  # apply weights boolean
+         self.iter = range(n - 1)  # iter object
+         if weight:
+             self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True)  # layer weights
+
+     def forward(self, x):
+         y = x[0]  # no weight
+         if self.weight:
+             w = torch.sigmoid(self.w) * 2
+             for i in self.iter:
+                 y = y + x[i + 1] * w[i]
+         else:
+             for i in self.iter:
+                 y = y + x[i + 1]
+         return y
+
+
+ class GhostConv(nn.Module):
+     # Ghost Convolution https://github.com/huawei-noah/ghostnet
+     def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
+         super(GhostConv, self).__init__()
+         c_ = c2 // 2  # hidden channels
+         self.cv1 = Conv(c1, c_, k, s, None, g, act)
+         self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
+
+     def forward(self, x):
+         y = self.cv1(x)
+         return torch.cat([y, self.cv2(y)], 1)
+
+
+ class GhostBottleneck(nn.Module):
+     # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
+     def __init__(self, c1, c2, k=3, s=1):  # ch_in, ch_out, kernel, stride
+         super(GhostBottleneck, self).__init__()
+         c_ = c2 // 2
+         self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1),  # pw
+                                   DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
+                                   GhostConv(c_, c2, 1, 1, act=False))  # pw-linear
+         self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
+                                       Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
+
+     def forward(self, x):
+         return self.conv(x) + self.shortcut(x)
+
+
+ class MixConv2d(nn.Module):
+     # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
+     def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
+         super(MixConv2d, self).__init__()
+         groups = len(k)
+         if equal_ch:  # equal c_ per group
+             i = torch.linspace(0, groups - 1E-6, c2).floor()  # c2 indices
+             c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
+         else:  # equal weight.numel() per group
+             b = [c2] + [0] * groups
+             a = np.eye(groups + 1, groups, k=-1)
+             a -= np.roll(a, 1, axis=1)
+             a *= np.array(k) ** 2
+             a[0] = 1
+             c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
+
+         self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
+         self.bn = nn.BatchNorm2d(c2)
+         self.act = nn.LeakyReLU(0.1, inplace=True)
+
+     def forward(self, x):
+         return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))  # note: the residual add assumes c1 == c2
+
+
+ class Ensemble(nn.ModuleList):
+     # Ensemble of models
+     def __init__(self):
+         super(Ensemble, self).__init__()
+
+     def forward(self, x, augment=False):
+         y = []
+         for module in self:
+             y.append(module(x, augment)[0])
+         # y = torch.stack(y).max(0)[0]  # max ensemble
+         # y = torch.stack(y).mean(0)  # mean ensemble
+         y = torch.cat(y, 1)  # nms ensemble
+         return y, None  # inference, train output
+
+
+ def attempt_load(weights, map_location=None):
+     # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
+     model = Ensemble()
+     for w in weights if isinstance(weights, list) else [weights]:
+         attempt_download(w)
+         model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval())  # load FP32 model
+
+     # Compatibility updates
+     for m in model.modules():
+         if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
+             m.inplace = True  # pytorch 1.7.0 compatibility
+         elif type(m) is Conv:
+             m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
+
+     if len(model) == 1:
+         return model[-1]  # return model
+     else:
+         print('Ensemble created with %s\n' % weights)
+         for k in ['names', 'stride']:
+             setattr(model, k, getattr(model[-1], k))
+         return model  # return ensemble
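A quick sketch of attempt_load() above for single checkpoints vs ensembles (the *.pt names are placeholders for any compatible weights):

    from models.experimental import attempt_load

    model = attempt_load('yolov5s.pt')                     # single Model (fused, FP32, eval mode)
    ensemble = attempt_load(['yolov5s.pt', 'yolov5m.pt'])  # Ensemble; per-model outputs are concatenated for NMS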
models/export.py ADDED
@@ -0,0 +1,100 @@
+ """Exports a YOLOv5 *.pt model to ONNX and TorchScript formats
+
+ Usage:
+     $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
+ """
+
+ import argparse
+ import sys
+ import time
+
+ sys.path.append('./')  # to run '$ python *.py' files in subdirectories
+
+ import torch
+ import torch.nn as nn
+
+ import models
+ from models.experimental import attempt_load
+ from utils.activations import Hardswish, SiLU
+ from utils.general import set_logging, check_img_size
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')  # from yolov5/models/
+     parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')  # height, width
+     parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes')
+     parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+     opt = parser.parse_args()
+     opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
+     print(opt)
+     set_logging()
+     t = time.time()
+
+     # Load PyTorch model
+     model = attempt_load(opt.weights, map_location=torch.device('cpu'))  # load FP32 model
+     labels = model.names
+
+     # Checks
+     gs = int(max(model.stride))  # grid size (max stride)
+     opt.img_size = [check_img_size(x, gs) for x in opt.img_size]  # verify img_size are gs-multiples
+
+     # Input
+     img = torch.zeros(opt.batch_size, 3, *opt.img_size)  # image size(1,3,320,192) iDetection
+
+     # Update model
+     for k, m in model.named_modules():
+         m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
+         if isinstance(m, models.common.Conv):  # assign export-friendly activations
+             if isinstance(m.act, nn.Hardswish):
+                 m.act = Hardswish()
+             elif isinstance(m.act, nn.SiLU):
+                 m.act = SiLU()
+         # elif isinstance(m, models.yolo.Detect):
+         #     m.forward = m.forward_export  # assign forward (optional)
+     model.model[-1].export = True  # set Detect() layer export=True
+     y = model(img)  # dry run
+
+     # TorchScript export
+     try:
+         print('\nStarting TorchScript export with torch %s...' % torch.__version__)
+         f = opt.weights.replace('.pt', '.torchscript.pt')  # filename
+         ts = torch.jit.trace(model, img)
+         ts.save(f)
+         print('TorchScript export success, saved as %s' % f)
+     except Exception as e:
+         print('TorchScript export failure: %s' % e)
+
+     # ONNX export
+     try:
+         import onnx
+
+         print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
+         f = opt.weights.replace('.pt', '.onnx')  # filename
+         torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
+                           output_names=['classes', 'boxes'] if y is None else ['output'],
+                           dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'},  # size(1,3,640,640)
+                                         'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None)
+
+         # Checks
+         onnx_model = onnx.load(f)  # load onnx model
+         onnx.checker.check_model(onnx_model)  # check onnx model
+         # print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
+         print('ONNX export success, saved as %s' % f)
+     except Exception as e:
+         print('ONNX export failure: %s' % e)
+
+     # CoreML export
+     try:
+         import coremltools as ct
+
+         print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
+         # convert model from torchscript and apply pixel scaling as per detect.py
+         model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
+         f = opt.weights.replace('.pt', '.mlmodel')  # filename
+         model.save(f)
+         print('CoreML export success, saved as %s' % f)
+     except Exception as e:
+         print('CoreML export failure: %s' % e)
+
+     # Finish
+     print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
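After running the script above, the ONNX file can be sanity-checked outside PyTorch. A hedged sketch using onnxruntime (not a dependency of this repo):

    import numpy as np
    import onnxruntime

    session = onnxruntime.InferenceSession('weights/yolov5s.onnx')
    img = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy BCHW input, matching --img-size
    outs = session.run(None, {'images': img})  # raw head outputs; export=True skips the grid decode
    print([o.shape for o in outs])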
models/hub/anchors.yaml ADDED
@@ -0,0 +1,58 @@
+ # Default YOLOv5 anchors for COCO data
+
+
+ # P5 -------------------------------------------------------------------------------------------------------------------
+ # P5-640:
+ anchors_p5_640:
+ - [ 10,13, 16,30, 33,23 ] # P3/8
+ - [ 30,61, 62,45, 59,119 ] # P4/16
+ - [ 116,90, 156,198, 373,326 ] # P5/32
+
+
+ # P6 -------------------------------------------------------------------------------------------------------------------
+ # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
+ anchors_p6_640:
+ - [ 9,11, 21,19, 17,41 ] # P3/8
+ - [ 43,32, 39,70, 86,64 ] # P4/16
+ - [ 65,131, 134,130, 120,265 ] # P5/32
+ - [ 282,180, 247,354, 512,387 ] # P6/64
+
+ # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
+ anchors_p6_1280:
+ - [ 19,27, 44,40, 38,94 ] # P3/8
+ - [ 96,68, 86,152, 180,137 ] # P4/16
+ - [ 140,301, 303,264, 238,542 ] # P5/32
+ - [ 436,615, 739,380, 925,792 ] # P6/64
+
+ # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
+ anchors_p6_1920:
+ - [ 28,41, 67,59, 57,141 ] # P3/8
+ - [ 144,103, 129,227, 270,205 ] # P4/16
+ - [ 209,452, 455,396, 358,812 ] # P5/32
+ - [ 653,922, 1109,570, 1387,1187 ] # P6/64
+
+
+ # P7 -------------------------------------------------------------------------------------------------------------------
+ # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
+ anchors_p7_640:
+ - [ 11,11, 13,30, 29,20 ] # P3/8
+ - [ 30,46, 61,38, 39,92 ] # P4/16
+ - [ 78,80, 146,66, 79,163 ] # P5/32
+ - [ 149,150, 321,143, 157,303 ] # P6/64
+ - [ 257,402, 359,290, 524,372 ] # P7/128
+
+ # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
+ anchors_p7_1280:
+ - [ 19,22, 54,36, 32,77 ] # P3/8
+ - [ 70,83, 138,71, 75,173 ] # P4/16
+ - [ 165,159, 148,334, 375,151 ] # P5/32
+ - [ 334,317, 251,626, 499,474 ] # P6/64
+ - [ 750,326, 534,814, 1079,818 ] # P7/128
+
+ # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
+ anchors_p7_1920:
+ - [ 29,34, 81,55, 47,115 ] # P3/8
+ - [ 105,124, 207,107, 113,259 ] # P4/16
+ - [ 247,238, 222,500, 563,227 ] # P5/32
+ - [ 501,476, 376,939, 749,711 ] # P6/64
+ - [ 1126,489, 801,1222, 1618,1227 ] # P7/128
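The values above are anchor (w,h) pairs in pixels at the img_size named in each comment. A rough, hedged sketch of rescaling one set for a different training resolution (linear scaling is only a heuristic; the AutoAnchor utilities in utils/autoanchor.py can recompute dataset-specific anchors instead):

    anchors_p6_640 = [[9, 11, 21, 19, 17, 41],
                      [43, 32, 39, 70, 86, 64],
                      [65, 131, 134, 130, 120, 265],
                      [282, 180, 247, 354, 512, 387]]
    scale = 1280 / 640
    anchors_p6_1280_approx = [[round(v * scale) for v in level] for level in anchors_p6_640]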
models/hub/yolov3-spp.yaml ADDED
@@ -0,0 +1,51 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.0 # model depth multiple
+ width_multiple: 1.0 # layer channel multiple
+
+ # anchors
+ anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+ # darknet53 backbone
+ backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [32, 3, 1]], # 0
+ [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
+ [-1, 1, Bottleneck, [64]],
+ [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
+ [-1, 2, Bottleneck, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
+ [-1, 8, Bottleneck, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
+ [-1, 8, Bottleneck, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
+ [-1, 4, Bottleneck, [1024]], # 10
+ ]
+
+ # YOLOv3-SPP head
+ head:
+ [[-1, 1, Bottleneck, [1024, False]],
+ [-1, 1, SPP, [512, [5, 9, 13]]],
+ [-1, 1, Conv, [1024, 3, 1]],
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
+
+ [-2, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
+
+ [-2, 1, Conv, [128, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P3
+ [-1, 1, Bottleneck, [256, False]],
+ [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
+
+ [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
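Each of these hub configs can be instantiated directly with the Model class from models/yolo.py (a construction sketch; the resulting model is randomly initialized until trained):

    from models.yolo import Model

    model = Model('models/hub/yolov3-spp.yaml', ch=3, nc=80)
    model.info()  # layer/parameter summary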
models/hub/yolov3-tiny.yaml ADDED
@@ -0,0 +1,41 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.0 # model depth multiple
+ width_multiple: 1.0 # layer channel multiple
+
+ # anchors
+ anchors:
+ - [10,14, 23,27, 37,58] # P4/16
+ - [81,82, 135,169, 344,319] # P5/32
+
+ # YOLOv3-tiny backbone
+ backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [16, 3, 1]], # 0
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
+ [-1, 1, Conv, [32, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
+ [-1, 1, Conv, [64, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
+ [-1, 1, Conv, [128, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
+ [-1, 1, Conv, [256, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
+ [-1, 1, Conv, [512, 3, 1]],
+ [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
+ [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
+ ]
+
+ # YOLOv3-tiny head
+ head:
+ [[-1, 1, Conv, [1024, 3, 1]],
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
+
+ [-2, 1, Conv, [128, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
+
+ [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5)
+ ]
models/hub/yolov3.yaml ADDED
@@ -0,0 +1,51 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.0 # model depth multiple
+ width_multiple: 1.0 # layer channel multiple
+
+ # anchors
+ anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+ # darknet53 backbone
+ backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [32, 3, 1]], # 0
+ [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
+ [-1, 1, Bottleneck, [64]],
+ [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
+ [-1, 2, Bottleneck, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
+ [-1, 8, Bottleneck, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
+ [-1, 8, Bottleneck, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
+ [-1, 4, Bottleneck, [1024]], # 10
+ ]
+
+ # YOLOv3 head
+ head:
+ [[-1, 1, Bottleneck, [1024, False]],
+ [-1, 1, Conv, [512, [1, 1]]],
+ [-1, 1, Conv, [1024, 3, 1]],
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
+
+ [-2, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
+
+ [-2, 1, Conv, [128, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P3
+ [-1, 1, Bottleneck, [256, False]],
+ [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
+
+ [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
models/hub/yolov5-fpn.yaml ADDED
@@ -0,0 +1,42 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.0 # model depth multiple
+ width_multiple: 1.0 # layer channel multiple
+
+ # anchors
+ anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+ # YOLOv5 backbone
+ backbone:
+ # [from, number, module, args]
+ [[-1, 1, Focus, [64, 3]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, Bottleneck, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 9, BottleneckCSP, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, BottleneckCSP, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 1, SPP, [1024, [5, 9, 13]]],
+ [-1, 6, BottleneckCSP, [1024]], # 9
+ ]
+
+ # YOLOv5 FPN head
+ head:
+ [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large)
+
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium)
+
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small)
+
+ [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
models/hub/yolov5-p2.yaml ADDED
@@ -0,0 +1,54 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.0 # model depth multiple
+ width_multiple: 1.0 # layer channel multiple
+
+ # anchors
+ anchors: 3
+
+ # YOLOv5 backbone
+ backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32
+ [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 9
+ ]
+
+ # YOLOv5 head
+ head:
+ [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 13
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small)
+
+ [ -1, 1, Conv, [ 128, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 2 ], 1, Concat, [ 1 ] ], # cat backbone P2
+ [ -1, 1, C3, [ 128, False ] ], # 21 (P2/4-xsmall)
+
+ [ -1, 1, Conv, [ 128, 3, 2 ] ],
+ [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P3
+ [ -1, 3, C3, [ 256, False ] ], # 24 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 27 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 1024, False ] ], # 30 (P5/32-large)
+
+ [ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5)
+ ]
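Note that this config (and the P6/P7 variants below) sets 'anchors: 3', an integer rather than explicit anchor lists. parse_model() in models/yolo.py then substitutes 3 placeholder anchors per detection layer via

    args[1] = [list(range(args[1] * 2))] * len(f)  # number of anchors

so usable anchors are expected to come from the dataset, e.g. recomputed by the AutoAnchor utilities before serious training.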
models/hub/yolov5-p6.yaml ADDED
@@ -0,0 +1,56 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.0 # model depth multiple
+ width_multiple: 1.0 # layer channel multiple
+
+ # anchors
+ anchors: 3
+
+ # YOLOv5 backbone
+ backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 11
+ ]
+
+ # YOLOv5 head
+ head:
+ [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 15
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 19
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
+ [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)
+
+ [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
+ ]
models/hub/yolov5-p7.yaml ADDED
@@ -0,0 +1,67 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.0 # model depth multiple
+ width_multiple: 1.0 # layer channel multiple
+
+ # anchors
+ anchors: 3
+
+ # YOLOv5 backbone
+ backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 3, C3, [ 1024 ] ],
+ [ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128
+ [ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ],
+ [ -1, 3, C3, [ 1280, False ] ], # 13
+ ]
+
+ # YOLOv5 head
+ head:
+ [ [ -1, 1, Conv, [ 1024, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6
+ [ -1, 3, C3, [ 1024, False ] ], # 17
+
+ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 21
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 25
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P6
+ [ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge)
+
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ],
+ [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7
+ [ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge)
+
+ [ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7)
+ ]
models/hub/yolov5-panet.yaml ADDED
@@ -0,0 +1,48 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.0 # model depth multiple
+ width_multiple: 1.0 # layer channel multiple
+
+ # anchors
+ anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+ # YOLOv5 backbone
+ backbone:
+ # [from, number, module, args]
+ [[-1, 1, Focus, [64, 3]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, BottleneckCSP, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 9, BottleneckCSP, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, BottleneckCSP, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 1, SPP, [1024, [5, 9, 13]]],
+ [-1, 3, BottleneckCSP, [1024, False]], # 9
+ ]
+
+ # YOLOv5 PANet head
+ head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, BottleneckCSP, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
models/hub/yolov5l6.yaml ADDED
@@ -0,0 +1,60 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.0 # model depth multiple
+ width_multiple: 1.0 # layer channel multiple
+
+ # anchors
+ anchors:
+ - [ 19,27, 44,40, 38,94 ] # P3/8
+ - [ 96,68, 86,152, 180,137 ] # P4/16
+ - [ 140,301, 303,264, 238,542 ] # P5/32
+ - [ 436,615, 739,380, 925,792 ] # P6/64
+
+ # YOLOv5 backbone
+ backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 11
+ ]
+
+ # YOLOv5 head
+ head:
+ [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 15
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 19
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
+ [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)
+
+ [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
+ ]
models/hub/yolov5m6.yaml ADDED
@@ -0,0 +1,60 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 0.67 # model depth multiple
+ width_multiple: 0.75 # layer channel multiple
+
+ # anchors
+ anchors:
+ - [ 19,27, 44,40, 38,94 ] # P3/8
+ - [ 96,68, 86,152, 180,137 ] # P4/16
+ - [ 140,301, 303,264, 238,542 ] # P5/32
+ - [ 436,615, 739,380, 925,792 ] # P6/64
+
+ # YOLOv5 backbone
+ backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 11
+ ]
+
+ # YOLOv5 head
+ head:
+ [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 15
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 19
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
+ [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)
+
+ [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
+ ]
models/hub/yolov5s6.yaml ADDED
@@ -0,0 +1,60 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 0.33 # model depth multiple
+ width_multiple: 0.50 # layer channel multiple
+
+ # anchors
+ anchors:
+ - [ 19,27, 44,40, 38,94 ] # P3/8
+ - [ 96,68, 86,152, 180,137 ] # P4/16
+ - [ 140,301, 303,264, 238,542 ] # P5/32
+ - [ 436,615, 739,380, 925,792 ] # P6/64
+
+ # YOLOv5 backbone
+ backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 11
+ ]
+
+ # YOLOv5 head
+ head:
+ [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 15
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 19
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
+ [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)
+
+ [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
+ ]
models/hub/yolov5x6.yaml ADDED
@@ -0,0 +1,60 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.33 # model depth multiple
+ width_multiple: 1.25 # layer channel multiple
+
+ # anchors
+ anchors:
+ - [ 19,27, 44,40, 38,94 ] # P3/8
+ - [ 96,68, 86,152, 180,137 ] # P4/16
+ - [ 140,301, 303,264, 238,542 ] # P5/32
+ - [ 436,615, 739,380, 925,792 ] # P6/64
+
+ # YOLOv5 backbone
+ backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 11
+ ]
+
+ # YOLOv5 head
+ head:
+ [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 15
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 19
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
+ [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)
+
+ [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
+ ]
models/yolo.py ADDED
@@ -0,0 +1,272 @@
+ import argparse
+ import logging
+ import sys
+ from copy import deepcopy
+
+ sys.path.append('./')  # to run '$ python *.py' files in subdirectories
+ logger = logging.getLogger(__name__)
+
+ from models.common import *
+ from models.experimental import *
+ from utils.autoanchor import check_anchor_order
+ from utils.general import make_divisible, check_file, set_logging
+ from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
+     select_device, copy_attr
+
+ try:
+     import thop  # for FLOPS computation
+ except ImportError:
+     thop = None
+
+
+ class Detect(nn.Module):
+     stride = None  # strides computed during build
+     export = False  # onnx export
+
+     def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
+         super(Detect, self).__init__()
+         self.nc = nc  # number of classes
+         self.no = nc + 5  # number of outputs per anchor
+         self.nl = len(anchors)  # number of detection layers
+         self.na = len(anchors[0]) // 2  # number of anchors
+         self.grid = [torch.zeros(1)] * self.nl  # init grid
+         a = torch.tensor(anchors).float().view(self.nl, -1, 2)
+         self.register_buffer('anchors', a)  # shape(nl,na,2)
+         self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
+         self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
+
+     def forward(self, x):
+         # x = x.copy()  # for profiling
+         z = []  # inference output
+         self.training |= self.export
+         for i in range(self.nl):
+             x[i] = self.m[i](x[i])  # conv
+             bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
+             x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
+
+             if not self.training:  # inference
+                 if self.grid[i].shape[2:4] != x[i].shape[2:4]:
+                     self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
+
+                 y = x[i].sigmoid()
+                 y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
+                 y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
+                 z.append(y.view(bs, -1, self.no))
+
+         return x if self.training else (torch.cat(z, 1), x)
+
+     @staticmethod
+     def _make_grid(nx=20, ny=20):
+         yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
+         return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
+
+
+ class Model(nn.Module):
+     def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None):  # model, input channels, number of classes
+         super(Model, self).__init__()
+         if isinstance(cfg, dict):
+             self.yaml = cfg  # model dict
+         else:  # is *.yaml
+             import yaml  # for torch hub
+             self.yaml_file = Path(cfg).name
+             with open(cfg) as f:
+                 self.yaml = yaml.load(f, Loader=yaml.SafeLoader)  # model dict
+
+         # Define model
+         ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
+         if nc and nc != self.yaml['nc']:
+             logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
+             self.yaml['nc'] = nc  # override yaml value
+         self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
+         self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
+         # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
+
+         # Build strides, anchors
+         m = self.model[-1]  # Detect()
+         if isinstance(m, Detect):
+             s = 256  # 2x min stride
+             m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
+             m.anchors /= m.stride.view(-1, 1, 1)
+             check_anchor_order(m)
+             self.stride = m.stride
+             self._initialize_biases()  # only run once
+             # print('Strides: %s' % m.stride.tolist())
+
+         # Init weights, biases
+         initialize_weights(self)
+         self.info()
+         logger.info('')
+
+     def forward(self, x, augment=False, profile=False):
+         if augment:
+             img_size = x.shape[-2:]  # height, width
+             s = [1, 0.83, 0.67]  # scales
+             f = [None, 3, None]  # flips (2-ud, 3-lr)
+             y = []  # outputs
+             for si, fi in zip(s, f):
+                 xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
+                 yi = self.forward_once(xi)[0]  # forward
+                 # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
+                 yi[..., :4] /= si  # de-scale
+                 if fi == 2:
+                     yi[..., 1] = img_size[0] - 1 - yi[..., 1]  # de-flip ud
+                 elif fi == 3:
+                     yi[..., 0] = img_size[1] - 1 - yi[..., 0]  # de-flip lr
+                 y.append(yi)
+             return torch.cat(y, 1), None  # augmented inference, train
+         else:
+             return self.forward_once(x, profile)  # single-scale inference, train
+
+     def forward_once(self, x, profile=False):
+         y, dt = [], []  # outputs
+         for m in self.model:
+             if m.f != -1:  # if not from previous layer
+                 x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
+
+             if profile:
+                 o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPS
+                 t = time_synchronized()
+                 for _ in range(10):
+                     _ = m(x)
+                 dt.append((time_synchronized() - t) * 100)
+                 print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
+
+             x = m(x)  # run
+             y.append(x if m.i in self.save else None)  # save output
+
+         if profile:
+             print('%.1fms total' % sum(dt))
+         return x
+
+     def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
+         # https://arxiv.org/abs/1708.02002 section 3.3
+         # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
+         m = self.model[-1]  # Detect() module
+         for mi, s in zip(m.m, m.stride):  # from
+             b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
+             b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
+             b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
+             mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+     def _print_biases(self):
+         m = self.model[-1]  # Detect() module
+         for mi in m.m:  # from
+             b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
+             print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
+
+     # def _print_weights(self):
+     #     for m in self.model.modules():
+     #         if type(m) is Bottleneck:
+     #             print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights
+
+     def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
+         print('Fusing layers... ')
+         for m in self.model.modules():
+             if type(m) is Conv and hasattr(m, 'bn'):
+                 m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
+                 delattr(m, 'bn')  # remove batchnorm
+                 m.forward = m.fuseforward  # update forward
+         self.info()
+         return self
+
+     def nms(self, mode=True):  # add or remove NMS module
+         present = type(self.model[-1]) is NMS  # last layer is NMS
+         if mode and not present:
+             print('Adding NMS... ')
+             m = NMS()  # module
+             m.f = -1  # from
+             m.i = self.model[-1].i + 1  # index
+             self.model.add_module(name='%s' % m.i, module=m)  # add
+             self.eval()
+         elif not mode and present:
+             print('Removing NMS... ')
+             self.model = self.model[:-1]  # remove
+         return self
+
+     def autoshape(self):  # add autoShape module
+         print('Adding autoShape... ')
+         m = autoShape(self)  # wrap model
+         copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
+         return m
+
+     def info(self, verbose=False, img_size=640):  # print model information
+         model_info(self, verbose, img_size)
+
+
+ def parse_model(d, ch):  # model_dict, input_channels(3)
+     logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
+     anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
+     na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
+     no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
+
+     layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
+     for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
+         m = eval(m) if isinstance(m, str) else m  # eval strings
+         for j, a in enumerate(args):
+             try:
+                 args[j] = eval(a) if isinstance(a, str) else a  # eval strings
+             except:
+                 pass
+
+         n = max(round(n * gd), 1) if n > 1 else n  # depth gain
+         if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
+                  C3]:
+             c1, c2 = ch[f], args[0]
+             if c2 != no:  # if not output
+                 c2 = make_divisible(c2 * gw, 8)
+
+             args = [c1, c2, *args[1:]]
+             if m in [BottleneckCSP, C3]:
+                 args.insert(2, n)  # number of repeats
+                 n = 1
+         elif m is nn.BatchNorm2d:
+             args = [ch[f]]
+         elif m is Concat:
+             c2 = sum([ch[x] for x in f])
+         elif m is Detect:
+             args.append([ch[x] for x in f])
+             if isinstance(args[1], int):  # number of anchors
+                 args[1] = [list(range(args[1] * 2))] * len(f)
+         elif m is Contract:
+             c2 = ch[f] * args[0] ** 2
+         elif m is Expand:
+             c2 = ch[f] // args[0] ** 2
+         else:
+             c2 = ch[f]
+
+         m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
+         t = str(m)[8:-2].replace('__main__.', '')  # module type
+         np = sum([x.numel() for x in m_.parameters()])  # number params
+         m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
+         logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args))  # print
+         save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
+         layers.append(m_)
+         if i == 0:
+             ch = []
+         ch.append(c2)
+     return nn.Sequential(*layers), sorted(save)
+
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
+     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+     opt = parser.parse_args()
+     opt.cfg = check_file(opt.cfg)  # check file
+     set_logging()
+     device = select_device(opt.device)
+
+     # Create model
+     model = Model(opt.cfg).to(device)
+     model.train()
+
+     # Profile
+     # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
+     # y = model(img, profile=True)
+
+     # Tensorboard
+     # from torch.utils.tensorboard import SummaryWriter
+     # tb_writer = SummaryWriter()
+     # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
+     # tb_writer.add_graph(model.model, img)  # add model to tensorboard
+     # tb_writer.add_image('test', img[0], dataformats='CWH')  # add model to tensorboard
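A small construction and inference sketch for the Model class above (it assumes models/yolov5s.yaml is present, as in this repo):

    import torch
    from models.yolo import Model

    model = Model('models/yolov5s.yaml').eval()
    with torch.no_grad():
        pred, _ = model(torch.zeros(1, 3, 640, 640))  # decoded predictions from Detect()
    print(pred.shape)  # (1, 25200, 85): (80*80 + 40*40 + 20*20) cells * 3 anchors, 85 = 80 classes + 5

The per-cell decode in Detect() can also be checked by hand (illustrative numbers only): with stride 8, grid cell (10, 20) and anchor (16, 30), sigmoid outputs (0.6, 0.7, 0.55, 0.5) map to x = (0.6*2 - 0.5 + 10)*8 = 85.6, y = (0.7*2 - 0.5 + 20)*8 = 167.2, w = (0.55*2)**2 * 16 = 19.36, h = (0.5*2)**2 * 30 = 30.0.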
models/yolov5l.yaml ADDED
@@ -0,0 +1,48 @@
+ # parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.0 # model depth multiple
+ width_multiple: 1.0 # layer channel multiple
+
+ # anchors
+ anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+ # YOLOv5 backbone
+ backbone:
+ # [from, number, module, args]
+ [[-1, 1, Focus, [64, 3]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 9, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 1, SPP, [1024, [5, 9, 13]]],
+ [-1, 3, C3, [1024, False]], # 9
+ ]
+
+ # YOLOv5 head
+ head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
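For reference, the depth/width multiples above interact with parse_model() in models/yolo.py: a module repeat count n scales as max(round(n * depth_multiple), 1), and an output channel count c scales as make_divisible(c * width_multiple, 8). So for the yolov5s multiples (0.33/0.50), a '9' in this table becomes 3 repeats and a 1024-channel layer becomes 512 channels; for yolov5l (1.0/1.0, above) the table is used as-is.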