Spaces:
Runtime error
Runtime error
Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .DS_Store +0 -0
- .gitattributes +1 -0
- tool/.DS_Store +0 -0
- tool/LICENSE +201 -0
- tool/README.md +367 -0
- tool/__pycache__/detector.cpython-310.pyc +0 -0
- tool/__pycache__/segmentor.cpython-310.pyc +0 -0
- tool/__pycache__/transfer_tools.cpython-310.pyc +0 -0
- tool/demo/create_coco_dataset.py +83 -0
- tool/demo/gradio_app.py +125 -0
- tool/demo/image_editing_with_groundingdino_gligen.ipynb +0 -0
- tool/demo/image_editing_with_groundingdino_stablediffusion.ipynb +0 -0
- tool/demo/inference_on_a_image.py +214 -0
- tool/demo/test_ap_on_coco.py +233 -0
- tool/detector.py +93 -0
- tool/environment.yaml +248 -0
- tool/groundingdino.egg-info/PKG-INFO +209 -0
- tool/groundingdino.egg-info/SOURCES.txt +42 -0
- tool/groundingdino.egg-info/dependency_links.txt +1 -0
- tool/groundingdino.egg-info/requires.txt +10 -0
- tool/groundingdino.egg-info/top_level.txt +1 -0
- tool/groundingdino/.DS_Store +0 -0
- tool/groundingdino/__init__.py +0 -0
- tool/groundingdino/__pycache__/__init__.cpython-310.pyc +0 -0
- tool/groundingdino/config/GroundingDINO_SwinB_cfg.py +43 -0
- tool/groundingdino/config/GroundingDINO_SwinT_OGC.py +43 -0
- tool/groundingdino/config/__init__.py +0 -0
- tool/groundingdino/datasets/__init__.py +0 -0
- tool/groundingdino/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
- tool/groundingdino/datasets/__pycache__/transforms.cpython-310.pyc +0 -0
- tool/groundingdino/datasets/cocogrounding_eval.py +269 -0
- tool/groundingdino/datasets/transforms.py +311 -0
- tool/groundingdino/models/.DS_Store +0 -0
- tool/groundingdino/models/GroundingDINO/__init__.py +15 -0
- tool/groundingdino/models/GroundingDINO/__pycache__/__init__.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/__pycache__/bertwarper.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/__pycache__/fuse_modules.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/__pycache__/groundingdino.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/__pycache__/ms_deform_attn.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/__pycache__/transformer.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/__pycache__/transformer_vanilla.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/__pycache__/utils.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/backbone/__init__.py +1 -0
- tool/groundingdino/models/GroundingDINO/backbone/__pycache__/__init__.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/backbone/__pycache__/backbone.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/backbone/__pycache__/position_encoding.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/backbone/__pycache__/swin_transformer.cpython-310.pyc +0 -0
- tool/groundingdino/models/GroundingDINO/backbone/backbone.py +221 -0
- tool/groundingdino/models/GroundingDINO/backbone/position_encoding.py +186 -0
- tool/groundingdino/models/GroundingDINO/backbone/swin_transformer.py +802 -0
.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
.gitattributes
CHANGED
|
@@ -18,3 +18,4 @@ sam/src/groundingdino/.asset/hero_figure.png filter=lfs diff=lfs merge=lfs -text
|
|
| 18 |
tutorial/img/click_segment.jpg filter=lfs diff=lfs merge=lfs -text
|
| 19 |
tutorial/img/input_video.jpg filter=lfs diff=lfs merge=lfs -text
|
| 20 |
tutorial/img/start_tracking.jpg filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 18 |
tutorial/img/click_segment.jpg filter=lfs diff=lfs merge=lfs -text
|
| 19 |
tutorial/img/input_video.jpg filter=lfs diff=lfs merge=lfs -text
|
| 20 |
tutorial/img/start_tracking.jpg filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
tool/weights/groundingdino_swint_ogc.pth filter=lfs diff=lfs merge=lfs -text
|
tool/.DS_Store
ADDED
|
Binary file (8.2 kB). View file
|
|
|
tool/LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright 2023 - present, IDEA Research.
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
tool/README.md
ADDED
|
@@ -0,0 +1,367 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
<img src="./.asset/grounding_dino_logo.png" width="30%">
|
| 3 |
+
</div>
|
| 4 |
+
|
| 5 |
+
# :sauropod: Grounding DINO
|
| 6 |
+
|
| 7 |
+
[](https://paperswithcode.com/sota/zero-shot-object-detection-on-mscoco?p=grounding-dino-marrying-dino-with-grounded) [](https://paperswithcode.com/sota/zero-shot-object-detection-on-odinw?p=grounding-dino-marrying-dino-with-grounded) \
|
| 8 |
+
[](https://paperswithcode.com/sota/object-detection-on-coco-minival?p=grounding-dino-marrying-dino-with-grounded) [](https://paperswithcode.com/sota/object-detection-on-coco?p=grounding-dino-marrying-dino-with-grounded)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
**[IDEA-CVR, IDEA-Research](https://github.com/IDEA-Research)**
|
| 12 |
+
|
| 13 |
+
[Shilong Liu](http://www.lsl.zone/), [Zhaoyang Zeng](https://scholar.google.com/citations?user=U_cvvUwAAAAJ&hl=zh-CN&oi=ao), [Tianhe Ren](https://rentainhe.github.io/), [Feng Li](https://scholar.google.com/citations?user=ybRe9GcAAAAJ&hl=zh-CN), [Hao Zhang](https://scholar.google.com/citations?user=B8hPxMQAAAAJ&hl=zh-CN), [Jie Yang](https://github.com/yangjie-cv), [Chunyuan Li](https://scholar.google.com/citations?user=Zd7WmXUAAAAJ&hl=zh-CN&oi=ao), [Jianwei Yang](https://jwyang.github.io/), [Hang Su](https://scholar.google.com/citations?hl=en&user=dxN1_X0AAAAJ&view_op=list_works&sortby=pubdate), [Jun Zhu](https://scholar.google.com/citations?hl=en&user=axsP38wAAAAJ), [Lei Zhang](https://www.leizhang.org/)<sup>:email:</sup>.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
[[`Paper`](https://arxiv.org/abs/2303.05499)] [[`Demo`](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo)] [[`BibTex`](#black_nib-citation)]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
PyTorch implementation and pretrained models for Grounding DINO. For details, see the paper **[Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection](https://arxiv.org/abs/2303.05499)**.
|
| 20 |
+
|
| 21 |
+
## :sun_with_face: Helpful Tutorial
|
| 22 |
+
|
| 23 |
+
- :grapes: [[Read our arXiv Paper](https://arxiv.org/abs/2303.05499)]
|
| 24 |
+
- :apple: [[Watch our simple introduction video on YouTube](https://youtu.be/wxWDt5UiwY8)]
|
| 25 |
+
- :blossom: [[Try the Colab Demo](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb)]
|
| 26 |
+
- :sunflower: [[Try our Official Huggingface Demo](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo)]
|
| 27 |
+
- :maple_leaf: [[Watch the Step by Step Tutorial about GroundingDINO by Roboflow AI](https://youtu.be/cMa77r3YrDk)]
|
| 28 |
+
- :mushroom: [[GroundingDINO: Automated Dataset Annotation and Evaluation by Roboflow AI](https://youtu.be/C4NqaRBz_Kw)]
|
| 29 |
+
- :hibiscus: [[Accelerate Image Annotation with SAM and GroundingDINO by Roboflow AI](https://youtu.be/oEQYStnF2l8)]
|
| 30 |
+
- :white_flower: [[Autodistill: Train YOLOv8 with ZERO Annotations based on Grounding-DINO and Grounded-SAM by Roboflow AI](https://github.com/autodistill/autodistill)]
|
| 31 |
+
|
| 32 |
+
<!-- Grounding DINO Methods |
|
| 33 |
+
[](https://arxiv.org/abs/2303.05499)
|
| 34 |
+
[](https://youtu.be/wxWDt5UiwY8) -->
|
| 35 |
+
|
| 36 |
+
<!-- Grounding DINO Demos |
|
| 37 |
+
[](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb) -->
|
| 38 |
+
<!-- [](https://youtu.be/cMa77r3YrDk)
|
| 39 |
+
[](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo)
|
| 40 |
+
[](https://youtu.be/oEQYStnF2l8)
|
| 41 |
+
[](https://youtu.be/C4NqaRBz_Kw) -->
|
| 42 |
+
|
| 43 |
+
## :sparkles: Highlight Projects
|
| 44 |
+
|
| 45 |
+
- [Semantic-SAM: a universal image segmentation model to enable segment and recognize anything at any desired granularity.](https://github.com/UX-Decoder/Semantic-SAM),
|
| 46 |
+
- [DetGPT: Detect What You Need via Reasoning](https://github.com/OptimalScale/DetGPT)
|
| 47 |
+
- [Grounded-SAM: Marrying Grounding DINO with Segment Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything)
|
| 48 |
+
- [Grounding DINO with Stable Diffusion](demo/image_editing_with_groundingdino_stablediffusion.ipynb)
|
| 49 |
+
- [Grounding DINO with GLIGEN for Controllable Image Editing](demo/image_editing_with_groundingdino_gligen.ipynb)
|
| 50 |
+
- [OpenSeeD: A Simple and Strong Openset Segmentation Model](https://github.com/IDEA-Research/OpenSeeD)
|
| 51 |
+
- [SEEM: Segment Everything Everywhere All at Once](https://github.com/UX-Decoder/Segment-Everything-Everywhere-All-At-Once)
|
| 52 |
+
- [X-GPT: Conversational Visual Agent supported by X-Decoder](https://github.com/microsoft/X-Decoder/tree/xgpt)
|
| 53 |
+
- [GLIGEN: Open-Set Grounded Text-to-Image Generation](https://github.com/gligen/GLIGEN)
|
| 54 |
+
- [LLaVA: Large Language and Vision Assistant](https://github.com/haotian-liu/LLaVA)
|
| 55 |
+
|
| 56 |
+
<!-- Extensions | [Grounding DINO with Segment Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything); [Grounding DINO with Stable Diffusion](demo/image_editing_with_groundingdino_stablediffusion.ipynb); [Grounding DINO with GLIGEN](demo/image_editing_with_groundingdino_gligen.ipynb) -->
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
<!-- Official PyTorch implementation of [Grounding DINO](https://arxiv.org/abs/2303.05499), a stronger open-set object detector. Code is available now! -->
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
## :bulb: Highlight
|
| 64 |
+
|
| 65 |
+
- **Open-Set Detection.** Detect **everything** with language!
|
| 66 |
+
- **High Performancce.** COCO zero-shot **52.5 AP** (training without COCO data!). COCO fine-tune **63.0 AP**.
|
| 67 |
+
- **Flexible.** Collaboration with Stable Diffusion for Image Editting.
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
## :fire: News
|
| 73 |
+
- **`2023/07/18`**: We release [Semantic-SAM](https://github.com/UX-Decoder/Semantic-SAM), a universal image segmentation model to enable segment and recognize anything at any desired granularity. **Code** and **checkpoint** are available!
|
| 74 |
+
- **`2023/06/17`**: We provide an example to evaluate Grounding DINO on COCO zero-shot performance.
|
| 75 |
+
- **`2023/04/15`**: Refer to [CV in the Wild Readings](https://github.com/Computer-Vision-in-the-Wild/CVinW_Readings) for those who are interested in open-set recognition!
|
| 76 |
+
- **`2023/04/08`**: We release [demos](demo/image_editing_with_groundingdino_gligen.ipynb) to combine [Grounding DINO](https://arxiv.org/abs/2303.05499) with [GLIGEN](https://github.com/gligen/GLIGEN) for more controllable image editings.
|
| 77 |
+
- **`2023/04/08`**: We release [demos](demo/image_editing_with_groundingdino_stablediffusion.ipynb) to combine [Grounding DINO](https://arxiv.org/abs/2303.05499) with [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) for image editings.
|
| 78 |
+
- **`2023/04/06`**: We build a new demo by marrying GroundingDINO with [Segment-Anything](https://github.com/facebookresearch/segment-anything) named **[Grounded-Segment-Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything)** aims to support segmentation in GroundingDINO.
|
| 79 |
+
- **`2023/03/28`**: A YouTube [video](https://youtu.be/cMa77r3YrDk) about Grounding DINO and basic object detection prompt engineering. [[SkalskiP](https://github.com/SkalskiP)]
|
| 80 |
+
- **`2023/03/28`**: Add a [demo](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo) on Hugging Face Space!
|
| 81 |
+
- **`2023/03/27`**: Support CPU-only mode. Now the model can run on machines without GPUs.
|
| 82 |
+
- **`2023/03/25`**: A [demo](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb) for Grounding DINO is available at Colab. [[SkalskiP](https://github.com/SkalskiP)]
|
| 83 |
+
- **`2023/03/22`**: Code is available Now!
|
| 84 |
+
|
| 85 |
+
<details open>
|
| 86 |
+
<summary><font size="4">
|
| 87 |
+
Description
|
| 88 |
+
</font></summary>
|
| 89 |
+
<a href="https://arxiv.org/abs/2303.05499">Paper</a> introduction.
|
| 90 |
+
<img src=".asset/hero_figure.png" alt="ODinW" width="100%">
|
| 91 |
+
Marrying <a href="https://github.com/IDEA-Research/GroundingDINO">Grounding DINO</a> and <a href="https://github.com/gligen/GLIGEN">GLIGEN</a>
|
| 92 |
+
<img src="https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/GD_GLIGEN.png" alt="gd_gligen" width="100%">
|
| 93 |
+
</details>
|
| 94 |
+
|
| 95 |
+
## :star: Explanations/Tips for Grounding DINO Inputs and Outputs
|
| 96 |
+
- Grounding DINO accepts an `(image, text)` pair as inputs.
|
| 97 |
+
- It outputs `900` (by default) object boxes. Each box has similarity scores across all input words. (as shown in Figures below.)
|
| 98 |
+
- We defaultly choose the boxes whose highest similarities are higher than a `box_threshold`.
|
| 99 |
+
- We extract the words whose similarities are higher than the `text_threshold` as predicted labels.
|
| 100 |
+
- If you want to obtain objects of specific phrases, like the `dogs` in the sentence `two dogs with a stick.`, you can select the boxes with highest text similarities with `dogs` as final outputs.
|
| 101 |
+
- Note that each word can be split to **more than one** tokens with different tokenlizers. The number of words in a sentence may not equal to the number of text tokens.
|
| 102 |
+
- We suggest separating different category names with `.` for Grounding DINO.
|
| 103 |
+

|
| 104 |
+

|
| 105 |
+
|
| 106 |
+
## :label: TODO
|
| 107 |
+
|
| 108 |
+
- [x] Release inference code and demo.
|
| 109 |
+
- [x] Release checkpoints.
|
| 110 |
+
- [x] Grounding DINO with Stable Diffusion and GLIGEN demos.
|
| 111 |
+
- [ ] Release training codes.
|
| 112 |
+
|
| 113 |
+
## :hammer_and_wrench: Install
|
| 114 |
+
|
| 115 |
+
**Note:**
|
| 116 |
+
|
| 117 |
+
0. If you have a CUDA environment, please make sure the environment variable `CUDA_HOME` is set. It will be compiled under CPU-only mode if no CUDA available.
|
| 118 |
+
|
| 119 |
+
Please make sure following the installation steps strictly, otherwise the program may produce:
|
| 120 |
+
```bash
|
| 121 |
+
NameError: name '_C' is not defined
|
| 122 |
+
```
|
| 123 |
+
|
| 124 |
+
If this happened, please reinstalled the groundingDINO by reclone the git and do all the installation steps again.
|
| 125 |
+
|
| 126 |
+
#### how to check cuda:
|
| 127 |
+
```bash
|
| 128 |
+
echo $CUDA_HOME
|
| 129 |
+
```
|
| 130 |
+
If it print nothing, then it means you haven't set up the path/
|
| 131 |
+
|
| 132 |
+
Run this so the environment variable will be set under current shell.
|
| 133 |
+
```bash
|
| 134 |
+
export CUDA_HOME=/path/to/cuda-11.3
|
| 135 |
+
```
|
| 136 |
+
|
| 137 |
+
Notice the version of cuda should be aligned with your CUDA runtime, for there might exists multiple cuda at the same time.
|
| 138 |
+
|
| 139 |
+
If you want to set the CUDA_HOME permanently, store it using:
|
| 140 |
+
|
| 141 |
+
```bash
|
| 142 |
+
echo 'export CUDA_HOME=/path/to/cuda' >> ~/.bashrc
|
| 143 |
+
```
|
| 144 |
+
after that, source the bashrc file and check CUDA_HOME:
|
| 145 |
+
```bash
|
| 146 |
+
source ~/.bashrc
|
| 147 |
+
echo $CUDA_HOME
|
| 148 |
+
```
|
| 149 |
+
|
| 150 |
+
In this example, /path/to/cuda-11.3 should be replaced with the path where your CUDA toolkit is installed. You can find this by typing **which nvcc** in your terminal:
|
| 151 |
+
|
| 152 |
+
For instance,
|
| 153 |
+
if the output is /usr/local/cuda/bin/nvcc, then:
|
| 154 |
+
```bash
|
| 155 |
+
export CUDA_HOME=/usr/local/cuda
|
| 156 |
+
```
|
| 157 |
+
**Installation:**
|
| 158 |
+
|
| 159 |
+
1. Clone the GroundingDINO repository from GitHub.
|
| 160 |
+
|
| 161 |
+
```bash
|
| 162 |
+
git clone https://github.com/IDEA-Research/GroundingDINO.git
|
| 163 |
+
```
|
| 164 |
+
|
| 165 |
+
2. Change the current directory to the GroundingDINO folder.
|
| 166 |
+
|
| 167 |
+
```bash
|
| 168 |
+
cd GroundingDINO/
|
| 169 |
+
```
|
| 170 |
+
|
| 171 |
+
3. Install the required dependencies in the current directory.
|
| 172 |
+
|
| 173 |
+
```bash
|
| 174 |
+
pip install -e .
|
| 175 |
+
```
|
| 176 |
+
|
| 177 |
+
4. Download pre-trained model weights.
|
| 178 |
+
|
| 179 |
+
```bash
|
| 180 |
+
mkdir weights
|
| 181 |
+
cd weights
|
| 182 |
+
wget -q https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth
|
| 183 |
+
cd ..
|
| 184 |
+
```
|
| 185 |
+
|
| 186 |
+
## :arrow_forward: Demo
|
| 187 |
+
Check your GPU ID (only if you're using a GPU)
|
| 188 |
+
|
| 189 |
+
```bash
|
| 190 |
+
nvidia-smi
|
| 191 |
+
```
|
| 192 |
+
Replace `{GPU ID}`, `image_you_want_to_detect.jpg`, and `"dir you want to save the output"` with appropriate values in the following command
|
| 193 |
+
```bash
|
| 194 |
+
CUDA_VISIBLE_DEVICES={GPU ID} python demo/inference_on_a_image.py \
|
| 195 |
+
-c groundingdino/config/GroundingDINO_SwinT_OGC.py \
|
| 196 |
+
-p weights/groundingdino_swint_ogc.pth \
|
| 197 |
+
-i image_you_want_to_detect.jpg \
|
| 198 |
+
-o "dir you want to save the output" \
|
| 199 |
+
-t "chair"
|
| 200 |
+
[--cpu-only] # open it for cpu mode
|
| 201 |
+
```
|
| 202 |
+
|
| 203 |
+
If you would like to specify the phrases to detect, here is a demo:
|
| 204 |
+
```bash
|
| 205 |
+
CUDA_VISIBLE_DEVICES={GPU ID} python demo/inference_on_a_image.py \
|
| 206 |
+
-c groundingdino/config/GroundingDINO_SwinT_OGC.py \
|
| 207 |
+
-p ./groundingdino_swint_ogc.pth \
|
| 208 |
+
-i .asset/cat_dog.jpeg \
|
| 209 |
+
-o logs/1111 \
|
| 210 |
+
-t "There is a cat and a dog in the image ." \
|
| 211 |
+
--token_spans "[[[9, 10], [11, 14]], [[19, 20], [21, 24]]]"
|
| 212 |
+
[--cpu-only] # open it for cpu mode
|
| 213 |
+
```
|
| 214 |
+
The token_spans specify the start and end positions of a phrases. For example, the first phrase is `[[9, 10], [11, 14]]`. `"There is a cat and a dog in the image ."[9:10] = 'a'`, `"There is a cat and a dog in the image ."[11:14] = 'cat'`. Hence it refers to the phrase `a cat` . Similarly, the `[[19, 20], [21, 24]]` refers to the phrase `a dog`.
|
| 215 |
+
|
| 216 |
+
See the `demo/inference_on_a_image.py` for more details.
|
| 217 |
+
|
| 218 |
+
**Running with Python:**
|
| 219 |
+
|
| 220 |
+
```python
|
| 221 |
+
from groundingdino.util.inference import load_model, load_image, predict, annotate
|
| 222 |
+
import cv2
|
| 223 |
+
|
| 224 |
+
model = load_model("groundingdino/config/GroundingDINO_SwinT_OGC.py", "weights/groundingdino_swint_ogc.pth")
|
| 225 |
+
IMAGE_PATH = "weights/dog-3.jpeg"
|
| 226 |
+
TEXT_PROMPT = "chair . person . dog ."
|
| 227 |
+
BOX_TRESHOLD = 0.35
|
| 228 |
+
TEXT_TRESHOLD = 0.25
|
| 229 |
+
|
| 230 |
+
image_source, image = load_image(IMAGE_PATH)
|
| 231 |
+
|
| 232 |
+
boxes, logits, phrases = predict(
|
| 233 |
+
model=model,
|
| 234 |
+
image=image,
|
| 235 |
+
caption=TEXT_PROMPT,
|
| 236 |
+
box_threshold=BOX_TRESHOLD,
|
| 237 |
+
text_threshold=TEXT_TRESHOLD
|
| 238 |
+
)
|
| 239 |
+
|
| 240 |
+
annotated_frame = annotate(image_source=image_source, boxes=boxes, logits=logits, phrases=phrases)
|
| 241 |
+
cv2.imwrite("annotated_image.jpg", annotated_frame)
|
| 242 |
+
```
|
| 243 |
+
**Web UI**
|
| 244 |
+
|
| 245 |
+
We also provide a demo code to integrate Grounding DINO with Gradio Web UI. See the file `demo/gradio_app.py` for more details.
|
| 246 |
+
|
| 247 |
+
**Notebooks**
|
| 248 |
+
|
| 249 |
+
- We release [demos](demo/image_editing_with_groundingdino_gligen.ipynb) to combine [Grounding DINO](https://arxiv.org/abs/2303.05499) with [GLIGEN](https://github.com/gligen/GLIGEN) for more controllable image editings.
|
| 250 |
+
- We release [demos](demo/image_editing_with_groundingdino_stablediffusion.ipynb) to combine [Grounding DINO](https://arxiv.org/abs/2303.05499) with [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) for image editings.
|
| 251 |
+
|
| 252 |
+
## COCO Zero-shot Evaluations
|
| 253 |
+
|
| 254 |
+
We provide an example to evaluate Grounding DINO zero-shot performance on COCO. The results should be **48.5**.
|
| 255 |
+
|
| 256 |
+
```bash
|
| 257 |
+
CUDA_VISIBLE_DEVICES=0 \
|
| 258 |
+
python demo/test_ap_on_coco.py \
|
| 259 |
+
-c groundingdino/config/GroundingDINO_SwinT_OGC.py \
|
| 260 |
+
-p weights/groundingdino_swint_ogc.pth \
|
| 261 |
+
--anno_path /path/to/annotations/ie/instances_val2017.json \
|
| 262 |
+
--image_dir /path/to/imagedir/ie/val2017
|
| 263 |
+
```
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
## :luggage: Checkpoints
|
| 267 |
+
|
| 268 |
+
<!-- insert a table -->
|
| 269 |
+
<table>
|
| 270 |
+
<thead>
|
| 271 |
+
<tr style="text-align: right;">
|
| 272 |
+
<th></th>
|
| 273 |
+
<th>name</th>
|
| 274 |
+
<th>backbone</th>
|
| 275 |
+
<th>Data</th>
|
| 276 |
+
<th>box AP on COCO</th>
|
| 277 |
+
<th>Checkpoint</th>
|
| 278 |
+
<th>Config</th>
|
| 279 |
+
</tr>
|
| 280 |
+
</thead>
|
| 281 |
+
<tbody>
|
| 282 |
+
<tr>
|
| 283 |
+
<th>1</th>
|
| 284 |
+
<td>GroundingDINO-T</td>
|
| 285 |
+
<td>Swin-T</td>
|
| 286 |
+
<td>O365,GoldG,Cap4M</td>
|
| 287 |
+
<td>48.4 (zero-shot) / 57.2 (fine-tune)</td>
|
| 288 |
+
<td><a href="https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth">GitHub link</a> | <a href="https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/groundingdino_swint_ogc.pth">HF link</a></td>
|
| 289 |
+
<td><a href="https://github.com/IDEA-Research/GroundingDINO/blob/main/groundingdino/config/GroundingDINO_SwinT_OGC.py">link</a></td>
|
| 290 |
+
</tr>
|
| 291 |
+
<tr>
|
| 292 |
+
<th>2</th>
|
| 293 |
+
<td>GroundingDINO-B</td>
|
| 294 |
+
<td>Swin-B</td>
|
| 295 |
+
<td>COCO,O365,GoldG,Cap4M,OpenImage,ODinW-35,RefCOCO</td>
|
| 296 |
+
<td>56.7 </td>
|
| 297 |
+
<td><a href="https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha2/groundingdino_swinb_cogcoor.pth">GitHub link</a> | <a href="https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/groundingdino_swinb_cogcoor.pth">HF link</a>
|
| 298 |
+
<td><a href="https://github.com/IDEA-Research/GroundingDINO/blob/main/groundingdino/config/GroundingDINO_SwinB.cfg.py">link</a></td>
|
| 299 |
+
</tr>
|
| 300 |
+
</tbody>
|
| 301 |
+
</table>
|
| 302 |
+
|
| 303 |
+
## :medal_military: Results
|
| 304 |
+
|
| 305 |
+
<details open>
|
| 306 |
+
<summary><font size="4">
|
| 307 |
+
COCO Object Detection Results
|
| 308 |
+
</font></summary>
|
| 309 |
+
<img src=".asset/COCO.png" alt="COCO" width="100%">
|
| 310 |
+
</details>
|
| 311 |
+
|
| 312 |
+
<details open>
|
| 313 |
+
<summary><font size="4">
|
| 314 |
+
ODinW Object Detection Results
|
| 315 |
+
</font></summary>
|
| 316 |
+
<img src=".asset/ODinW.png" alt="ODinW" width="100%">
|
| 317 |
+
</details>
|
| 318 |
+
|
| 319 |
+
<details open>
|
| 320 |
+
<summary><font size="4">
|
| 321 |
+
Marrying Grounding DINO with <a href="https://github.com/Stability-AI/StableDiffusion">Stable Diffusion</a> for Image Editing
|
| 322 |
+
</font></summary>
|
| 323 |
+
See our example <a href="https://github.com/IDEA-Research/GroundingDINO/blob/main/demo/image_editing_with_groundingdino_stablediffusion.ipynb">notebook</a> for more details.
|
| 324 |
+
<img src=".asset/GD_SD.png" alt="GD_SD" width="100%">
|
| 325 |
+
</details>
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
<details open>
|
| 329 |
+
<summary><font size="4">
|
| 330 |
+
Marrying Grounding DINO with <a href="https://github.com/gligen/GLIGEN">GLIGEN</a> for more Detailed Image Editing.
|
| 331 |
+
</font></summary>
|
| 332 |
+
See our example <a href="https://github.com/IDEA-Research/GroundingDINO/blob/main/demo/image_editing_with_groundingdino_gligen.ipynb">notebook</a> for more details.
|
| 333 |
+
<img src=".asset/GD_GLIGEN.png" alt="GD_GLIGEN" width="100%">
|
| 334 |
+
</details>
|
| 335 |
+
|
| 336 |
+
## :sauropod: Model: Grounding DINO
|
| 337 |
+
|
| 338 |
+
Includes: a text backbone, an image backbone, a feature enhancer, a language-guided query selection, and a cross-modality decoder.
|
| 339 |
+
|
| 340 |
+

|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
## :hearts: Acknowledgement
|
| 344 |
+
|
| 345 |
+
Our model is related to [DINO](https://github.com/IDEA-Research/DINO) and [GLIP](https://github.com/microsoft/GLIP). Thanks for their great work!
|
| 346 |
+
|
| 347 |
+
We also thank great previous work including DETR, Deformable DETR, SMCA, Conditional DETR, Anchor DETR, Dynamic DETR, DAB-DETR, DN-DETR, etc. More related work are available at [Awesome Detection Transformer](https://github.com/IDEACVR/awesome-detection-transformer). A new toolbox [detrex](https://github.com/IDEA-Research/detrex) is available as well.
|
| 348 |
+
|
| 349 |
+
Thanks [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) and [GLIGEN](https://github.com/gligen/GLIGEN) for their awesome models.
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
## :black_nib: Citation
|
| 353 |
+
|
| 354 |
+
If you find our work helpful for your research, please consider citing the following BibTeX entry.
|
| 355 |
+
|
| 356 |
+
```bibtex
|
| 357 |
+
@article{liu2023grounding,
|
| 358 |
+
title={Grounding dino: Marrying dino with grounded pre-training for open-set object detection},
|
| 359 |
+
author={Liu, Shilong and Zeng, Zhaoyang and Ren, Tianhe and Li, Feng and Zhang, Hao and Yang, Jie and Li, Chunyuan and Yang, Jianwei and Su, Hang and Zhu, Jun and others},
|
| 360 |
+
journal={arXiv preprint arXiv:2303.05499},
|
| 361 |
+
year={2023}
|
| 362 |
+
}
|
| 363 |
+
```
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
|
tool/__pycache__/detector.cpython-310.pyc
ADDED
|
Binary file (3.73 kB). View file
|
|
|
tool/__pycache__/segmentor.cpython-310.pyc
ADDED
|
Binary file (3.01 kB). View file
|
|
|
tool/__pycache__/transfer_tools.cpython-310.pyc
ADDED
|
Binary file (1.65 kB). View file
|
|
|
tool/demo/create_coco_dataset.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import typer
|
| 2 |
+
from groundingdino.util.inference import load_model, load_image, predict
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
import torchvision
|
| 5 |
+
import torch
|
| 6 |
+
import fiftyone as fo
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def main(
        image_directory: str = 'test_grounding_dino',
        text_prompt: str = 'bus, car',
        box_threshold: float = 0.15,
        text_threshold: float = 0.10,
        export_dataset: bool = False,
        view_dataset: bool = False,
        export_annotated_images: bool = True,
        weights_path: str = "groundingdino_swint_ogc.pth",
        config_path: str = "../../GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py",
        subsample: int = None,
):
    """Label a directory of images with Grounding DINO and build a COCO dataset.

    Runs open-vocabulary detection on every image, stores the detections on a
    FiftyOne dataset, and optionally views and/or exports the results.

    Args:
        image_directory: folder of input images.
        text_prompt: comma-separated phrases to detect.
        box_threshold: minimum box confidence to keep a detection.
        text_threshold: minimum text-token confidence to keep a detection.
        export_dataset: if True, export the labeled data as a COCO dataset.
        view_dataset: if True, open the FiftyOne app to inspect the results.
        export_annotated_images: if True, save images with drawn boxes to disk.
        weights_path: path to the model checkpoint.
        config_path: path to the model config file.
        subsample: if given, only label this many randomly chosen images.
    """
    model = load_model(config_path, weights_path)

    dataset = fo.Dataset.from_images_dir(image_directory)

    # Optionally restrict labeling to a random subset of the images.
    if subsample is not None and subsample < len(dataset):
        dataset = dataset.take(subsample).clone()

    for sample in tqdm(dataset):

        image_source, image = load_image(sample.filepath)

        boxes, logits, phrases = predict(
            model=model,
            image=image,
            caption=text_prompt,
            box_threshold=box_threshold,
            text_threshold=text_threshold,
        )

        detections = []

        for box, logit, phrase in zip(boxes, logits, phrases):

            # predict() returns relative (cx, cy, w, h) boxes; FiftyOne
            # expects relative (top-left x, top-left y, w, h).
            rel_box = torchvision.ops.box_convert(box, 'cxcywh', 'xywh')

            detections.append(
                fo.Detection(
                    label=phrase,
                    bounding_box=rel_box,
                    confidence=logit,
                ))

        # Store detections in a field name of your choice
        sample["detections"] = fo.Detections(detections=detections)
        sample.save()

    # loads the voxel fiftyone UI ready for viewing the dataset.
    if view_dataset:
        session = fo.launch_app(dataset)
        session.wait()

    # exports COCO dataset ready for training
    if export_dataset:
        dataset.export(
            'coco_dataset',
            dataset_type=fo.types.COCODetectionDataset,
        )

    # saves bounding boxes plotted on the input images to disk
    if export_annotated_images:
        dataset.draw_labels(
            'images_with_bounding_boxes',
            label_fields=['detections']
        )
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
if __name__ == '__main__':
|
| 83 |
+
typer.run(main)
|
tool/demo/gradio_app.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
from functools import partial
|
| 3 |
+
import cv2
|
| 4 |
+
import requests
|
| 5 |
+
import os
|
| 6 |
+
from io import BytesIO
|
| 7 |
+
from PIL import Image
|
| 8 |
+
import numpy as np
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
import warnings
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
|
| 16 |
+
# prepare the environment
|
| 17 |
+
os.system("python setup.py build develop --user")
|
| 18 |
+
os.system("pip install packaging==21.3")
|
| 19 |
+
os.system("pip install gradio")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
warnings.filterwarnings("ignore")
|
| 23 |
+
|
| 24 |
+
import gradio as gr
|
| 25 |
+
|
| 26 |
+
from groundingdino.models import build_model
|
| 27 |
+
from groundingdino.util.slconfig import SLConfig
|
| 28 |
+
from groundingdino.util.utils import clean_state_dict
|
| 29 |
+
from groundingdino.util.inference import annotate, load_image, predict
|
| 30 |
+
import groundingdino.datasets.transforms as T
|
| 31 |
+
|
| 32 |
+
from huggingface_hub import hf_hub_download
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# Use this command for evaluate the Grounding DINO model
|
| 37 |
+
config_file = "groundingdino/config/GroundingDINO_SwinT_OGC.py"
|
| 38 |
+
ckpt_repo_id = "ShilongLiu/GroundingDINO"
|
| 39 |
+
ckpt_filenmae = "groundingdino_swint_ogc.pth"
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def load_model_hf(model_config_path, repo_id, filename, device='cpu'):
    """Build a Grounding DINO model from a config file and load Hub weights.

    Downloads the checkpoint from the Hugging Face Hub, loads its state dict
    (non-strictly) into a freshly built model, and returns it in eval mode.
    """
    cfg = SLConfig.fromfile(model_config_path)
    net = build_model(cfg)
    cfg.device = device

    ckpt_path = hf_hub_download(repo_id=repo_id, filename=filename)
    state = torch.load(ckpt_path, map_location='cpu')
    load_log = net.load_state_dict(clean_state_dict(state['model']), strict=False)
    print("Model loaded from {} \n => {}".format(ckpt_path, load_log))
    net.eval()
    return net
|
| 53 |
+
|
| 54 |
+
def image_transform_grounding(init_image):
    """Return (original PIL image, normalized CHW tensor) ready for the model."""
    pipeline = T.Compose([
        T.RandomResize([800], max_size=1333),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    tensor_image, _ = pipeline(init_image, None)  # shape: 3 x h x w
    return init_image, tensor_image
|
| 62 |
+
|
| 63 |
+
def image_transform_grounding_for_vis(init_image):
    """Resize the PIL image the same way as the model input, for visualization."""
    resize_only = T.Compose([
        T.RandomResize([800], max_size=1333),
    ])
    resized, _ = resize_only(init_image, None)
    return resized
|
| 69 |
+
|
| 70 |
+
model = load_model_hf(config_file, ckpt_repo_id, ckpt_filenmae)
|
| 71 |
+
|
| 72 |
+
def run_grounding(input_image, grounding_caption, box_threshold, text_threshold):
    """Run Grounding DINO on a PIL image and return the annotated result.

    Args:
        input_image: input PIL image (any mode; converted to RGB).
        grounding_caption: text prompt describing the objects to detect.
        box_threshold: minimum box confidence to keep a detection.
        text_threshold: minimum text-token confidence for phrase matching.

    Returns:
        A PIL image with predicted boxes and phrases drawn on it.
    """
    init_image = input_image.convert("RGB")

    _, image_tensor = image_transform_grounding(init_image)
    image_pil: Image = image_transform_grounding_for_vis(init_image)

    # run grounding (CPU inference, using the globally loaded model)
    boxes, logits, phrases = predict(model, image_tensor, grounding_caption, box_threshold, text_threshold, device='cpu')
    annotated_frame = annotate(image_source=np.asarray(image_pil), boxes=boxes, logits=logits, phrases=phrases)
    # annotate() returns a BGR array (OpenCV convention); convert back to RGB
    image_with_box = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))

    return image_with_box
|
| 86 |
+
|
| 87 |
+
if __name__ == "__main__":
|
| 88 |
+
|
| 89 |
+
parser = argparse.ArgumentParser("Grounding DINO demo", add_help=True)
|
| 90 |
+
parser.add_argument("--debug", action="store_true", help="using debug mode")
|
| 91 |
+
parser.add_argument("--share", action="store_true", help="share the app")
|
| 92 |
+
args = parser.parse_args()
|
| 93 |
+
|
| 94 |
+
block = gr.Blocks().queue()
|
| 95 |
+
with block:
|
| 96 |
+
gr.Markdown("# [Grounding DINO](https://github.com/IDEA-Research/GroundingDINO)")
|
| 97 |
+
gr.Markdown("### Open-World Detection with Grounding DINO")
|
| 98 |
+
|
| 99 |
+
with gr.Row():
|
| 100 |
+
with gr.Column():
|
| 101 |
+
input_image = gr.Image(source='upload', type="pil")
|
| 102 |
+
grounding_caption = gr.Textbox(label="Detection Prompt")
|
| 103 |
+
run_button = gr.Button(label="Run")
|
| 104 |
+
with gr.Accordion("Advanced options", open=False):
|
| 105 |
+
box_threshold = gr.Slider(
|
| 106 |
+
label="Box Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001
|
| 107 |
+
)
|
| 108 |
+
text_threshold = gr.Slider(
|
| 109 |
+
label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001
|
| 110 |
+
)
|
| 111 |
+
|
| 112 |
+
with gr.Column():
|
| 113 |
+
gallery = gr.outputs.Image(
|
| 114 |
+
type="pil",
|
| 115 |
+
# label="grounding results"
|
| 116 |
+
).style(full_width=True, full_height=True)
|
| 117 |
+
# gallery = gr.Gallery(label="Generated images", show_label=False).style(
|
| 118 |
+
# grid=[1], height="auto", container=True, full_width=True, full_height=True)
|
| 119 |
+
|
| 120 |
+
run_button.click(fn=run_grounding, inputs=[
|
| 121 |
+
input_image, grounding_caption, box_threshold, text_threshold], outputs=[gallery])
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
block.launch(server_name='0.0.0.0', server_port=7579, debug=args.debug, share=args.share)
|
| 125 |
+
|
tool/demo/image_editing_with_groundingdino_gligen.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
tool/demo/image_editing_with_groundingdino_stablediffusion.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
tool/demo/inference_on_a_image.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 8 |
+
|
| 9 |
+
import groundingdino.datasets.transforms as T
|
| 10 |
+
from groundingdino.models import build_model
|
| 11 |
+
from groundingdino.util import box_ops
|
| 12 |
+
from groundingdino.util.slconfig import SLConfig
|
| 13 |
+
from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
|
| 14 |
+
from groundingdino.util.vl_utils import create_positive_map_from_span
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def plot_boxes_to_image(image_pil, tgt):
    """Draw predicted boxes and labels onto a PIL image.

    Args:
        image_pil: PIL image to draw on (modified in place).
        tgt: dict with keys:
            "size": (H, W) of the image,
            "boxes": normalized (cx, cy, w, h) boxes,
            "labels": one label per box.

    Returns:
        (image_pil, mask): the annotated image and an "L"-mode mask whose
        box regions are filled with 255.
    """
    H, W = tgt["size"]
    boxes = tgt["boxes"]
    labels = tgt["labels"]
    assert len(boxes) == len(labels), "boxes and labels must have same length"

    draw = ImageDraw.Draw(image_pil)
    mask = Image.new("L", image_pil.size, 0)
    mask_draw = ImageDraw.Draw(mask)

    # Hoisted out of the loop: the font never changes between boxes.
    font = ImageFont.load_default()

    # draw boxes and masks
    for box, label in zip(boxes, labels):
        # from 0..1 to 0..W, 0..H
        box = box * torch.Tensor([W, H, W, H])
        # from cxcywh to xyxy
        box[:2] -= box[2:] / 2
        box[2:] += box[:2]
        # random color per box
        color = tuple(np.random.randint(0, 255, size=3).tolist())
        # draw
        x0, y0, x1, y1 = box
        x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)

        draw.rectangle([x0, y0, x1, y1], outline=color, width=6)

        if hasattr(font, "getbbox"):
            # modern Pillow: measure the label with textbbox
            bbox = draw.textbbox((x0, y0), str(label), font)
        else:
            # older Pillow fallback: textsize was removed in Pillow 10
            w, h = draw.textsize(str(label), font)
            bbox = (x0, y0, w + x0, y0 + h)
        draw.rectangle(bbox, fill=color)
        draw.text((x0, y0), str(label), fill="white")

        mask_draw.rectangle([x0, y0, x1, y1], fill=255, width=6)

    return image_pil, mask
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def load_image(image_path):
    """Load an image from disk.

    Returns:
        (image_pil, image): the RGB PIL image and its normalized CHW tensor.
    """
    image_pil = Image.open(image_path).convert("RGB")

    preprocess = T.Compose(
        [
            T.RandomResize([800], max_size=1333),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    image, _ = preprocess(image_pil, None)  # 3 x h x w
    return image_pil, image
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def load_model(model_config_path, model_checkpoint_path, cpu_only=False):
    """Build a Grounding DINO model, load a local checkpoint, return it in eval mode."""
    cfg = SLConfig.fromfile(model_config_path)
    cfg.device = "cpu" if cpu_only else "cuda"
    net = build_model(cfg)
    ckpt = torch.load(model_checkpoint_path, map_location="cpu")
    load_result = net.load_state_dict(clean_state_dict(ckpt["model"]), strict=False)
    print(load_result)
    net.eval()
    return net
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def get_grounding_output(model, image, caption, box_threshold, text_threshold=None, with_logits=True, cpu_only=False, token_spans=None):
    """Run the model on one image and return filtered boxes with phrases.

    Args:
        model: Grounding DINO model.
        image: normalized image tensor of shape (3, H, W).
        caption: text prompt; lowercased, stripped, and given a trailing "."
            if missing.
        box_threshold: minimum confidence to keep a predicted box.
        text_threshold: per-token threshold used to extract phrases
            (ignored when token_spans is given).
        with_logits: if True, append the confidence score to each phrase.
        cpu_only: run on CPU instead of CUDA.
        token_spans: optional list of character spans selecting the phrases
            of interest inside the caption.

    Returns:
        (boxes_filt, pred_phrases): kept boxes as (cx, cy, w, h) in [0, 1]
        and the matching phrase strings, one per box.
    """
    assert text_threshold is not None or token_spans is not None, "text_threshould and token_spans should not be None at the same time!"
    caption = caption.lower()
    caption = caption.strip()
    if not caption.endswith("."):
        caption = caption + "."
    device = "cuda" if not cpu_only else "cpu"
    model = model.to(device)
    image = image.to(device)
    with torch.no_grad():
        outputs = model(image[None], captions=[caption])
    logits = outputs["pred_logits"].sigmoid()[0]  # (nq, 256)
    boxes = outputs["pred_boxes"][0]  # (nq, 4)

    # filter output
    if token_spans is None:
        logits_filt = logits.cpu().clone()
        boxes_filt = boxes.cpu().clone()
        filt_mask = logits_filt.max(dim=1)[0] > box_threshold
        logits_filt = logits_filt[filt_mask]  # num_filt, 256
        boxes_filt = boxes_filt[filt_mask]  # num_filt, 4

        # get phrase
        tokenlizer = model.tokenizer
        tokenized = tokenlizer(caption)
        # build pred
        pred_phrases = []
        for logit, box in zip(logits_filt, boxes_filt):
            pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
            if with_logits:
                pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
            else:
                pred_phrases.append(pred_phrase)
    else:
        # given-phrase mode
        # BUG FIX: tokenize the normalized local `caption` instead of the
        # module-level global `text_prompt`, which is undefined when this
        # function is imported from another script and is also not what the
        # model's logits were computed against.
        positive_maps = create_positive_map_from_span(
            model.tokenizer(caption),
            token_span=token_spans
        ).to(image.device)  # n_phrase, 256

        logits_for_phrases = positive_maps @ logits.T  # n_phrase, nq
        all_logits = []
        all_phrases = []
        all_boxes = []
        for (token_span, logit_phr) in zip(token_spans, logits_for_phrases):
            # get phrase
            phrase = ' '.join([caption[_s:_e] for (_s, _e) in token_span])
            # get mask
            filt_mask = logit_phr > box_threshold
            # filt box
            all_boxes.append(boxes[filt_mask])
            # filt logits
            all_logits.append(logit_phr[filt_mask])
            if with_logits:
                logit_phr_num = logit_phr[filt_mask]
                all_phrases.extend([phrase + f"({str(logit.item())[:4]})" for logit in logit_phr_num])
            else:
                # BUG FIX: emit one phrase per *kept* box, not one per query;
                # len(filt_mask) is the total query count and would
                # desynchronize phrases from boxes.
                all_phrases.extend([phrase] * int(filt_mask.sum()))
        boxes_filt = torch.cat(all_boxes, dim=0).cpu()
        pred_phrases = all_phrases

    return boxes_filt, pred_phrases
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
if __name__ == "__main__":
|
| 150 |
+
|
| 151 |
+
parser = argparse.ArgumentParser("Grounding DINO example", add_help=True)
|
| 152 |
+
parser.add_argument("--config_file", "-c", type=str, required=True, help="path to config file")
|
| 153 |
+
parser.add_argument(
|
| 154 |
+
"--checkpoint_path", "-p", type=str, required=True, help="path to checkpoint file"
|
| 155 |
+
)
|
| 156 |
+
parser.add_argument("--image_path", "-i", type=str, required=True, help="path to image file")
|
| 157 |
+
parser.add_argument("--text_prompt", "-t", type=str, required=True, help="text prompt")
|
| 158 |
+
parser.add_argument(
|
| 159 |
+
"--output_dir", "-o", type=str, default="outputs", required=True, help="output directory"
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
parser.add_argument("--box_threshold", type=float, default=0.3, help="box threshold")
|
| 163 |
+
parser.add_argument("--text_threshold", type=float, default=0.25, help="text threshold")
|
| 164 |
+
parser.add_argument("--token_spans", type=str, default=None, help=
|
| 165 |
+
"The positions of start and end positions of phrases of interest. \
|
| 166 |
+
For example, a caption is 'a cat and a dog', \
|
| 167 |
+
if you would like to detect 'cat', the token_spans should be '[[[2, 5]], ]', since 'a cat and a dog'[2:5] is 'cat'. \
|
| 168 |
+
if you would like to detect 'a cat', the token_spans should be '[[[0, 1], [2, 5]], ]', since 'a cat and a dog'[0:1] is 'a', and 'a cat and a dog'[2:5] is 'cat'. \
|
| 169 |
+
")
|
| 170 |
+
|
| 171 |
+
parser.add_argument("--cpu-only", action="store_true", help="running on cpu only!, default=False")
|
| 172 |
+
args = parser.parse_args()
|
| 173 |
+
|
| 174 |
+
# cfg
|
| 175 |
+
config_file = args.config_file # change the path of the model config file
|
| 176 |
+
checkpoint_path = args.checkpoint_path # change the path of the model
|
| 177 |
+
image_path = args.image_path
|
| 178 |
+
text_prompt = args.text_prompt
|
| 179 |
+
output_dir = args.output_dir
|
| 180 |
+
box_threshold = args.box_threshold
|
| 181 |
+
text_threshold = args.text_threshold
|
| 182 |
+
token_spans = args.token_spans
|
| 183 |
+
|
| 184 |
+
# make dir
|
| 185 |
+
os.makedirs(output_dir, exist_ok=True)
|
| 186 |
+
# load image
|
| 187 |
+
image_pil, image = load_image(image_path)
|
| 188 |
+
# load model
|
| 189 |
+
model = load_model(config_file, checkpoint_path, cpu_only=args.cpu_only)
|
| 190 |
+
|
| 191 |
+
# visualize raw image
|
| 192 |
+
image_pil.save(os.path.join(output_dir, "raw_image.jpg"))
|
| 193 |
+
|
| 194 |
+
# set the text_threshold to None if token_spans is set.
|
| 195 |
+
if token_spans is not None:
|
| 196 |
+
text_threshold = None
|
| 197 |
+
print("Using token_spans. Set the text_threshold to None.")
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
# run model
|
| 201 |
+
boxes_filt, pred_phrases = get_grounding_output(
|
| 202 |
+
model, image, text_prompt, box_threshold, text_threshold, cpu_only=args.cpu_only, token_spans=eval(f"{token_spans}")
|
| 203 |
+
)
|
| 204 |
+
|
| 205 |
+
# visualize pred
|
| 206 |
+
size = image_pil.size
|
| 207 |
+
pred_dict = {
|
| 208 |
+
"boxes": boxes_filt,
|
| 209 |
+
"size": [size[1], size[0]], # H,W
|
| 210 |
+
"labels": pred_phrases,
|
| 211 |
+
}
|
| 212 |
+
# import ipdb; ipdb.set_trace()
|
| 213 |
+
image_with_box = plot_boxes_to_image(image_pil, pred_dict)[0]
|
| 214 |
+
image_with_box.save(os.path.join(output_dir, "pred.jpg"))
|
tool/demo/test_ap_on_coco.py
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
import time
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
from torch.utils.data import DataLoader, DistributedSampler
|
| 10 |
+
|
| 11 |
+
from groundingdino.models import build_model
|
| 12 |
+
import groundingdino.datasets.transforms as T
|
| 13 |
+
from groundingdino.util import box_ops, get_tokenlizer
|
| 14 |
+
from groundingdino.util.misc import clean_state_dict, collate_fn
|
| 15 |
+
from groundingdino.util.slconfig import SLConfig
|
| 16 |
+
|
| 17 |
+
# from torchvision.datasets import CocoDetection
|
| 18 |
+
import torchvision
|
| 19 |
+
|
| 20 |
+
from groundingdino.util.vl_utils import build_captions_and_token_span, create_positive_map_from_span
|
| 21 |
+
from groundingdino.datasets.cocogrounding_eval import CocoGroundingEvaluator
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def load_model(model_config_path: str, model_checkpoint_path: str, device: str = "cuda"):
    """Build a GroundingDINO model from a config file and load checkpoint weights.

    Args:
        model_config_path: Path to the SLConfig-style python config file.
        model_checkpoint_path: Path to the ``.pth`` checkpoint whose ``"model"``
            entry holds the state dict.
        device: Device name stored on the config before the model is built.

    Returns:
        The model in eval mode. Weights load with ``strict=False``, so keys
        missing from the checkpoint are silently left at their init values.
    """
    cfg = SLConfig.fromfile(model_config_path)
    cfg.device = device
    net = build_model(cfg)
    state = torch.load(model_checkpoint_path, map_location="cpu")
    net.load_state_dict(clean_state_dict(state["model"]), strict=False)
    net.eval()
    return net
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class CocoDetection(torchvision.datasets.CocoDetection):
    """COCO detection dataset that converts raw annotations to tensor targets.

    Targets are repacked as ``{"image_id", "boxes" (xyxy float tensor),
    "orig_size" ([H, W] tensor)}`` before being passed to ``transforms``.
    """

    def __init__(self, img_folder, ann_file, transforms):
        super().__init__(img_folder, ann_file)
        self._transforms = transforms

    def __getitem__(self, idx):
        img, anns = super().__getitem__(idx)  # anns: list of annotation dicts

        w, h = img.size

        # xywh -> xyxy, clipped to the image, dropping degenerate boxes.
        raw_boxes = [ann["bbox"] for ann in anns]
        boxes = torch.as_tensor(raw_boxes, dtype=torch.float32).reshape(-1, 4)
        boxes[:, 2:] += boxes[:, :2]
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)
        valid = (boxes[:, 2] > boxes[:, 0]) & (boxes[:, 3] > boxes[:, 1])

        target_new = {
            "image_id": self.ids[idx],
            "boxes": boxes[valid],
            "orig_size": torch.as_tensor([int(h), int(w)]),
        }

        # NOTE: with no transforms, the raw annotation list (not target_new)
        # is returned — preserved from the original implementation.
        target = anns
        if self._transforms is not None:
            img, target = self._transforms(img, target_new)

        return img, target
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class PostProcessCocoGrounding(nn.Module):
    """Convert GroundingDINO raw outputs into the format the COCO api expects."""

    # Contiguous category index (0..79) -> original COCO category id (1..90).
    _CONTIGUOUS_TO_COCO = {
        0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10,
        10: 11, 11: 13, 12: 14, 13: 15, 14: 16, 15: 17, 16: 18, 17: 19,
        18: 20, 19: 21, 20: 22, 21: 23, 22: 24, 23: 25, 24: 27, 25: 28,
        26: 31, 27: 32, 28: 33, 29: 34, 30: 35, 31: 36, 32: 37, 33: 38,
        34: 39, 35: 40, 36: 41, 37: 42, 38: 43, 39: 44, 40: 46, 41: 47,
        42: 48, 43: 49, 44: 50, 45: 51, 46: 52, 47: 53, 48: 54, 49: 55,
        50: 56, 51: 57, 52: 58, 53: 59, 54: 60, 55: 61, 56: 62, 57: 63,
        58: 64, 59: 65, 60: 67, 61: 70, 62: 72, 63: 73, 64: 74, 65: 75,
        66: 76, 67: 77, 68: 78, 69: 79, 70: 80, 71: 81, 72: 82, 73: 84,
        74: 85, 75: 86, 76: 87, 77: 88, 78: 89, 79: 90,
    }

    def __init__(self, num_select=300, coco_api=None, tokenlizer=None) -> None:
        super().__init__()
        self.num_select = num_select

        assert coco_api is not None
        cat_list = [item['name'] for item in coco_api.dataset['categories']]
        captions, cat2tokenspan = build_captions_and_token_span(cat_list, True)
        spans = [cat2tokenspan[name] for name in cat_list]
        # (num_categories, 256) normalized token-positive map.
        positive_map = create_positive_map_from_span(tokenlizer(captions), spans)

        # Scatter rows into a (91, 256) table indexed by original COCO ids,
        # so scores can be read out per COCO category directly.
        remapped = torch.zeros((91, 256))
        for contiguous_id, coco_id in self._CONTIGUOUS_TO_COCO.items():
            remapped[coco_id] = positive_map[contiguous_id]
        self.positive_map = remapped

    @torch.no_grad()
    def forward(self, outputs, target_sizes, not_to_xyxy=False):
        """Select the top-k detections and rescale boxes to absolute pixels.

        Parameters:
            outputs: raw model outputs with 'pred_logits' and 'pred_boxes'.
            target_sizes: (batch_size, 2) tensor of per-image sizes. For
                evaluation this must be the original (pre-augmentation) size;
                for visualization, the post-augmentation, pre-padding size.
            not_to_xyxy: keep boxes in cxcywh rather than converting to xyxy.

        Returns:
            One dict per image with 'scores', 'labels', and 'boxes'.
        """
        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']

        # Token-level probabilities -> per-category scores:
        # (bs, nq, 256) @ (91, 256).T -> (bs, nq, 91)
        token_prob = out_logits.sigmoid()
        prob = token_prob @ self.positive_map.to(token_prob.device).T

        assert len(out_logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2

        flat = prob.view(out_logits.shape[0], -1)
        scores, topk_indexes = torch.topk(flat, self.num_select, dim=1)
        topk_boxes = topk_indexes // prob.shape[2]
        labels = topk_indexes % prob.shape[2]

        boxes = out_bbox if not_to_xyxy else box_ops.box_cxcywh_to_xyxy(out_bbox)
        boxes = torch.gather(
            boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))

        # Relative [0, 1] -> absolute [0, height/width] coordinates.
        img_h, img_w = target_sizes.unbind(1)
        scale = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale[:, None, :]

        return [{'scores': s, 'labels': l, 'boxes': b}
                for s, l, b in zip(scores, labels, boxes)]
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def main(args):
    """Evaluate a GroundingDINO checkpoint on a COCO-style detection set.

    Builds the model from ``args.config_file``/``args.checkpoint_path``, runs
    inference over ``args.image_dir`` using a caption made of all category
    names, and prints COCO-style AP metrics.

    Args:
        args: Parsed argparse namespace (see the ``__main__`` block).
    """
    # config
    cfg = SLConfig.fromfile(args.config_file)

    # build model
    model = load_model(args.config_file, args.checkpoint_path)
    model = model.to(args.device)
    model = model.eval()

    # build dataloader
    transform = T.Compose(
        [
            T.RandomResize([800], max_size=1333),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    dataset = CocoDetection(
        args.image_dir, args.anno_path, transforms=transform)
    data_loader = DataLoader(
        dataset, batch_size=1, shuffle=False, num_workers=args.num_workers, collate_fn=collate_fn)

    # build post processor
    tokenlizer = get_tokenlizer.get_tokenlizer(cfg.text_encoder_type)
    # BUG FIX: --num_select was parsed but never forwarded, so the
    # postprocessor silently always used its default of 300.
    postprocessor = PostProcessCocoGrounding(
        num_select=getattr(args, "num_select", 300),
        coco_api=dataset.coco, tokenlizer=tokenlizer)

    # build evaluator
    evaluator = CocoGroundingEvaluator(
        dataset.coco, iou_types=("bbox",), useCats=True)

    # build captions: "cat1 . cat2 . ... ." — one prompt covering every class
    category_dict = dataset.coco.dataset['categories']
    cat_list = [item['name'] for item in category_dict]
    caption = " . ".join(cat_list) + ' .'
    print("Input text prompt:", caption)

    # run inference
    start = time.time()
    for i, (images, targets) in enumerate(data_loader):
        # get images and captions
        images = images.tensors.to(args.device)
        bs = images.shape[0]
        input_captions = [caption] * bs

        # feed to the model
        outputs = model(images, captions=input_captions)

        orig_target_sizes = torch.stack(
            [t["orig_size"] for t in targets], dim=0).to(images.device)
        results = postprocessor(outputs, orig_target_sizes)
        cocogrounding_res = {
            target["image_id"]: output for target, output in zip(targets, results)}
        evaluator.update(cocogrounding_res)

        if (i+1) % 30 == 0:
            used_time = time.time() - start
            # BUG FIX: ETA previously divided by (i + 1e-5) rather than the
            # number of processed batches (i + 1), overestimating early on.
            eta = len(data_loader) / (i + 1) * used_time - used_time
            print(
                f"processed {i}/{len(data_loader)} images. time: {used_time:.2f}s, ETA: {eta:.2f}s")

    evaluator.synchronize_between_processes()
    evaluator.accumulate()
    evaluator.summarize()

    print("Final results:", evaluator.coco_eval["bbox"].stats.tolist())
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the COCO evaluation.
    parser = argparse.ArgumentParser("Grounding DINO eval on COCO", add_help=True)

    # model
    parser.add_argument("--config_file", "-c", type=str, required=True,
                        help="path to config file")
    parser.add_argument("--checkpoint_path", "-p", type=str, required=True,
                        help="path to checkpoint file")
    parser.add_argument("--device", type=str, default="cuda",
                        help="running device (default: cuda)")

    # post processing
    parser.add_argument("--num_select", type=int, default=300,
                        help="number of topk to select")

    # coco info
    parser.add_argument("--anno_path", type=str, required=True,
                        help="coco root")
    parser.add_argument("--image_dir", type=str, required=True,
                        help="coco image dir")
    parser.add_argument("--num_workers", type=int, default=4,
                        help="number of workers for dataloader")

    main(parser.parse_args())
|
tool/detector.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import numpy as np
|
| 3 |
+
import cv2
|
| 4 |
+
import PIL
|
| 5 |
+
|
| 6 |
+
from groundingdino.models import build_model as build_grounding_dino
|
| 7 |
+
from groundingdino.util.slconfig import SLConfig
|
| 8 |
+
from groundingdino.util.utils import clean_state_dict
|
| 9 |
+
from groundingdino.util.inference import annotate, load_image, predict
|
| 10 |
+
import groundingdino.datasets.transforms as T
|
| 11 |
+
|
| 12 |
+
from torchvision.ops import box_convert
|
| 13 |
+
|
| 14 |
+
class Detector:
    """GroundingDINO-based, text-prompted object detector.

    Wraps model construction, checkpoint loading, and prediction; boxes are
    returned both drawn on an annotated frame and as corner pairs suitable
    for downstream use (e.g. as SAM box prompts).
    """

    def __init__(self, device,
                 config_file="src/groundingdino/groundingdino/config/GroundingDINO_SwinT_OGC.py",
                 ckpt_path='./ckpt/groundingdino_swint_ogc.pth'):
        # Paths were previously hard-coded; they are now parameters whose
        # defaults preserve the old behavior.
        args = SLConfig.fromfile(config_file)
        args.device = device
        self.device = device
        # BUG FIX (typo): the attribute was misspelled "deivce"; the old name
        # is kept as an alias so any external readers keep working.
        self.deivce = device
        self.gd = build_grounding_dino(args)

        checkpoint = torch.load(ckpt_path, map_location='cpu')
        # strict=False: checkpoint may lack some heads; report what loaded.
        log = self.gd.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
        print("Model loaded from {} \n => {}".format(ckpt_path, log))
        self.gd.eval()

    def image_transform_grounding(self, init_image):
        """Return (original PIL image, normalized tensor) for model input."""
        transform = T.Compose([
            T.RandomResize([800], max_size=1333),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        image, _ = transform(init_image, None)  # 3, h, w
        return init_image, image

    def image_transform_grounding_for_vis(self, init_image):
        """Resize-only transform used when visualizing on the model's scale."""
        transform = T.Compose([
            T.RandomResize([800], max_size=1333),
        ])
        image, _ = transform(init_image, None)  # 3, h, w
        return image

    def transfer_boxes_format(self, boxes, height, width):
        """Convert normalized cxcywh boxes to int corner pairs.

        Args:
            boxes: (N, 4) tensor of normalized cxcywh boxes in [0, 1].
            height: Pixel height used to denormalize.
            width: Pixel width used to denormalize.

        Returns:
            ndarray of shape (N, 2, 2): [[x0, y0], [x1, y1]] per box.
        """
        boxes = boxes * torch.Tensor([width, height, width, height])
        boxes = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy")

        transfered_boxes = []
        for box in boxes:
            transfered_boxes.append(
                [[int(box[0]), int(box[1])], [int(box[2]), int(box[3])]])

        return np.array(transfered_boxes)

    @torch.no_grad()
    def run_grounding(self, origin_frame, grounding_caption, box_threshold, text_threshold):
        '''
        Detect objects described by ``grounding_caption`` in an RGB frame.

        return:
            annotated_frame: nd.array, caption/box overlay resized back to
                the input frame's size (channel order flipped from annotate's
                output).
            transfered_boxes: nd.array [N, 2, 2]: [[x0, y0], [x1, y1]] in the
                original frame's pixel coordinates.
        '''
        height, width, _ = origin_frame.shape
        img_pil = PIL.Image.fromarray(origin_frame)
        re_width, re_height = img_pil.size  # same as (width, height) here
        _, image_tensor = self.image_transform_grounding(img_pil)

        # run grounding
        boxes, logits, phrases = predict(self.gd, image_tensor, grounding_caption, box_threshold, text_threshold, device=self.device)
        annotated_frame = annotate(image_source=np.asarray(img_pil), boxes=boxes, logits=logits, phrases=phrases)[:, :, ::-1]
        annotated_frame = cv2.resize(annotated_frame, (width, height), interpolation=cv2.INTER_LINEAR)

        # transfer boxes to sam-format
        transfered_boxes = self.transfer_boxes_format(boxes, re_height, re_width)
        return annotated_frame, transfered_boxes
|
| 78 |
+
|
| 79 |
+
if __name__ == "__main__":
    # Smoke test: run detection on a sample image and dump annotated outputs.
    detector = Detector("cuda")

    origin_frame = cv2.imread('./debug/point.png')
    origin_frame = cv2.cvtColor(origin_frame, cv2.COLOR_BGR2RGB)

    grounding_caption = "swan.water"
    box_threshold = 0.25
    text_threshold = 0.25

    annotated_frame, boxes = detector.run_grounding(
        origin_frame, grounding_caption, box_threshold, text_threshold)
    cv2.imwrite('./debug/x.png', annotated_frame)

    # Draw the transferred corner-pair boxes back onto the original frame.
    for box in boxes:
        origin_frame = cv2.rectangle(origin_frame, box[0], box[1], (0, 0, 255))
    cv2.imwrite('./debug/bbox_frame.png', origin_frame)
|
tool/environment.yaml
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: dino
|
| 2 |
+
channels:
|
| 3 |
+
- pytorch
|
| 4 |
+
- nvidia
|
| 5 |
+
- conda-forge
|
| 6 |
+
- defaults
|
| 7 |
+
dependencies:
|
| 8 |
+
- addict=2.4.0=pyhd8ed1ab_2
|
| 9 |
+
- aiohttp=3.8.5=py39ha55989b_0
|
| 10 |
+
- aiosignal=1.3.1=pyhd8ed1ab_0
|
| 11 |
+
- asttokens=2.0.5=pyhd3eb1b0_0
|
| 12 |
+
- async-timeout=4.0.3=pyhd8ed1ab_0
|
| 13 |
+
- attrs=23.1.0=pyh71513ae_1
|
| 14 |
+
- aws-c-auth=0.7.0=h6f3c987_2
|
| 15 |
+
- aws-c-cal=0.6.0=h6ba3258_0
|
| 16 |
+
- aws-c-common=0.8.23=hcfcfb64_0
|
| 17 |
+
- aws-c-compression=0.2.17=h420beca_1
|
| 18 |
+
- aws-c-event-stream=0.3.1=had47b81_1
|
| 19 |
+
- aws-c-http=0.7.11=h72ba615_0
|
| 20 |
+
- aws-c-io=0.13.28=ha35c040_0
|
| 21 |
+
- aws-c-mqtt=0.8.14=h4941efa_2
|
| 22 |
+
- aws-c-s3=0.3.13=he04eaa7_2
|
| 23 |
+
- aws-c-sdkutils=0.1.11=h420beca_1
|
| 24 |
+
- aws-checksums=0.1.16=h420beca_1
|
| 25 |
+
- aws-crt-cpp=0.20.3=h247a981_4
|
| 26 |
+
- aws-sdk-cpp=1.10.57=h1a0519f_17
|
| 27 |
+
- backcall=0.2.0=pyhd3eb1b0_0
|
| 28 |
+
- blas=2.118=mkl
|
| 29 |
+
- blas-devel=3.9.0=18_win64_mkl
|
| 30 |
+
- brotli=1.0.9=hcfcfb64_9
|
| 31 |
+
- brotli-bin=1.0.9=hcfcfb64_9
|
| 32 |
+
- brotli-python=1.0.9=py39h99910a6_9
|
| 33 |
+
- bzip2=1.0.8=h8ffe710_4
|
| 34 |
+
- c-ares=1.19.1=hcfcfb64_0
|
| 35 |
+
- ca-certificates=2023.08.22=haa95532_0
|
| 36 |
+
- certifi=2023.7.22=py39haa95532_0
|
| 37 |
+
- charset-normalizer=3.2.0=pyhd8ed1ab_0
|
| 38 |
+
- click=8.1.7=win_pyh7428d3b_0
|
| 39 |
+
- colorama=0.4.6=pyhd8ed1ab_0
|
| 40 |
+
- comm=0.1.2=py39haa95532_0
|
| 41 |
+
- contourpy=1.1.1=py39h1f6ef14_1
|
| 42 |
+
- cuda-cccl=12.2.140=0
|
| 43 |
+
- cuda-cudart=11.8.89=0
|
| 44 |
+
- cuda-cudart-dev=11.8.89=0
|
| 45 |
+
- cuda-cupti=11.8.87=0
|
| 46 |
+
- cuda-libraries=11.8.0=0
|
| 47 |
+
- cuda-libraries-dev=11.8.0=0
|
| 48 |
+
- cuda-nvrtc=11.8.89=0
|
| 49 |
+
- cuda-nvrtc-dev=11.8.89=0
|
| 50 |
+
- cuda-nvtx=11.8.86=0
|
| 51 |
+
- cuda-profiler-api=12.2.140=0
|
| 52 |
+
- cuda-runtime=11.8.0=0
|
| 53 |
+
- cycler=0.11.0=pyhd8ed1ab_0
|
| 54 |
+
- cython=3.0.0=py39h2bbff1b_0
|
| 55 |
+
- dataclasses=0.8=pyhc8e2a94_3
|
| 56 |
+
- datasets=2.14.5=pyhd8ed1ab_0
|
| 57 |
+
- debugpy=1.6.7=py39hd77b12b_0
|
| 58 |
+
- decorator=5.1.1=pyhd3eb1b0_0
|
| 59 |
+
- dill=0.3.7=pyhd8ed1ab_0
|
| 60 |
+
- exceptiongroup=1.0.4=py39haa95532_0
|
| 61 |
+
- executing=0.8.3=pyhd3eb1b0_0
|
| 62 |
+
- filelock=3.12.4=pyhd8ed1ab_0
|
| 63 |
+
- fonttools=4.42.1=py39ha55989b_0
|
| 64 |
+
- freeglut=3.2.2=h63175ca_2
|
| 65 |
+
- freetype=2.12.1=hdaf720e_2
|
| 66 |
+
- frozenlist=1.4.0=py39ha55989b_1
|
| 67 |
+
- fsspec=2023.6.0=pyh1a96a4e_0
|
| 68 |
+
- gettext=0.21.1=h5728263_0
|
| 69 |
+
- glib=2.78.0=h12be248_0
|
| 70 |
+
- glib-tools=2.78.0=h12be248_0
|
| 71 |
+
- gst-plugins-base=1.22.6=h001b923_1
|
| 72 |
+
- gstreamer=1.22.6=hb4038d2_1
|
| 73 |
+
- huggingface_hub=0.17.3=pyhd8ed1ab_0
|
| 74 |
+
- icu=70.1=h0e60522_0
|
| 75 |
+
- idna=3.4=pyhd8ed1ab_0
|
| 76 |
+
- importlib-metadata=6.8.0=pyha770c72_0
|
| 77 |
+
- importlib-resources=6.1.0=pyhd8ed1ab_0
|
| 78 |
+
- importlib_metadata=6.8.0=hd8ed1ab_0
|
| 79 |
+
- importlib_resources=6.1.0=pyhd8ed1ab_0
|
| 80 |
+
- intel-openmp=2023.2.0=h57928b3_49503
|
| 81 |
+
- ipykernel=6.25.0=py39h9909e9c_0
|
| 82 |
+
- ipython=8.15.0=py39haa95532_0
|
| 83 |
+
- jasper=2.0.33=hc2e4405_1
|
| 84 |
+
- jedi=0.18.1=py39haa95532_1
|
| 85 |
+
- jinja2=3.1.2=pyhd8ed1ab_1
|
| 86 |
+
- joblib=1.3.2=pyhd8ed1ab_0
|
| 87 |
+
- jpeg=9e=hcfcfb64_3
|
| 88 |
+
- jupyter_client=8.1.0=py39haa95532_0
|
| 89 |
+
- jupyter_core=5.3.0=py39haa95532_0
|
| 90 |
+
- kiwisolver=1.4.5=py39h1f6ef14_1
|
| 91 |
+
- krb5=1.20.1=heb0366b_0
|
| 92 |
+
- lcms2=2.14=h90d422f_0
|
| 93 |
+
- lerc=4.0.0=h63175ca_0
|
| 94 |
+
- libabseil=20230125.3=cxx17_h63175ca_0
|
| 95 |
+
- libarrow=12.0.1=h12e5d06_5_cpu
|
| 96 |
+
- libblas=3.9.0=18_win64_mkl
|
| 97 |
+
- libbrotlicommon=1.0.9=hcfcfb64_9
|
| 98 |
+
- libbrotlidec=1.0.9=hcfcfb64_9
|
| 99 |
+
- libbrotlienc=1.0.9=hcfcfb64_9
|
| 100 |
+
- libcblas=3.9.0=18_win64_mkl
|
| 101 |
+
- libclang=15.0.7=default_h77d9078_3
|
| 102 |
+
- libclang13=15.0.7=default_h77d9078_3
|
| 103 |
+
- libcrc32c=1.1.2=h0e60522_0
|
| 104 |
+
- libcublas=11.11.3.6=0
|
| 105 |
+
- libcublas-dev=11.11.3.6=0
|
| 106 |
+
- libcufft=10.9.0.58=0
|
| 107 |
+
- libcufft-dev=10.9.0.58=0
|
| 108 |
+
- libcurand=10.3.3.141=0
|
| 109 |
+
- libcurand-dev=10.3.3.141=0
|
| 110 |
+
- libcurl=8.1.2=h68f0423_0
|
| 111 |
+
- libcusolver=11.4.1.48=0
|
| 112 |
+
- libcusolver-dev=11.4.1.48=0
|
| 113 |
+
- libcusparse=11.7.5.86=0
|
| 114 |
+
- libcusparse-dev=11.7.5.86=0
|
| 115 |
+
- libdeflate=1.14=hcfcfb64_0
|
| 116 |
+
- libevent=2.1.12=h3671451_1
|
| 117 |
+
- libffi=3.4.2=h8ffe710_5
|
| 118 |
+
- libglib=2.78.0=he8f3873_0
|
| 119 |
+
- libgoogle-cloud=2.12.0=h00b2bdc_1
|
| 120 |
+
- libgrpc=1.54.3=ha177ca7_0
|
| 121 |
+
- libhwloc=2.9.3=default_haede6df_1009
|
| 122 |
+
- libiconv=1.17=h8ffe710_0
|
| 123 |
+
- liblapack=3.9.0=18_win64_mkl
|
| 124 |
+
- liblapacke=3.9.0=18_win64_mkl
|
| 125 |
+
- libnpp=11.8.0.86=0
|
| 126 |
+
- libnpp-dev=11.8.0.86=0
|
| 127 |
+
- libnvjpeg=11.9.0.86=0
|
| 128 |
+
- libnvjpeg-dev=11.9.0.86=0
|
| 129 |
+
- libogg=1.3.4=h8ffe710_1
|
| 130 |
+
- libopencv=4.5.3=py39h488c12c_8
|
| 131 |
+
- libpng=1.6.39=h19919ed_0
|
| 132 |
+
- libprotobuf=3.21.12=h12be248_2
|
| 133 |
+
- libsodium=1.0.18=h62dcd97_0
|
| 134 |
+
- libsqlite=3.43.0=hcfcfb64_0
|
| 135 |
+
- libssh2=1.11.0=h7dfc565_0
|
| 136 |
+
- libthrift=0.18.1=h06f6336_2
|
| 137 |
+
- libtiff=4.4.0=hc4f729c_5
|
| 138 |
+
- libutf8proc=2.8.0=h82a8f57_0
|
| 139 |
+
- libuv=1.44.2=hcfcfb64_1
|
| 140 |
+
- libvorbis=1.3.7=h0e60522_0
|
| 141 |
+
- libwebp-base=1.3.2=hcfcfb64_0
|
| 142 |
+
- libxcb=1.13=hcd874cb_1004
|
| 143 |
+
- libxml2=2.11.5=hc3477c8_1
|
| 144 |
+
- libzlib=1.2.13=hcfcfb64_5
|
| 145 |
+
- lz4-c=1.9.4=hcfcfb64_0
|
| 146 |
+
- m2w64-gcc-libgfortran=5.3.0=6
|
| 147 |
+
- m2w64-gcc-libs=5.3.0=7
|
| 148 |
+
- m2w64-gcc-libs-core=5.3.0=7
|
| 149 |
+
- m2w64-gmp=6.1.0=2
|
| 150 |
+
- m2w64-libwinpthread-git=5.0.0.4634.697f757=2
|
| 151 |
+
- markupsafe=2.1.3=py39ha55989b_1
|
| 152 |
+
- matplotlib-base=3.8.0=py39hf19769e_1
|
| 153 |
+
- matplotlib-inline=0.1.6=py39haa95532_0
|
| 154 |
+
- mkl=2022.1.0=h6a75c08_874
|
| 155 |
+
- mkl-devel=2022.1.0=h57928b3_875
|
| 156 |
+
- mkl-include=2022.1.0=h6a75c08_874
|
| 157 |
+
- mpmath=1.3.0=pyhd8ed1ab_0
|
| 158 |
+
- msys2-conda-epoch=20160418=1
|
| 159 |
+
- multidict=6.0.4=py39ha55989b_0
|
| 160 |
+
- multiprocess=0.70.15=py39ha55989b_1
|
| 161 |
+
- munkres=1.1.4=pyh9f0ad1d_0
|
| 162 |
+
- nest-asyncio=1.5.6=py39haa95532_0
|
| 163 |
+
- networkx=3.1=pyhd8ed1ab_0
|
| 164 |
+
- numpy=1.26.0=py39hddb5d58_0
|
| 165 |
+
- opencv=4.5.3=py39hcbf5309_8
|
| 166 |
+
- openjpeg=2.5.0=hc9384bd_1
|
| 167 |
+
- openssl=3.1.3=hcfcfb64_0
|
| 168 |
+
- orc=1.9.0=hada7b9e_1
|
| 169 |
+
- packaging=23.1=pyhd8ed1ab_0
|
| 170 |
+
- pandas=2.1.1=py39h32e6231_0
|
| 171 |
+
- parso=0.8.3=pyhd3eb1b0_0
|
| 172 |
+
- pcre2=10.40=h17e33f8_0
|
| 173 |
+
- pickleshare=0.7.5=pyhd3eb1b0_1003
|
| 174 |
+
- pillow=9.2.0=py39h595c93f_3
|
| 175 |
+
- pip=23.2.1=pyhd8ed1ab_0
|
| 176 |
+
- platformdirs=3.10.0=pyhd8ed1ab_0
|
| 177 |
+
- prompt-toolkit=3.0.36=py39haa95532_0
|
| 178 |
+
- psutil=5.9.0=py39h2bbff1b_0
|
| 179 |
+
- pthread-stubs=0.4=hcd874cb_1001
|
| 180 |
+
- pthreads-win32=2.9.1=hfa6e2cd_3
|
| 181 |
+
- pure_eval=0.2.2=pyhd3eb1b0_0
|
| 182 |
+
- py-opencv=4.5.3=py39h00e5391_8
|
| 183 |
+
- pyarrow=12.0.1=py39hca4e8af_5_cpu
|
| 184 |
+
- pycocotools=2.0.6=py39hc266a54_1
|
| 185 |
+
- pygments=2.15.1=py39haa95532_1
|
| 186 |
+
- pyparsing=3.1.1=pyhd8ed1ab_0
|
| 187 |
+
- pysocks=1.7.1=pyh0701188_6
|
| 188 |
+
- python=3.9.18=h4de0772_0_cpython
|
| 189 |
+
- python-dateutil=2.8.2=pyhd8ed1ab_0
|
| 190 |
+
- python-tzdata=2023.3=pyhd8ed1ab_0
|
| 191 |
+
- python-xxhash=3.3.0=py39ha55989b_1
|
| 192 |
+
- python_abi=3.9=4_cp39
|
| 193 |
+
- pytorch=2.0.1=py3.9_cuda11.8_cudnn8_0
|
| 194 |
+
- pytorch-cuda=11.8=h24eeafa_5
|
| 195 |
+
- pytorch-mutex=1.0=cuda
|
| 196 |
+
- pytz=2023.3.post1=pyhd8ed1ab_0
|
| 197 |
+
- pywin32=305=py39h2bbff1b_0
|
| 198 |
+
- pyyaml=6.0.1=py39ha55989b_1
|
| 199 |
+
- pyzmq=25.1.0=py39hd77b12b_0
|
| 200 |
+
- qt-main=5.15.8=h720456b_6
|
| 201 |
+
- re2=2023.03.02=hd4eee63_0
|
| 202 |
+
- regex=2023.8.8=py39ha55989b_1
|
| 203 |
+
- requests=2.31.0=pyhd8ed1ab_0
|
| 204 |
+
- sacremoses=0.0.53=pyhd8ed1ab_0
|
| 205 |
+
- safetensors=0.3.3=py39hf21820d_1
|
| 206 |
+
- setuptools=68.2.2=pyhd8ed1ab_0
|
| 207 |
+
- six=1.16.0=pyh6c4a22f_0
|
| 208 |
+
- snappy=1.1.10=hfb803bf_0
|
| 209 |
+
- stack_data=0.2.0=pyhd3eb1b0_0
|
| 210 |
+
- sympy=1.12=pyh04b8f61_3
|
| 211 |
+
- tbb=2021.10.0=h91493d7_1
|
| 212 |
+
- timm=0.9.7=pyhd8ed1ab_0
|
| 213 |
+
- tk=8.6.13=hcfcfb64_0
|
| 214 |
+
- tokenizers=0.13.3=py39hca44cb7_0
|
| 215 |
+
- tomli=2.0.1=pyhd8ed1ab_0
|
| 216 |
+
- tornado=6.3.2=py39h2bbff1b_0
|
| 217 |
+
- tqdm=4.66.1=pyhd8ed1ab_0
|
| 218 |
+
- traitlets=5.7.1=py39haa95532_0
|
| 219 |
+
- transformers=4.33.2=pyhd8ed1ab_0
|
| 220 |
+
- typing-extensions=4.8.0=hd8ed1ab_0
|
| 221 |
+
- typing_extensions=4.8.0=pyha770c72_0
|
| 222 |
+
- tzdata=2023c=h71feb2d_0
|
| 223 |
+
- ucrt=10.0.22621.0=h57928b3_0
|
| 224 |
+
- unicodedata2=15.0.0=py39ha55989b_1
|
| 225 |
+
- urllib3=2.0.5=pyhd8ed1ab_0
|
| 226 |
+
- vc=14.3=h64f974e_17
|
| 227 |
+
- vc14_runtime=14.36.32532=hdcecf7f_17
|
| 228 |
+
- vs2015_runtime=14.36.32532=h05e6639_17
|
| 229 |
+
- wcwidth=0.2.5=pyhd3eb1b0_0
|
| 230 |
+
- wheel=0.41.2=pyhd8ed1ab_0
|
| 231 |
+
- win_inet_pton=1.1.0=pyhd8ed1ab_6
|
| 232 |
+
- xorg-libxau=1.0.11=hcd874cb_0
|
| 233 |
+
- xorg-libxdmcp=1.1.3=hcd874cb_0
|
| 234 |
+
- xxhash=0.8.2=hcfcfb64_0
|
| 235 |
+
- xz=5.2.6=h8d14728_0
|
| 236 |
+
- yaml=0.2.5=h8ffe710_2
|
| 237 |
+
- yapf=0.40.1=pyhd8ed1ab_0
|
| 238 |
+
- yarl=1.9.2=py39ha55989b_0
|
| 239 |
+
- zeromq=4.3.4=hd77b12b_0
|
| 240 |
+
- zipp=3.17.0=pyhd8ed1ab_0
|
| 241 |
+
- zlib=1.2.13=hcfcfb64_5
|
| 242 |
+
- zstd=1.5.5=h12be248_0
|
| 243 |
+
- pip:
|
| 244 |
+
- opencv-python==4.8.0.76
|
| 245 |
+
- supervision==0.6.0
|
| 246 |
+
- torchaudio==2.0.2
|
| 247 |
+
- torchvision==0.15.2
|
| 248 |
+
prefix: C:\Users\Makoto\miniconda3\envs\dino
|
tool/groundingdino.egg-info/PKG-INFO
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: groundingdino
|
| 3 |
+
Version: 0.1.0
|
| 4 |
+
Summary: open-set object detector
|
| 5 |
+
Home-page: https://github.com/IDEA-Research/GroundingDINO
|
| 6 |
+
Author: International Digital Economy Academy, Shilong Liu
|
| 7 |
+
License: Apache License
|
| 8 |
+
Version 2.0, January 2004
|
| 9 |
+
http://www.apache.org/licenses/
|
| 10 |
+
|
| 11 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 12 |
+
|
| 13 |
+
1. Definitions.
|
| 14 |
+
|
| 15 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 16 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 17 |
+
|
| 18 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 19 |
+
the copyright owner that is granting the License.
|
| 20 |
+
|
| 21 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 22 |
+
other entities that control, are controlled by, or are under common
|
| 23 |
+
control with that entity. For the purposes of this definition,
|
| 24 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 25 |
+
direction or management of such entity, whether by contract or
|
| 26 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 27 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 28 |
+
|
| 29 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 30 |
+
exercising permissions granted by this License.
|
| 31 |
+
|
| 32 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 33 |
+
including but not limited to software source code, documentation
|
| 34 |
+
source, and configuration files.
|
| 35 |
+
|
| 36 |
+
"Object" form shall mean any form resulting from mechanical
|
| 37 |
+
transformation or translation of a Source form, including but
|
| 38 |
+
not limited to compiled object code, generated documentation,
|
| 39 |
+
and conversions to other media types.
|
| 40 |
+
|
| 41 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 42 |
+
Object form, made available under the License, as indicated by a
|
| 43 |
+
copyright notice that is included in or attached to the work
|
| 44 |
+
(an example is provided in the Appendix below).
|
| 45 |
+
|
| 46 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 47 |
+
form, that is based on (or derived from) the Work and for which the
|
| 48 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 49 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 50 |
+
of this License, Derivative Works shall not include works that remain
|
| 51 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 52 |
+
the Work and Derivative Works thereof.
|
| 53 |
+
|
| 54 |
+
"Contribution" shall mean any work of authorship, including
|
| 55 |
+
the original version of the Work and any modifications or additions
|
| 56 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 57 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 58 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 59 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 60 |
+
means any form of electronic, verbal, or written communication sent
|
| 61 |
+
to the Licensor or its representatives, including but not limited to
|
| 62 |
+
communication on electronic mailing lists, source code control systems,
|
| 63 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 64 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 65 |
+
excluding communication that is conspicuously marked or otherwise
|
| 66 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 67 |
+
|
| 68 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 69 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 70 |
+
subsequently incorporated within the Work.
|
| 71 |
+
|
| 72 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 73 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 74 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 75 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 76 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 77 |
+
Work and such Derivative Works in Source or Object form.
|
| 78 |
+
|
| 79 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 80 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 81 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 82 |
+
(except as stated in this section) patent license to make, have made,
|
| 83 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 84 |
+
where such license applies only to those patent claims licensable
|
| 85 |
+
by such Contributor that are necessarily infringed by their
|
| 86 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 87 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 88 |
+
institute patent litigation against any entity (including a
|
| 89 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 90 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 91 |
+
or contributory patent infringement, then any patent licenses
|
| 92 |
+
granted to You under this License for that Work shall terminate
|
| 93 |
+
as of the date such litigation is filed.
|
| 94 |
+
|
| 95 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 96 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 97 |
+
modifications, and in Source or Object form, provided that You
|
| 98 |
+
meet the following conditions:
|
| 99 |
+
|
| 100 |
+
(a) You must give any other recipients of the Work or
|
| 101 |
+
Derivative Works a copy of this License; and
|
| 102 |
+
|
| 103 |
+
(b) You must cause any modified files to carry prominent notices
|
| 104 |
+
stating that You changed the files; and
|
| 105 |
+
|
| 106 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 107 |
+
that You distribute, all copyright, patent, trademark, and
|
| 108 |
+
attribution notices from the Source form of the Work,
|
| 109 |
+
excluding those notices that do not pertain to any part of
|
| 110 |
+
the Derivative Works; and
|
| 111 |
+
|
| 112 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 113 |
+
distribution, then any Derivative Works that You distribute must
|
| 114 |
+
include a readable copy of the attribution notices contained
|
| 115 |
+
within such NOTICE file, excluding those notices that do not
|
| 116 |
+
pertain to any part of the Derivative Works, in at least one
|
| 117 |
+
of the following places: within a NOTICE text file distributed
|
| 118 |
+
as part of the Derivative Works; within the Source form or
|
| 119 |
+
documentation, if provided along with the Derivative Works; or,
|
| 120 |
+
within a display generated by the Derivative Works, if and
|
| 121 |
+
wherever such third-party notices normally appear. The contents
|
| 122 |
+
of the NOTICE file are for informational purposes only and
|
| 123 |
+
do not modify the License. You may add Your own attribution
|
| 124 |
+
notices within Derivative Works that You distribute, alongside
|
| 125 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 126 |
+
that such additional attribution notices cannot be construed
|
| 127 |
+
as modifying the License.
|
| 128 |
+
|
| 129 |
+
You may add Your own copyright statement to Your modifications and
|
| 130 |
+
may provide additional or different license terms and conditions
|
| 131 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 132 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 133 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 134 |
+
the conditions stated in this License.
|
| 135 |
+
|
| 136 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 137 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 138 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 139 |
+
this License, without any additional terms or conditions.
|
| 140 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 141 |
+
the terms of any separate license agreement you may have executed
|
| 142 |
+
with Licensor regarding such Contributions.
|
| 143 |
+
|
| 144 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 145 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 146 |
+
except as required for reasonable and customary use in describing the
|
| 147 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 148 |
+
|
| 149 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 150 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 151 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 152 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 153 |
+
implied, including, without limitation, any warranties or conditions
|
| 154 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 155 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 156 |
+
appropriateness of using or redistributing the Work and assume any
|
| 157 |
+
risks associated with Your exercise of permissions under this License.
|
| 158 |
+
|
| 159 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 160 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 161 |
+
unless required by applicable law (such as deliberate and grossly
|
| 162 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 163 |
+
liable to You for damages, including any direct, indirect, special,
|
| 164 |
+
incidental, or consequential damages of any character arising as a
|
| 165 |
+
result of this License or out of the use or inability to use the
|
| 166 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 167 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 168 |
+
other commercial damages or losses), even if such Contributor
|
| 169 |
+
has been advised of the possibility of such damages.
|
| 170 |
+
|
| 171 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 172 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 173 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 174 |
+
or other liability obligations and/or rights consistent with this
|
| 175 |
+
License. However, in accepting such obligations, You may act only
|
| 176 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 177 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 178 |
+
defend, and hold each Contributor harmless for any liability
|
| 179 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 180 |
+
of your accepting any such warranty or additional liability.
|
| 181 |
+
|
| 182 |
+
END OF TERMS AND CONDITIONS
|
| 183 |
+
|
| 184 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 185 |
+
|
| 186 |
+
To apply the Apache License to your work, attach the following
|
| 187 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 188 |
+
replaced with your own identifying information. (Don't include
|
| 189 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 190 |
+
comment syntax for the file format. We also recommend that a
|
| 191 |
+
file or class name and description of purpose be included on the
|
| 192 |
+
same "printed page" as the copyright notice for easier
|
| 193 |
+
identification within third-party archives.
|
| 194 |
+
|
| 195 |
+
Copyright 2023 - present, IDEA Research.
|
| 196 |
+
|
| 197 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 198 |
+
you may not use this file except in compliance with the License.
|
| 199 |
+
You may obtain a copy of the License at
|
| 200 |
+
|
| 201 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 202 |
+
|
| 203 |
+
Unless required by applicable law or agreed to in writing, software
|
| 204 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 205 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 206 |
+
See the License for the specific language governing permissions and
|
| 207 |
+
limitations under the License.
|
| 208 |
+
|
| 209 |
+
License-File: LICENSE
|
tool/groundingdino.egg-info/SOURCES.txt
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
LICENSE
|
| 2 |
+
README.md
|
| 3 |
+
setup.py
|
| 4 |
+
groundingdino/__init__.py
|
| 5 |
+
groundingdino/version.py
|
| 6 |
+
groundingdino.egg-info/PKG-INFO
|
| 7 |
+
groundingdino.egg-info/SOURCES.txt
|
| 8 |
+
groundingdino.egg-info/dependency_links.txt
|
| 9 |
+
groundingdino.egg-info/requires.txt
|
| 10 |
+
groundingdino.egg-info/top_level.txt
|
| 11 |
+
groundingdino/config/GroundingDINO_SwinB_cfg.py
|
| 12 |
+
groundingdino/config/GroundingDINO_SwinT_OGC.py
|
| 13 |
+
groundingdino/config/__init__.py
|
| 14 |
+
groundingdino/datasets/__init__.py
|
| 15 |
+
groundingdino/datasets/cocogrounding_eval.py
|
| 16 |
+
groundingdino/datasets/transforms.py
|
| 17 |
+
groundingdino/models/__init__.py
|
| 18 |
+
groundingdino/models/registry.py
|
| 19 |
+
groundingdino/models/GroundingDINO/__init__.py
|
| 20 |
+
groundingdino/models/GroundingDINO/bertwarper.py
|
| 21 |
+
groundingdino/models/GroundingDINO/fuse_modules.py
|
| 22 |
+
groundingdino/models/GroundingDINO/groundingdino.py
|
| 23 |
+
groundingdino/models/GroundingDINO/ms_deform_attn.py
|
| 24 |
+
groundingdino/models/GroundingDINO/transformer.py
|
| 25 |
+
groundingdino/models/GroundingDINO/transformer_vanilla.py
|
| 26 |
+
groundingdino/models/GroundingDINO/utils.py
|
| 27 |
+
groundingdino/models/GroundingDINO/backbone/__init__.py
|
| 28 |
+
groundingdino/models/GroundingDINO/backbone/backbone.py
|
| 29 |
+
groundingdino/models/GroundingDINO/backbone/position_encoding.py
|
| 30 |
+
groundingdino/models/GroundingDINO/backbone/swin_transformer.py
|
| 31 |
+
groundingdino/util/__init__.py
|
| 32 |
+
groundingdino/util/box_ops.py
|
| 33 |
+
groundingdino/util/get_tokenlizer.py
|
| 34 |
+
groundingdino/util/inference.py
|
| 35 |
+
groundingdino/util/logger.py
|
| 36 |
+
groundingdino/util/misc.py
|
| 37 |
+
groundingdino/util/slconfig.py
|
| 38 |
+
groundingdino/util/slio.py
|
| 39 |
+
groundingdino/util/time_counter.py
|
| 40 |
+
groundingdino/util/utils.py
|
| 41 |
+
groundingdino/util/visualizer.py
|
| 42 |
+
groundingdino/util/vl_utils.py
|
tool/groundingdino.egg-info/dependency_links.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
tool/groundingdino.egg-info/requires.txt
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
torch
|
| 2 |
+
torchvision
|
| 3 |
+
transformers
|
| 4 |
+
addict
|
| 5 |
+
yapf
|
| 6 |
+
timm
|
| 7 |
+
numpy
|
| 8 |
+
opencv-python
|
| 9 |
+
supervision==0.6.0
|
| 10 |
+
pycocotools
|
tool/groundingdino.egg-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
groundingdino
|
tool/groundingdino/.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
tool/groundingdino/__init__.py
ADDED
|
File without changes
|
tool/groundingdino/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (205 Bytes). View file
|
|
|
tool/groundingdino/config/GroundingDINO_SwinB_cfg.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
batch_size = 1
|
| 2 |
+
modelname = "groundingdino"
|
| 3 |
+
backbone = "swin_B_384_22k"
|
| 4 |
+
position_embedding = "sine"
|
| 5 |
+
pe_temperatureH = 20
|
| 6 |
+
pe_temperatureW = 20
|
| 7 |
+
return_interm_indices = [1, 2, 3]
|
| 8 |
+
backbone_freeze_keywords = None
|
| 9 |
+
enc_layers = 6
|
| 10 |
+
dec_layers = 6
|
| 11 |
+
pre_norm = False
|
| 12 |
+
dim_feedforward = 2048
|
| 13 |
+
hidden_dim = 256
|
| 14 |
+
dropout = 0.0
|
| 15 |
+
nheads = 8
|
| 16 |
+
num_queries = 900
|
| 17 |
+
query_dim = 4
|
| 18 |
+
num_patterns = 0
|
| 19 |
+
num_feature_levels = 4
|
| 20 |
+
enc_n_points = 4
|
| 21 |
+
dec_n_points = 4
|
| 22 |
+
two_stage_type = "standard"
|
| 23 |
+
two_stage_bbox_embed_share = False
|
| 24 |
+
two_stage_class_embed_share = False
|
| 25 |
+
transformer_activation = "relu"
|
| 26 |
+
dec_pred_bbox_embed_share = True
|
| 27 |
+
dn_box_noise_scale = 1.0
|
| 28 |
+
dn_label_noise_ratio = 0.5
|
| 29 |
+
dn_label_coef = 1.0
|
| 30 |
+
dn_bbox_coef = 1.0
|
| 31 |
+
embed_init_tgt = True
|
| 32 |
+
dn_labelbook_size = 2000
|
| 33 |
+
max_text_len = 256
|
| 34 |
+
text_encoder_type = "bert-base-uncased"
|
| 35 |
+
use_text_enhancer = True
|
| 36 |
+
use_fusion_layer = True
|
| 37 |
+
use_checkpoint = True
|
| 38 |
+
use_transformer_ckpt = True
|
| 39 |
+
use_text_cross_attention = True
|
| 40 |
+
text_dropout = 0.0
|
| 41 |
+
fusion_dropout = 0.0
|
| 42 |
+
fusion_droppath = 0.1
|
| 43 |
+
sub_sentence_present = True
|
tool/groundingdino/config/GroundingDINO_SwinT_OGC.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
batch_size = 1
|
| 2 |
+
modelname = "groundingdino"
|
| 3 |
+
backbone = "swin_T_224_1k"
|
| 4 |
+
position_embedding = "sine"
|
| 5 |
+
pe_temperatureH = 20
|
| 6 |
+
pe_temperatureW = 20
|
| 7 |
+
return_interm_indices = [1, 2, 3]
|
| 8 |
+
backbone_freeze_keywords = None
|
| 9 |
+
enc_layers = 6
|
| 10 |
+
dec_layers = 6
|
| 11 |
+
pre_norm = False
|
| 12 |
+
dim_feedforward = 2048
|
| 13 |
+
hidden_dim = 256
|
| 14 |
+
dropout = 0.0
|
| 15 |
+
nheads = 8
|
| 16 |
+
num_queries = 900
|
| 17 |
+
query_dim = 4
|
| 18 |
+
num_patterns = 0
|
| 19 |
+
num_feature_levels = 4
|
| 20 |
+
enc_n_points = 4
|
| 21 |
+
dec_n_points = 4
|
| 22 |
+
two_stage_type = "standard"
|
| 23 |
+
two_stage_bbox_embed_share = False
|
| 24 |
+
two_stage_class_embed_share = False
|
| 25 |
+
transformer_activation = "relu"
|
| 26 |
+
dec_pred_bbox_embed_share = True
|
| 27 |
+
dn_box_noise_scale = 1.0
|
| 28 |
+
dn_label_noise_ratio = 0.5
|
| 29 |
+
dn_label_coef = 1.0
|
| 30 |
+
dn_bbox_coef = 1.0
|
| 31 |
+
embed_init_tgt = True
|
| 32 |
+
dn_labelbook_size = 2000
|
| 33 |
+
max_text_len = 256
|
| 34 |
+
text_encoder_type = "bert-base-uncased"
|
| 35 |
+
use_text_enhancer = True
|
| 36 |
+
use_fusion_layer = True
|
| 37 |
+
use_checkpoint = True
|
| 38 |
+
use_transformer_ckpt = True
|
| 39 |
+
use_text_cross_attention = True
|
| 40 |
+
text_dropout = 0.0
|
| 41 |
+
fusion_dropout = 0.0
|
| 42 |
+
fusion_droppath = 0.1
|
| 43 |
+
sub_sentence_present = True
|
tool/groundingdino/config/__init__.py
ADDED
|
File without changes
|
tool/groundingdino/datasets/__init__.py
ADDED
|
File without changes
|
tool/groundingdino/datasets/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (214 Bytes). View file
|
|
|
tool/groundingdino/datasets/__pycache__/transforms.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
tool/groundingdino/datasets/cocogrounding_eval.py
ADDED
|
@@ -0,0 +1,269 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------
|
| 2 |
+
# Grounding DINO. Midified by Shilong Liu.
|
| 3 |
+
# url: https://github.com/IDEA-Research/GroundingDINO
|
| 4 |
+
# Copyright (c) 2023 IDEA. All Rights Reserved.
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 6 |
+
# ------------------------------------------------------------------------
|
| 7 |
+
# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
|
| 8 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 9 |
+
"""
|
| 10 |
+
COCO evaluator that works in distributed mode.
|
| 11 |
+
|
| 12 |
+
Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
|
| 13 |
+
The difference is that there is less copy-pasting from pycocotools
|
| 14 |
+
in the end of the file, as python3 can suppress prints with contextlib
|
| 15 |
+
"""
|
| 16 |
+
import contextlib
|
| 17 |
+
import copy
|
| 18 |
+
import os
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
import pycocotools.mask as mask_util
|
| 22 |
+
import torch
|
| 23 |
+
from pycocotools.coco import COCO
|
| 24 |
+
from pycocotools.cocoeval import COCOeval
|
| 25 |
+
|
| 26 |
+
from groundingdino.util.misc import all_gather
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class CocoGroundingEvaluator(object):
|
| 30 |
+
def __init__(self, coco_gt, iou_types, useCats=True):
|
| 31 |
+
assert isinstance(iou_types, (list, tuple))
|
| 32 |
+
coco_gt = copy.deepcopy(coco_gt)
|
| 33 |
+
self.coco_gt = coco_gt
|
| 34 |
+
|
| 35 |
+
self.iou_types = iou_types
|
| 36 |
+
self.coco_eval = {}
|
| 37 |
+
for iou_type in iou_types:
|
| 38 |
+
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
|
| 39 |
+
self.coco_eval[iou_type].useCats = useCats
|
| 40 |
+
|
| 41 |
+
self.img_ids = []
|
| 42 |
+
self.eval_imgs = {k: [] for k in iou_types}
|
| 43 |
+
self.useCats = useCats
|
| 44 |
+
|
| 45 |
+
def update(self, predictions):
|
| 46 |
+
img_ids = list(np.unique(list(predictions.keys())))
|
| 47 |
+
self.img_ids.extend(img_ids)
|
| 48 |
+
|
| 49 |
+
for iou_type in self.iou_types:
|
| 50 |
+
results = self.prepare(predictions, iou_type)
|
| 51 |
+
|
| 52 |
+
# suppress pycocotools prints
|
| 53 |
+
with open(os.devnull, "w") as devnull:
|
| 54 |
+
with contextlib.redirect_stdout(devnull):
|
| 55 |
+
coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
|
| 56 |
+
|
| 57 |
+
coco_eval = self.coco_eval[iou_type]
|
| 58 |
+
|
| 59 |
+
coco_eval.cocoDt = coco_dt
|
| 60 |
+
coco_eval.params.imgIds = list(img_ids)
|
| 61 |
+
coco_eval.params.useCats = self.useCats
|
| 62 |
+
img_ids, eval_imgs = evaluate(coco_eval)
|
| 63 |
+
|
| 64 |
+
self.eval_imgs[iou_type].append(eval_imgs)
|
| 65 |
+
|
| 66 |
+
def synchronize_between_processes(self):
|
| 67 |
+
for iou_type in self.iou_types:
|
| 68 |
+
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
|
| 69 |
+
create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
|
| 70 |
+
|
| 71 |
+
def accumulate(self):
|
| 72 |
+
for coco_eval in self.coco_eval.values():
|
| 73 |
+
coco_eval.accumulate()
|
| 74 |
+
|
| 75 |
+
def summarize(self):
|
| 76 |
+
for iou_type, coco_eval in self.coco_eval.items():
|
| 77 |
+
print("IoU metric: {}".format(iou_type))
|
| 78 |
+
coco_eval.summarize()
|
| 79 |
+
|
| 80 |
+
def prepare(self, predictions, iou_type):
|
| 81 |
+
if iou_type == "bbox":
|
| 82 |
+
return self.prepare_for_coco_detection(predictions)
|
| 83 |
+
elif iou_type == "segm":
|
| 84 |
+
return self.prepare_for_coco_segmentation(predictions)
|
| 85 |
+
elif iou_type == "keypoints":
|
| 86 |
+
return self.prepare_for_coco_keypoint(predictions)
|
| 87 |
+
else:
|
| 88 |
+
raise ValueError("Unknown iou type {}".format(iou_type))
|
| 89 |
+
|
| 90 |
+
def prepare_for_coco_detection(self, predictions):
|
| 91 |
+
coco_results = []
|
| 92 |
+
for original_id, prediction in predictions.items():
|
| 93 |
+
if len(prediction) == 0:
|
| 94 |
+
continue
|
| 95 |
+
|
| 96 |
+
boxes = prediction["boxes"]
|
| 97 |
+
boxes = convert_to_xywh(boxes).tolist()
|
| 98 |
+
scores = prediction["scores"].tolist()
|
| 99 |
+
labels = prediction["labels"].tolist()
|
| 100 |
+
|
| 101 |
+
coco_results.extend(
|
| 102 |
+
[
|
| 103 |
+
{
|
| 104 |
+
"image_id": original_id,
|
| 105 |
+
"category_id": labels[k],
|
| 106 |
+
"bbox": box,
|
| 107 |
+
"score": scores[k],
|
| 108 |
+
}
|
| 109 |
+
for k, box in enumerate(boxes)
|
| 110 |
+
]
|
| 111 |
+
)
|
| 112 |
+
return coco_results
|
| 113 |
+
|
| 114 |
+
def prepare_for_coco_segmentation(self, predictions):
|
| 115 |
+
coco_results = []
|
| 116 |
+
for original_id, prediction in predictions.items():
|
| 117 |
+
if len(prediction) == 0:
|
| 118 |
+
continue
|
| 119 |
+
|
| 120 |
+
scores = prediction["scores"]
|
| 121 |
+
labels = prediction["labels"]
|
| 122 |
+
masks = prediction["masks"]
|
| 123 |
+
|
| 124 |
+
masks = masks > 0.5
|
| 125 |
+
|
| 126 |
+
scores = prediction["scores"].tolist()
|
| 127 |
+
labels = prediction["labels"].tolist()
|
| 128 |
+
|
| 129 |
+
rles = [
|
| 130 |
+
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
|
| 131 |
+
for mask in masks
|
| 132 |
+
]
|
| 133 |
+
for rle in rles:
|
| 134 |
+
rle["counts"] = rle["counts"].decode("utf-8")
|
| 135 |
+
|
| 136 |
+
coco_results.extend(
|
| 137 |
+
[
|
| 138 |
+
{
|
| 139 |
+
"image_id": original_id,
|
| 140 |
+
"category_id": labels[k],
|
| 141 |
+
"segmentation": rle,
|
| 142 |
+
"score": scores[k],
|
| 143 |
+
}
|
| 144 |
+
for k, rle in enumerate(rles)
|
| 145 |
+
]
|
| 146 |
+
)
|
| 147 |
+
return coco_results
|
| 148 |
+
|
| 149 |
+
def prepare_for_coco_keypoint(self, predictions):
|
| 150 |
+
coco_results = []
|
| 151 |
+
for original_id, prediction in predictions.items():
|
| 152 |
+
if len(prediction) == 0:
|
| 153 |
+
continue
|
| 154 |
+
|
| 155 |
+
boxes = prediction["boxes"]
|
| 156 |
+
boxes = convert_to_xywh(boxes).tolist()
|
| 157 |
+
scores = prediction["scores"].tolist()
|
| 158 |
+
labels = prediction["labels"].tolist()
|
| 159 |
+
keypoints = prediction["keypoints"]
|
| 160 |
+
keypoints = keypoints.flatten(start_dim=1).tolist()
|
| 161 |
+
|
| 162 |
+
coco_results.extend(
|
| 163 |
+
[
|
| 164 |
+
{
|
| 165 |
+
"image_id": original_id,
|
| 166 |
+
"category_id": labels[k],
|
| 167 |
+
"keypoints": keypoint,
|
| 168 |
+
"score": scores[k],
|
| 169 |
+
}
|
| 170 |
+
for k, keypoint in enumerate(keypoints)
|
| 171 |
+
]
|
| 172 |
+
)
|
| 173 |
+
return coco_results
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def convert_to_xywh(boxes):
|
| 177 |
+
xmin, ymin, xmax, ymax = boxes.unbind(1)
|
| 178 |
+
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def merge(img_ids, eval_imgs):
|
| 182 |
+
all_img_ids = all_gather(img_ids)
|
| 183 |
+
all_eval_imgs = all_gather(eval_imgs)
|
| 184 |
+
|
| 185 |
+
merged_img_ids = []
|
| 186 |
+
for p in all_img_ids:
|
| 187 |
+
merged_img_ids.extend(p)
|
| 188 |
+
|
| 189 |
+
merged_eval_imgs = []
|
| 190 |
+
for p in all_eval_imgs:
|
| 191 |
+
merged_eval_imgs.append(p)
|
| 192 |
+
|
| 193 |
+
merged_img_ids = np.array(merged_img_ids)
|
| 194 |
+
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
|
| 195 |
+
|
| 196 |
+
# keep only unique (and in sorted order) images
|
| 197 |
+
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
|
| 198 |
+
merged_eval_imgs = merged_eval_imgs[..., idx]
|
| 199 |
+
|
| 200 |
+
return merged_img_ids, merged_eval_imgs
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
|
| 204 |
+
img_ids, eval_imgs = merge(img_ids, eval_imgs)
|
| 205 |
+
img_ids = list(img_ids)
|
| 206 |
+
eval_imgs = list(eval_imgs.flatten())
|
| 207 |
+
|
| 208 |
+
coco_eval.evalImgs = eval_imgs
|
| 209 |
+
coco_eval.params.imgIds = img_ids
|
| 210 |
+
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
#################################################################
|
| 214 |
+
# From pycocotools, just removed the prints and fixed
|
| 215 |
+
# a Python3 bug about unicode not defined
|
| 216 |
+
#################################################################
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def evaluate(self):
|
| 220 |
+
"""
|
| 221 |
+
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
|
| 222 |
+
:return: None
|
| 223 |
+
"""
|
| 224 |
+
# tic = time.time()
|
| 225 |
+
# print('Running per image evaluation...')
|
| 226 |
+
p = self.params
|
| 227 |
+
# add backward compatibility if useSegm is specified in params
|
| 228 |
+
if p.useSegm is not None:
|
| 229 |
+
p.iouType = "segm" if p.useSegm == 1 else "bbox"
|
| 230 |
+
print("useSegm (deprecated) is not None. Running {} evaluation".format(p.iouType))
|
| 231 |
+
# print('Evaluate annotation type *{}*'.format(p.iouType))
|
| 232 |
+
p.imgIds = list(np.unique(p.imgIds))
|
| 233 |
+
if p.useCats:
|
| 234 |
+
p.catIds = list(np.unique(p.catIds))
|
| 235 |
+
p.maxDets = sorted(p.maxDets)
|
| 236 |
+
self.params = p
|
| 237 |
+
|
| 238 |
+
self._prepare()
|
| 239 |
+
# loop through images, area range, max detection number
|
| 240 |
+
catIds = p.catIds if p.useCats else [-1]
|
| 241 |
+
|
| 242 |
+
if p.iouType == "segm" or p.iouType == "bbox":
|
| 243 |
+
computeIoU = self.computeIoU
|
| 244 |
+
elif p.iouType == "keypoints":
|
| 245 |
+
computeIoU = self.computeOks
|
| 246 |
+
self.ious = {
|
| 247 |
+
(imgId, catId): computeIoU(imgId, catId)
|
| 248 |
+
for imgId in p.imgIds
|
| 249 |
+
for catId in catIds}
|
| 250 |
+
|
| 251 |
+
evaluateImg = self.evaluateImg
|
| 252 |
+
maxDet = p.maxDets[-1]
|
| 253 |
+
evalImgs = [
|
| 254 |
+
evaluateImg(imgId, catId, areaRng, maxDet)
|
| 255 |
+
for catId in catIds
|
| 256 |
+
for areaRng in p.areaRng
|
| 257 |
+
for imgId in p.imgIds
|
| 258 |
+
]
|
| 259 |
+
# this is NOT in the pycocotools code, but could be done outside
|
| 260 |
+
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
|
| 261 |
+
self._paramsEval = copy.deepcopy(self.params)
|
| 262 |
+
# toc = time.time()
|
| 263 |
+
# print('DONE (t={:0.2f}s).'.format(toc-tic))
|
| 264 |
+
return p.imgIds, evalImgs
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
#################################################################
|
| 268 |
+
# end of straight copy from pycocotools, just removing the prints
|
| 269 |
+
#################################################################
|
tool/groundingdino/datasets/transforms.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
Transforms and data augmentation for both image + bbox.
|
| 4 |
+
"""
|
| 5 |
+
import os
|
| 6 |
+
import random
|
| 7 |
+
|
| 8 |
+
import PIL
|
| 9 |
+
import torch
|
| 10 |
+
import torchvision.transforms as T
|
| 11 |
+
import torchvision.transforms.functional as F
|
| 12 |
+
|
| 13 |
+
from groundingdino.util.box_ops import box_xyxy_to_cxcywh
|
| 14 |
+
from groundingdino.util.misc import interpolate
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def crop(image, target, region):
    """Crop ``image`` to ``region`` and clip/filter annotations in ``target`` to match.

    Args:
        image: input PIL image.
        target: annotation dict; may contain "boxes" (xyxy, absolute pixels),
            "masks", "labels", "area", "iscrowd", "positive_map",
            "strings_positive", "size".
        region: (top, left, height, width) crop window, as returned by
            ``torchvision.transforms.RandomCrop.get_params``.

    Returns:
        (cropped_image, new_target) — instances whose clipped box/mask ends up
        with zero area are dropped from every per-instance field.
    """
    cropped_image = F.crop(image, *region)

    target = target.copy()
    i, j, h, w = region

    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])

    # per-instance fields that must be filtered together with boxes/masks below
    fields = ["labels", "area", "iscrowd", "positive_map"]

    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # shift boxes into crop coordinates, then clamp corners to the window
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        # recompute areas from the clipped boxes
        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        target["area"] = area
        fields.append("boxes")

    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target["masks"] = target["masks"][:, i : i + h, j : j + w]
        fields.append("masks")

    # remove elements for which the boxes or masks that have zero area
    if "boxes" in target or "masks" in target:
        # favor boxes selection when defining which elements to keep
        # this is compatible with previous implementation
        if "boxes" in target:
            cropped_boxes = target["boxes"].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target["masks"].flatten(1).any(1)

        for field in fields:
            if field in target:
                target[field] = target[field][keep]

        # debug/visualization hook: also filter per-instance phrase strings
        if os.environ.get("IPDB_SHILONG_DEBUG", None) == "INFO":
            # for debug and visualization only.
            if "strings_positive" in target:
                target["strings_positive"] = [
                    _i for _i, _j in zip(target["strings_positive"], keep) if _j
                ]

    return cropped_image, target
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def hflip(image, target):
    """Mirror ``image`` horizontally and update box/mask annotations to match."""
    flipped_image = F.hflip(image)
    width = image.size[0]

    target = target.copy()
    if "boxes" in target:
        # Reflect xyxy boxes about the vertical axis:
        # new_xmin = W - old_xmax, new_xmax = W - old_xmin (column swap + negate).
        reflected = target["boxes"][:, [2, 1, 0, 3]]
        reflected = reflected * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor(
            [width, 0, width, 0]
        )
        target["boxes"] = reflected

    if "masks" in target:
        target["masks"] = target["masks"].flip(-1)

    return flipped_image, target
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def resize(image, target, size, max_size=None):
    """Resize ``image`` (and scale its annotations) to ``size``.

    Args:
        image: PIL image.
        target: annotation dict or None.
        size: either a minimum side length (int) or an explicit (w, h) pair.
        max_size: optional cap on the longer side when ``size`` is scalar.

    Returns:
        (rescaled_image, rescaled_target); target is None if it came in None.
    """
    # size can be min_size (scalar) or (w, h) tuple

    def get_size_with_aspect_ratio(image_size, size, max_size=None):
        # Aspect-ratio-preserving: shorter side becomes ``size`` unless that
        # would push the longer side past ``max_size``; then shrink ``size``.
        w, h = image_size
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * size > max_size:
                size = int(round(max_size * min_original_size / max_original_size))

        if (w <= h and w == size) or (h <= w and h == size):
            return (h, w)

        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)

        return (oh, ow)

    def get_size(image_size, size, max_size=None):
        # Explicit (w, h) is returned reversed as (h, w) for F.resize.
        if isinstance(size, (list, tuple)):
            return size[::-1]
        else:
            return get_size_with_aspect_ratio(image_size, size, max_size)

    size = get_size(image.size, size, max_size)
    rescaled_image = F.resize(image, size)

    if target is None:
        return rescaled_image, None

    # per-axis scale factors (new / old); PIL .size is (w, h)
    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
    ratio_width, ratio_height = ratios

    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        scaled_boxes = boxes * torch.as_tensor(
            [ratio_width, ratio_height, ratio_width, ratio_height]
        )
        target["boxes"] = scaled_boxes

    if "area" in target:
        area = target["area"]
        scaled_area = area * (ratio_width * ratio_height)
        target["area"] = scaled_area

    h, w = size
    target["size"] = torch.tensor([h, w])

    if "masks" in target:
        # nearest-neighbour resize of the masks, then re-binarize
        target["masks"] = (
            interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5
        )

    return rescaled_image, target
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def pad(image, target, padding):
    """Pad ``image`` on the bottom/right by ``padding = (pad_x, pad_y)`` pixels."""
    pad_right, pad_bottom = padding[0], padding[1]
    # assumes that we only pad on the bottom right corners;
    # torchvision's F.pad takes (left, top, right, bottom)
    padded_image = F.pad(image, (0, 0, pad_right, pad_bottom))
    if target is None:
        return padded_image, None

    target = target.copy()
    # record the new (h, w); boxes need no shift since padding is bottom-right only
    target["size"] = torch.tensor(padded_image.size[::-1])
    if "masks" in target:
        target["masks"] = torch.nn.functional.pad(
            target["masks"], (0, pad_right, 0, pad_bottom)
        )
    return padded_image, target
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class ResizeDebug(object):
    """Deterministically resize to a fixed ``size`` (debugging aid)."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        resized_img, resized_target = resize(img, target, self.size)
        return resized_img, resized_target
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class RandomCrop(object):
    """Crop a random window of fixed ``size`` = (h, w) from the image."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        window = T.RandomCrop.get_params(img, self.size)
        return crop(img, target, window)
| 177 |
+
|
| 178 |
+
|
| 179 |
+
class RandomSizeCrop(object):
    """Randomly crop a window whose width/height lie in [min_size, max_size].

    If ``respect_boxes`` is True, retry (up to ``max_patience`` attempts) until a
    crop keeps every ground-truth box; the final attempt is returned regardless.
    """

    def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False):
        # respect_boxes: True to keep all boxes
        #                False to tolerate the box filter dropping some
        self.min_size = min_size
        self.max_size = max_size
        self.respect_boxes = respect_boxes

    def __call__(self, img: PIL.Image.Image, target: dict):
        init_boxes = len(target["boxes"])
        max_patience = 10
        for i in range(max_patience):
            w = random.randint(self.min_size, min(img.width, self.max_size))
            h = random.randint(self.min_size, min(img.height, self.max_size))
            region = T.RandomCrop.get_params(img, [h, w])
            result_img, result_target = crop(img, target, region)
            # Accept when box preservation is not required, when every box
            # survived, or when patience is exhausted (last attempt always wins).
            if (
                not self.respect_boxes
                or len(result_target["boxes"]) == init_boxes
                or i == max_patience - 1
            ):
                return result_img, result_target
        # Unreachable: the loop always returns on its final iteration. The
        # original trailing `return result_img, result_target` here was dead
        # code (and would have been a NameError had the loop not run).
| 202 |
+
|
| 203 |
+
|
| 204 |
+
class CenterCrop(object):
    """Crop the central (crop_height, crop_width) region of the image."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        img_w, img_h = img.size
        out_h, out_w = self.size
        # top-left corner of the centered crop window
        top = int(round((img_h - out_h) / 2.0))
        left = int(round((img_w - out_w) / 2.0))
        return crop(img, target, (top, left, out_h, out_w))
| 214 |
+
|
| 215 |
+
|
| 216 |
+
class RandomHorizontalFlip(object):
    """Apply ``hflip`` with probability ``p``; otherwise pass inputs through."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img, target):
        if random.random() >= self.p:
            return img, target
        return hflip(img, target)
| 224 |
+
|
| 225 |
+
|
| 226 |
+
class RandomResize(object):
    """Resize to a size drawn uniformly from ``sizes``, optionally capped by ``max_size``."""

    def __init__(self, sizes, max_size=None):
        assert isinstance(sizes, (list, tuple))
        self.sizes = sizes
        self.max_size = max_size

    def __call__(self, img, target=None):
        chosen = random.choice(self.sizes)
        return resize(img, target, chosen, self.max_size)
| 235 |
+
|
| 236 |
+
|
| 237 |
+
class RandomPad(object):
    """Pad the bottom/right edges by independent uniform amounts in [0, max_pad]."""

    def __init__(self, max_pad):
        self.max_pad = max_pad

    def __call__(self, img, target):
        # two independent draws: x first, then y (same RNG order as before)
        dx = random.randint(0, self.max_pad)
        dy = random.randint(0, self.max_pad)
        return pad(img, target, (dx, dy))
| 245 |
+
|
| 246 |
+
|
| 247 |
+
class RandomSelect(object):
    """
    Randomly selects between transforms1 and transforms2,
    with probability p for transforms1 and (1 - p) for transforms2
    """

    def __init__(self, transforms1, transforms2, p=0.5):
        self.transforms1 = transforms1
        self.transforms2 = transforms2
        self.p = p

    def __call__(self, img, target):
        chosen = self.transforms1 if random.random() < self.p else self.transforms2
        return chosen(img, target)
| 262 |
+
|
| 263 |
+
|
| 264 |
+
class ToTensor(object):
    """Convert the PIL image to a tensor; the target passes through unchanged."""

    def __call__(self, img, target):
        tensor_img = F.to_tensor(img)
        return tensor_img, target
| 267 |
+
|
| 268 |
+
|
| 269 |
+
class RandomErasing(object):
    """Image-only wrapper around ``torchvision.transforms.RandomErasing``."""

    def __init__(self, *args, **kwargs):
        self.eraser = T.RandomErasing(*args, **kwargs)

    def __call__(self, img, target):
        erased = self.eraser(img)
        return erased, target
| 275 |
+
|
| 276 |
+
|
| 277 |
+
class Normalize(object):
    """Normalize the image tensor and convert boxes to normalized cxcywh."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, image, target=None):
        image = F.normalize(image, mean=self.mean, std=self.std)
        if target is None:
            return image, None

        target = target.copy()
        h, w = image.shape[-2:]
        if "boxes" in target:
            # absolute xyxy -> cxcywh scaled to [0, 1] by image size
            scale = torch.tensor([w, h, w, h], dtype=torch.float32)
            target["boxes"] = box_xyxy_to_cxcywh(target["boxes"]) / scale
        return image, target
| 294 |
+
|
| 295 |
+
|
| 296 |
+
class Compose(object):
    """Chain (image, target) transforms, threading both values through each step."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target

    def __repr__(self):
        # one indented line per transform, wrapped in "ClassName( ... )"
        body = "".join("\n    {0}".format(t) for t in self.transforms)
        return self.__class__.__name__ + "(" + body + "\n)"
tool/groundingdino/models/.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
tool/groundingdino/models/GroundingDINO/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------
|
| 2 |
+
# Grounding DINO
|
| 3 |
+
# url: https://github.com/IDEA-Research/GroundingDINO
|
| 4 |
+
# Copyright (c) 2023 IDEA. All Rights Reserved.
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 6 |
+
# ------------------------------------------------------------------------
|
| 7 |
+
# Conditional DETR
|
| 8 |
+
# Copyright (c) 2021 Microsoft. All Rights Reserved.
|
| 9 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 10 |
+
# ------------------------------------------------------------------------
|
| 11 |
+
# Copied from DETR (https://github.com/facebookresearch/detr)
|
| 12 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
| 13 |
+
# ------------------------------------------------------------------------
|
| 14 |
+
|
| 15 |
+
from .groundingdino import build_groundingdino
|
tool/groundingdino/models/GroundingDINO/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (283 Bytes). View file
|
|
|
tool/groundingdino/models/GroundingDINO/__pycache__/bertwarper.cpython-310.pyc
ADDED
|
Binary file (7.25 kB). View file
|
|
|
tool/groundingdino/models/GroundingDINO/__pycache__/fuse_modules.cpython-310.pyc
ADDED
|
Binary file (7.81 kB). View file
|
|
|
tool/groundingdino/models/GroundingDINO/__pycache__/groundingdino.cpython-310.pyc
ADDED
|
Binary file (11.4 kB). View file
|
|
|
tool/groundingdino/models/GroundingDINO/__pycache__/ms_deform_attn.cpython-310.pyc
ADDED
|
Binary file (11.8 kB). View file
|
|
|
tool/groundingdino/models/GroundingDINO/__pycache__/transformer.cpython-310.pyc
ADDED
|
Binary file (19.4 kB). View file
|
|
|
tool/groundingdino/models/GroundingDINO/__pycache__/transformer_vanilla.cpython-310.pyc
ADDED
|
Binary file (3.48 kB). View file
|
|
|
tool/groundingdino/models/GroundingDINO/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (9.6 kB). View file
|
|
|
tool/groundingdino/models/GroundingDINO/backbone/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .backbone import build_backbone
|
tool/groundingdino/models/GroundingDINO/backbone/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (282 Bytes). View file
|
|
|
tool/groundingdino/models/GroundingDINO/backbone/__pycache__/backbone.cpython-310.pyc
ADDED
|
Binary file (6.28 kB). View file
|
|
|
tool/groundingdino/models/GroundingDINO/backbone/__pycache__/position_encoding.cpython-310.pyc
ADDED
|
Binary file (5.18 kB). View file
|
|
|
tool/groundingdino/models/GroundingDINO/backbone/__pycache__/swin_transformer.cpython-310.pyc
ADDED
|
Binary file (20.6 kB). View file
|
|
|
tool/groundingdino/models/GroundingDINO/backbone/backbone.py
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------
|
| 2 |
+
# Grounding DINO
|
| 3 |
+
# url: https://github.com/IDEA-Research/GroundingDINO
|
| 4 |
+
# Copyright (c) 2023 IDEA. All Rights Reserved.
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 6 |
+
# ------------------------------------------------------------------------
|
| 7 |
+
# Conditional DETR
|
| 8 |
+
# Copyright (c) 2021 Microsoft. All Rights Reserved.
|
| 9 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 10 |
+
# ------------------------------------------------------------------------
|
| 11 |
+
# Copied from DETR (https://github.com/facebookresearch/detr)
|
| 12 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
| 13 |
+
# ------------------------------------------------------------------------
|
| 14 |
+
|
| 15 |
+
"""
|
| 16 |
+
Backbone modules.
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
from typing import Dict, List
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
import torch.nn.functional as F
|
| 23 |
+
import torchvision
|
| 24 |
+
from torch import nn
|
| 25 |
+
from torchvision.models._utils import IntermediateLayerGetter
|
| 26 |
+
|
| 27 |
+
from groundingdino.util.misc import NestedTensor, clean_state_dict, is_main_process
|
| 28 |
+
|
| 29 |
+
from .position_encoding import build_position_encoding
|
| 30 |
+
from .swin_transformer import build_swin_transformer
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rqsrt,
    without which any other models than torchvision.models.resnet[18,34,50,101]
    produce nans.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        # all state is registered as (non-trainable) buffers, never parameters
        for buf_name, buf_value in (
            ("weight", torch.ones(n)),
            ("bias", torch.zeros(n)),
            ("running_mean", torch.zeros(n)),
            ("running_var", torch.ones(n)),
        ):
            self.register_buffer(buf_name, buf_value)

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        # Frozen BN keeps no num_batches_tracked buffer; drop it from checkpoints
        # so loading does not report an unexpected key.
        state_dict.pop(prefix + "num_batches_tracked", None)
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x):
        # Reshape buffers to (1, C, 1, 1) up front so the op chain stays
        # fuser-friendly, then fold BN into a single scale-and-shift.
        shape = (1, -1, 1, 1)
        eps = 1e-5
        scale = self.weight.reshape(shape) * (self.running_var.reshape(shape) + eps).rsqrt()
        shift = self.bias.reshape(shape) - self.running_mean.reshape(shape) * scale
        return x * scale + shift
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class BackboneBase(nn.Module):
    """Wrap a torchvision-style backbone and expose selected intermediate
    feature maps as NestedTensors keyed by stage index."""

    def __init__(
        self,
        backbone: nn.Module,
        train_backbone: bool,
        num_channels: int,
        return_interm_indices: list,
    ):
        super().__init__()
        # Freeze parameters outside layer2-4 (or everything when not training
        # the backbone). NOTE: `and` binds tighter than `or`, so this reads as
        # `not train_backbone OR (name not in layer2/3/4)`.
        for name, parameter in backbone.named_parameters():
            if (
                not train_backbone
                or "layer2" not in name
                and "layer3" not in name
                and "layer4" not in name
            ):
                parameter.requires_grad_(False)

        # Map the requested stages to output keys: for indices [1, 2, 3] this
        # builds {"layer2": "1", "layer3": "2", "layer4": "3"}; for [3] it
        # builds {"layer4": "3"}.
        return_layers = {}
        for idx, layer_index in enumerate(return_interm_indices):
            return_layers.update(
                {"layer{}".format(5 - len(return_interm_indices) + idx): "{}".format(layer_index)}
            )

        # if len:
        #     if use_stage1_feature:
        #         return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
        #     else:
        #         return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
        # else:
        #     return_layers = {'layer4': "0"}
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        self.num_channels = num_channels

    def forward(self, tensor_list: NestedTensor):
        """Run the backbone and downsample the padding mask to each feature resolution."""
        xs = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for name, x in xs.items():
            m = tensor_list.mask
            assert m is not None
            # resize the boolean padding mask to this feature map's (H, W)
            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(x, mask)
        # import ipdb; ipdb.set_trace()
        return out
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm."""

    def __init__(
        self,
        name: str,
        train_backbone: bool,
        dilation: bool,
        return_interm_indices: list,
        batch_norm=FrozenBatchNorm2d,
    ):
        # Only torchvision resnets are accepted here; pretrained weights are
        # fetched on the main process only (presumably so only one rank
        # downloads — confirm how other ranks obtain the weights).
        if name in ["resnet18", "resnet34", "resnet50", "resnet101"]:
            backbone = getattr(torchvision.models, name)(
                replace_stride_with_dilation=[False, False, dilation],
                pretrained=is_main_process(),
                norm_layer=batch_norm,
            )
        else:
            raise NotImplementedError("Why you can get here with name {}".format(name))
        # num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
        # resnet18/34 pass the branch above but are rejected here on purpose.
        assert name not in ("resnet18", "resnet34"), "Only resnet50 and resnet101 are available."
        assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
        # channel widths of resnet stages layer1..layer4; keep the trailing
        # entries matching the requested intermediate indices
        num_channels_all = [256, 512, 1024, 2048]
        num_channels = num_channels_all[4 - len(return_interm_indices) :]
        super().__init__(backbone, train_backbone, num_channels, return_interm_indices)
| 144 |
+
|
| 145 |
+
|
| 146 |
+
class Joiner(nn.Sequential):
    """Bundle a backbone (index 0) with its positional encoder (index 1)."""

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)

    def forward(self, tensor_list: NestedTensor):
        features = self[0](tensor_list)
        outs: List[NestedTensor] = []
        positions = []
        for _, feature in features.items():
            outs.append(feature)
            # position encoding, cast to the feature tensor's dtype
            positions.append(self[1](feature).to(feature.tensors.dtype))

        return outs, positions
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def build_backbone(args):
    """
    Build the (backbone + positional-encoding) Joiner from config ``args``.

    Useful args:
        - backbone: backbone name
        - lr_backbone:
        - dilation
        - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]
        - backbone_freeze_keywords:
        - use_checkpoint: for swin only for now

    """
    position_embedding = build_position_encoding(args)
    # hard-coded: backbone is always trained (the guard below never fires)
    train_backbone = True
    if not train_backbone:
        raise ValueError("Please set lr_backbone > 0")
    return_interm_indices = args.return_interm_indices
    assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
    # NOTE(review): bare attribute access — no effect beyond raising
    # AttributeError when the arg is missing; looks like leftover code.
    args.backbone_freeze_keywords
    use_checkpoint = getattr(args, "use_checkpoint", False)

    if args.backbone in ["resnet50", "resnet101"]:
        backbone = Backbone(
            args.backbone,
            train_backbone,
            args.dilation,
            return_interm_indices,
            batch_norm=FrozenBatchNorm2d,
        )
        bb_num_channels = backbone.num_channels
    elif args.backbone in [
        "swin_T_224_1k",
        "swin_B_224_22k",
        "swin_B_384_22k",
        "swin_L_224_22k",
        "swin_L_384_22k",
    ]:
        # e.g. "swin_T_224_1k" -> pretrain image size 224
        pretrain_img_size = int(args.backbone.split("_")[-2])
        backbone = build_swin_transformer(
            args.backbone,
            pretrain_img_size=pretrain_img_size,
            out_indices=tuple(return_interm_indices),
            dilation=False,
            use_checkpoint=use_checkpoint,
        )

        # keep only the feature dims of the requested stages
        bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]
    else:
        raise NotImplementedError("Unknown backbone {}".format(args.backbone))

    assert len(bb_num_channels) == len(
        return_interm_indices
    ), f"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}"

    model = Joiner(backbone, position_embedding)
    model.num_channels = bb_num_channels
    assert isinstance(
        bb_num_channels, List
    ), "bb_num_channels is expected to be a List but {}".format(type(bb_num_channels))
    # import ipdb; ipdb.set_trace()
    return model
tool/groundingdino/models/GroundingDINO/backbone/position_encoding.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------
|
| 2 |
+
# Grounding DINO
|
| 3 |
+
# url: https://github.com/IDEA-Research/GroundingDINO
|
| 4 |
+
# Copyright (c) 2023 IDEA. All Rights Reserved.
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 6 |
+
# ------------------------------------------------------------------------
|
| 7 |
+
# DINO
|
| 8 |
+
# Copyright (c) 2022 IDEA. All Rights Reserved.
|
| 9 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 10 |
+
# ------------------------------------------------------------------------
|
| 11 |
+
# Conditional DETR
|
| 12 |
+
# Copyright (c) 2021 Microsoft. All Rights Reserved.
|
| 13 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 14 |
+
# ------------------------------------------------------------------------
|
| 15 |
+
# Copied from DETR (https://github.com/facebookresearch/detr)
|
| 16 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
| 17 |
+
# ------------------------------------------------------------------------
|
| 18 |
+
|
| 19 |
+
"""
|
| 20 |
+
Various positional encodings for the transformer.
|
| 21 |
+
"""
|
| 22 |
+
import math
|
| 23 |
+
|
| 24 |
+
import torch
|
| 25 |
+
from torch import nn
|
| 26 |
+
|
| 27 |
+
from groundingdino.util.misc import NestedTensor
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        """
        Args:
            num_pos_feats: channels per axis (output has 2 * num_pos_feats channels).
            temperature: frequency base of the sinusoids.
            normalize: if True, scale coordinates to [0, scale] before encoding.
            scale: normalization range; defaults to 2*pi and requires normalize=True.
        """
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, tensor_list: NestedTensor):
        """Return sinusoidal embeddings of shape (B, 2*num_pos_feats, H, W)."""
        x = tensor_list.tensors
        mask = tensor_list.mask
        assert mask is not None
        not_mask = ~mask
        # cumulative sums over valid pixels give per-pixel (row, col) coordinates
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        # Use explicit floor division: `//` on a float tensor is deprecated in
        # modern PyTorch. Numerically identical; matches PositionEmbeddingSineHW.
        dim_t = self.temperature ** (
            2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats
        )

        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # interleave sin on even channels, cos on odd channels
        pos_x = torch.stack(
            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos_y = torch.stack(
            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class PositionEmbeddingSineHW(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.

    Unlike PositionEmbeddingSine, the H and W axes use independent temperatures.
    """

    def __init__(
        self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None
    ):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperatureH = temperatureH
        self.temperatureW = temperatureW
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, tensor_list: NestedTensor):
        # Returns embeddings of shape (B, 2*num_pos_feats, H, W).
        x = tensor_list.tensors
        mask = tensor_list.mask
        assert mask is not None
        not_mask = ~mask
        # cumulative sums over non-padded pixels give per-pixel coordinates
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)

        # import ipdb; ipdb.set_trace()

        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        # per-axis frequency ladders (pairs of channels share a frequency)
        dim_tx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_tx = self.temperatureW ** (2 * (torch.div(dim_tx, 2, rounding_mode='floor')) / self.num_pos_feats)
        pos_x = x_embed[:, :, :, None] / dim_tx

        dim_ty = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_ty = self.temperatureH ** (2 * (torch.div(dim_ty, 2, rounding_mode='floor')) / self.num_pos_feats)
        pos_y = y_embed[:, :, :, None] / dim_ty

        # interleave sin (even channels) and cos (odd channels)
        pos_x = torch.stack(
            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos_y = torch.stack(
            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)

        # import ipdb; ipdb.set_trace()

        return pos
| 132 |
+
|
| 133 |
+
|
| 134 |
+
class PositionEmbeddingLearned(nn.Module):
    """
    Absolute pos embedding, learned (per-row and per-column tables, max 50 each).
    """

    def __init__(self, num_pos_feats=256):
        super().__init__()
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        # uniform init for both embedding tables
        for table in (self.row_embed, self.col_embed):
            nn.init.uniform_(table.weight)

    def forward(self, tensor_list: NestedTensor):
        x = tensor_list.tensors
        height, width = x.shape[-2:]
        col_ids = torch.arange(width, device=x.device)
        row_ids = torch.arange(height, device=x.device)
        x_emb = self.col_embed(col_ids)  # (W, C)
        y_emb = self.row_embed(row_ids)  # (H, C)
        # broadcast to a (H, W, 2C) grid, then reorder to (B, 2C, H, W)
        grid = torch.cat(
            [
                x_emb.unsqueeze(0).repeat(height, 1, 1),
                y_emb.unsqueeze(1).repeat(1, width, 1),
            ],
            dim=-1,
        )
        return grid.permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def build_position_encoding(args):
    """Instantiate the positional-encoding module selected by ``args.position_embedding``.

    Supported values: "v2"/"sine" -> PositionEmbeddingSineHW,
    "v3"/"learned" -> PositionEmbeddingLearned. Raises ValueError otherwise.
    """
    n_steps = args.hidden_dim // 2
    kind = args.position_embedding
    if kind in ("v2", "sine"):
        # TODO find a better way of exposing other arguments
        return PositionEmbeddingSineHW(
            n_steps,
            temperatureH=args.pe_temperatureH,
            temperatureW=args.pe_temperatureW,
            normalize=True,
        )
    if kind in ("v3", "learned"):
        return PositionEmbeddingLearned(n_steps)
    raise ValueError(f"not supported {kind}")
|
tool/groundingdino/models/GroundingDINO/backbone/swin_transformer.py
ADDED
|
@@ -0,0 +1,802 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------
|
| 2 |
+
# Grounding DINO
|
| 3 |
+
# url: https://github.com/IDEA-Research/GroundingDINO
|
| 4 |
+
# Copyright (c) 2023 IDEA. All Rights Reserved.
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 6 |
+
# ------------------------------------------------------------------------
|
| 7 |
+
# DINO
|
| 8 |
+
# Copyright (c) 2022 IDEA. All Rights Reserved.
|
| 9 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 10 |
+
# --------------------------------------------------------
|
| 11 |
+
# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py
|
| 12 |
+
# --------------------------------------------------------
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
import torch
|
| 16 |
+
import torch.nn as nn
|
| 17 |
+
import torch.nn.functional as F
|
| 18 |
+
import torch.utils.checkpoint as checkpoint
|
| 19 |
+
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
|
| 20 |
+
|
| 21 |
+
from groundingdino.util.misc import NestedTensor
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout."""

    def __init__(
        self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0
    ):
        super().__init__()
        # Hidden/output widths default to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: tensor of shape (B, H, W, C); H and W must be divisible by window_size.
        window_size (int): side length of each window.

    Returns:
        Tensor of shape (num_windows*B, window_size, window_size, C), windows
        ordered row-major over the window grid.
    """
    B, H, W, C = x.shape
    grid = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return grid.view(-1, window_size, window_size, C)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def window_reverse(windows, window_size, H, W):
    """Inverse of window_partition: reassemble windows into a full feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image (already padded to a multiple of window_size)
        W (int): Width of image (already padded to a multiple of window_size)
    Returns:
        x: (B, H, W, C)
    """
    # Pure integer arithmetic: the original computed
    # int(windows.shape[0] / (H * W / window_size / window_size)), whose float
    # intermediate can lose precision for very large H * W.
    B = windows.shape[0] // (H * W // window_size // window_size)
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class WindowAttention(nn.Module):
    """Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(
        self,
        dim,
        window_size,
        num_heads,
        qkv_bias=True,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):

        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim**-0.5

        # define a parameter table of relative position bias
        # one learnable bias per (relative dy, relative dx, head) combination
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
        )  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        # flatten 2-D relative offset to a single index into the bias table
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # buffer (not a parameter): moves with the module but is not trained
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=0.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """Forward function.
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        # single projection produces q, k, v stacked along a leading dim of 3
        qkv = (
            self.qkv(x)
            .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = q @ k.transpose(-2, -1)

        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)
        ].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
        )  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1
        ).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # broadcast the per-window shift mask over batch and heads
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
class SwinTransformerBlock(nn.Module):
    """Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(
        self,
        dim,
        num_heads,
        window_size=7,
        shift_size=0,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim,
            window_size=to_2tuple(self.window_size),
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )

        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop
        )

        # Spatial size is assigned externally (by BasicLayer.forward) before each call.
        self.H = None
        self.W = None

    def forward(self, x, mask_matrix):
        """Forward function.
        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.
            mask_matrix: Attention mask for cyclic shift.
        """
        B, L, C = x.shape
        H, W = self.H, self.W
        assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # pad feature maps to multiples of window size
        pad_l = pad_t = 0
        pad_r = (self.window_size - W % self.window_size) % self.window_size
        pad_b = (self.window_size - H % self.window_size) % self.window_size
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = x.shape

        # cyclic shift (SW-MSA); the mask hides cross-boundary attention
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            attn_mask = mask_matrix
        else:
            shifted_x = x
            attn_mask = None

        # partition windows
        x_windows = window_partition(
            shifted_x, self.window_size
        )  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(
            -1, self.window_size * self.window_size, C
        )  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=attn_mask)  # nW*B, window_size*window_size, C

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp)  # B H' W' C

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x

        # crop away the right/bottom padding added above
        if pad_r > 0 or pad_b > 0:
            x = x[:, :H, :W, :].contiguous()

        x = x.view(B, H * W, C)

        # FFN (both sub-blocks are residual with stochastic depth)
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
class PatchMerging(nn.Module):
    """Patch Merging Layer: 2x spatial downsample, 4C -> 2C channel projection.
    Args:
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x, H, W):
        """Merge each 2x2 neighborhood into one token.
        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.
        """
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"

        x = x.view(B, H, W, C)

        # Pad bottom/right by one row/column when a spatial dim is odd.
        if (H % 2 == 1) or (W % 2 == 1):
            x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))

        # Gather the four corners of every 2x2 group and stack them channel-wise.
        corners = [x[:, r::2, c::2, :] for r, c in ((0, 0), (1, 0), (0, 1), (1, 1))]
        merged = torch.cat(corners, -1).view(B, -1, 4 * C)  # B H/2*W/2 4*C

        return self.reduction(self.norm(merged))
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
class BasicLayer(nn.Module):
    """A basic Swin Transformer layer for one stage.
    Args:
        dim (int): Number of feature channels
        depth (int): Depths of this stage.
        num_heads (int): Number of attention head.
        window_size (int): Local window size. Default: 7.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(
        self,
        dim,
        depth,
        num_heads,
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
    ):
        super().__init__()
        self.window_size = window_size
        self.shift_size = window_size // 2
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks: even-indexed blocks use W-MSA (no shift),
        # odd-indexed blocks use SW-MSA (shifted windows)
        self.blocks = nn.ModuleList(
            [
                SwinTransformerBlock(
                    dim=dim,
                    num_heads=num_heads,
                    window_size=window_size,
                    shift_size=0 if (i % 2 == 0) else window_size // 2,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                    norm_layer=norm_layer,
                )
                for i in range(depth)
            ]
        )

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x, H, W):
        """Forward function.
        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.
        Returns:
            (x_out, H, W, x_down, Wh, Ww): pre-downsample features with their
            resolution, and the (possibly downsampled) features for the next
            stage with theirs.
        """

        # calculate attention mask for SW-MSA: partition the padded canvas into
        # regions so that, after the cyclic shift, tokens from different regions
        # cannot attend to each other (pairs get -100 added before softmax).
        Hp = int(np.ceil(H / self.window_size)) * self.window_size
        Wp = int(np.ceil(W / self.window_size)) * self.window_size
        img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device)  # 1 Hp Wp 1
        h_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        w_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1

        mask_windows = window_partition(
            img_mask, self.window_size
        )  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
            attn_mask == 0, float(0.0)
        )

        for blk in self.blocks:
            # blocks read spatial size from attributes, not arguments
            blk.H, blk.W = H, W
            if self.use_checkpoint:
                # trade compute for memory: recompute activations in backward
                x = checkpoint.checkpoint(blk, x, attn_mask)
            else:
                x = blk(x, attn_mask)
        if self.downsample is not None:
            x_down = self.downsample(x, H, W)
            Wh, Ww = (H + 1) // 2, (W + 1) // 2
            return x, H, W, x_down, Wh, Ww
        else:
            return x, H, W, x, H, W
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
class PatchEmbed(nn.Module):
    """Image to Patch Embedding
    Args:
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size
        self.in_chans = in_chans
        self.embed_dim = embed_dim

        # Non-overlapping patch projection: conv with stride == kernel == patch size.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None

    def forward(self, x):
        """Project an image batch to patch tokens, padding right/bottom first."""
        _, _, H, W = x.size()
        ph, pw = self.patch_size
        # Pad so both spatial dims are multiples of the patch size.
        if W % pw != 0:
            x = F.pad(x, (0, pw - W % pw))
        if H % ph != 0:
            x = F.pad(x, (0, 0, 0, ph - H % ph))

        x = self.proj(x)  # B C Wh Ww
        if self.norm is not None:
            # LayerNorm expects channels-last tokens; flatten, normalize, restore.
            Wh, Ww = x.size(2), x.size(3)
            x = self.norm(x.flatten(2).transpose(1, 2))
            x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)

        return x
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
class SwinTransformer(nn.Module):
|
| 502 |
+
"""Swin Transformer backbone.
|
| 503 |
+
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
|
| 504 |
+
https://arxiv.org/pdf/2103.14030
|
| 505 |
+
Args:
|
| 506 |
+
pretrain_img_size (int): Input image size for training the pretrained model,
|
| 507 |
+
used in absolute postion embedding. Default 224.
|
| 508 |
+
patch_size (int | tuple(int)): Patch size. Default: 4.
|
| 509 |
+
in_chans (int): Number of input image channels. Default: 3.
|
| 510 |
+
embed_dim (int): Number of linear projection output channels. Default: 96.
|
| 511 |
+
depths (tuple[int]): Depths of each Swin Transformer stage.
|
| 512 |
+
num_heads (tuple[int]): Number of attention head of each stage.
|
| 513 |
+
window_size (int): Window size. Default: 7.
|
| 514 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
|
| 515 |
+
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
|
| 516 |
+
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
|
| 517 |
+
drop_rate (float): Dropout rate.
|
| 518 |
+
attn_drop_rate (float): Attention dropout rate. Default: 0.
|
| 519 |
+
drop_path_rate (float): Stochastic depth rate. Default: 0.2.
|
| 520 |
+
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
|
| 521 |
+
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
|
| 522 |
+
patch_norm (bool): If True, add normalization after patch embedding. Default: True.
|
| 523 |
+
out_indices (Sequence[int]): Output from which stages.
|
| 524 |
+
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
|
| 525 |
+
-1 means not freezing any parameters.
|
| 526 |
+
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
|
| 527 |
+
dilation (bool): if True, the output size if 16x downsample, ow 32x downsample.
|
| 528 |
+
"""
|
| 529 |
+
|
| 530 |
+
    def __init__(
        self,
        pretrain_img_size=224,
        patch_size=4,
        in_chans=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.2,
        norm_layer=nn.LayerNorm,
        ape=False,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        frozen_stages=-1,
        dilation=False,
        use_checkpoint=False,
    ):
        super().__init__()

        self.pretrain_img_size = pretrain_img_size
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.dilation = dilation

        # if use_checkpoint:
        #     print("use_checkpoint!!!!!!!!!!!!!!!!!!!!!!!!")

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None,
        )

        # absolute position embedding (optional; sized to the pretraining grid)
        if self.ape:
            pretrain_img_size = to_2tuple(pretrain_img_size)
            patch_size = to_2tuple(patch_size)
            patches_resolution = [
                pretrain_img_size[0] // patch_size[0],
                pretrain_img_size[1] // patch_size[1],
            ]

            self.absolute_pos_embed = nn.Parameter(
                torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])
            )
            trunc_normal_(self.absolute_pos_embed, std=0.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth: per-block drop-path rates increase linearly with depth
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
        ]  # stochastic depth decay rule

        # build layers
        self.layers = nn.ModuleList()
        # prepare downsample list: every stage downsamples except the last;
        # with dilation the second-to-last also keeps its resolution.
        downsamplelist = [PatchMerging for i in range(self.num_layers)]
        downsamplelist[-1] = None
        num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)]
        if self.dilation:
            downsamplelist[-2] = None
            num_features[-1] = int(embed_dim * 2 ** (self.num_layers - 1)) // 2
        for i_layer in range(self.num_layers):
            layer = BasicLayer(
                # dim=int(embed_dim * 2 ** i_layer),
                dim=num_features[i_layer],
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=window_size,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
                norm_layer=norm_layer,
                # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                downsample=downsamplelist[i_layer],
                use_checkpoint=use_checkpoint,
            )
            self.layers.append(layer)

        # num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
        self.num_features = num_features

        # add a norm layer for each output stage; registered as norm{i} so
        # pretrained checkpoints keyed by that name load correctly
        for i_layer in out_indices:
            layer = norm_layer(num_features[i_layer])
            layer_name = f"norm{i_layer}"
            self.add_module(layer_name, layer)

        self._freeze_stages()
|
| 635 |
+
|
| 636 |
+
def _freeze_stages(self):
|
| 637 |
+
if self.frozen_stages >= 0:
|
| 638 |
+
self.patch_embed.eval()
|
| 639 |
+
for param in self.patch_embed.parameters():
|
| 640 |
+
param.requires_grad = False
|
| 641 |
+
|
| 642 |
+
if self.frozen_stages >= 1 and self.ape:
|
| 643 |
+
self.absolute_pos_embed.requires_grad = False
|
| 644 |
+
|
| 645 |
+
if self.frozen_stages >= 2:
|
| 646 |
+
self.pos_drop.eval()
|
| 647 |
+
for i in range(0, self.frozen_stages - 1):
|
| 648 |
+
m = self.layers[i]
|
| 649 |
+
m.eval()
|
| 650 |
+
for param in m.parameters():
|
| 651 |
+
param.requires_grad = False
|
| 652 |
+
|
| 653 |
+
# def init_weights(self, pretrained=None):
|
| 654 |
+
# """Initialize the weights in backbone.
|
| 655 |
+
# Args:
|
| 656 |
+
# pretrained (str, optional): Path to pre-trained weights.
|
| 657 |
+
# Defaults to None.
|
| 658 |
+
# """
|
| 659 |
+
|
| 660 |
+
# def _init_weights(m):
|
| 661 |
+
# if isinstance(m, nn.Linear):
|
| 662 |
+
# trunc_normal_(m.weight, std=.02)
|
| 663 |
+
# if isinstance(m, nn.Linear) and m.bias is not None:
|
| 664 |
+
# nn.init.constant_(m.bias, 0)
|
| 665 |
+
# elif isinstance(m, nn.LayerNorm):
|
| 666 |
+
# nn.init.constant_(m.bias, 0)
|
| 667 |
+
# nn.init.constant_(m.weight, 1.0)
|
| 668 |
+
|
| 669 |
+
# if isinstance(pretrained, str):
|
| 670 |
+
# self.apply(_init_weights)
|
| 671 |
+
# logger = get_root_logger()
|
| 672 |
+
# load_checkpoint(self, pretrained, strict=False, logger=logger)
|
| 673 |
+
# elif pretrained is None:
|
| 674 |
+
# self.apply(_init_weights)
|
| 675 |
+
# else:
|
| 676 |
+
# raise TypeError('pretrained must be a str or None')
|
| 677 |
+
|
| 678 |
+
def forward_raw(self, x):
    """Run the backbone on a raw image batch.

    Args:
        x: input tensor of shape (B, C, H, W).

    Returns:
        Tuple of feature maps, one (B, num_features[i], H_i, W_i) tensor
        per index in ``self.out_indices``.
    """
    feats = self.patch_embed(x)

    height, width = feats.size(2), feats.size(3)
    if self.ape:
        # Resize the learned position embedding to the current token grid.
        pos = F.interpolate(
            self.absolute_pos_embed, size=(height, width), mode="bicubic"
        )
        tokens = (feats + pos).flatten(2).transpose(1, 2)  # B, Wh*Ww, C
    else:
        tokens = feats.flatten(2).transpose(1, 2)
    tokens = self.pos_drop(tokens)

    pyramid = []
    for stage_idx in range(self.num_layers):
        stage = self.layers[stage_idx]
        stage_out, H, W, tokens, height, width = stage(tokens, height, width)

        if stage_idx in self.out_indices:
            stage_out = getattr(self, f"norm{stage_idx}")(stage_out)
            # Tokens back to a (B, C, H, W) feature map.
            fmap = (
                stage_out.view(-1, H, W, self.num_features[stage_idx])
                .permute(0, 3, 1, 2)
                .contiguous()
            )
            pyramid.append(fmap)
    # Example (swin_L_384_22k, dilation): input (2, 3, 1024, 1024) ->
    # [(2, 192, 256, 256), (2, 384, 128, 128),
    #  (2, 768, 64, 64), (2, 1536, 32, 32)]
    return tuple(pyramid)
|
| 711 |
+
|
| 712 |
+
def forward(self, tensor_list: NestedTensor):
    """Backbone forward over a padded batch.

    Args:
        tensor_list: NestedTensor holding ``tensors`` (B, C, H, W) and a
            padding ``mask`` (B, H, W); mask must not be None.

    Returns:
        Dict mapping output level index -> NestedTensor of the feature
        map and its mask resampled to the feature resolution.
    """
    feats = self.patch_embed(tensor_list.tensors)

    height, width = feats.size(2), feats.size(3)
    if self.ape:
        # Resize the learned position embedding to the current token grid.
        pos = F.interpolate(
            self.absolute_pos_embed, size=(height, width), mode="bicubic"
        )
        tokens = (feats + pos).flatten(2).transpose(1, 2)  # B, Wh*Ww, C
    else:
        tokens = feats.flatten(2).transpose(1, 2)
    tokens = self.pos_drop(tokens)

    feature_maps = []
    for stage_idx in range(self.num_layers):
        stage = self.layers[stage_idx]
        stage_out, H, W, tokens, height, width = stage(tokens, height, width)

        if stage_idx in self.out_indices:
            stage_out = getattr(self, f"norm{stage_idx}")(stage_out)
            # Tokens back to a (B, C, H, W) feature map.
            fmap = (
                stage_out.view(-1, H, W, self.num_features[stage_idx])
                .permute(0, 3, 1, 2)
                .contiguous()
            )
            feature_maps.append(fmap)
    # Example (swin_L_384_22k, dilation): input (2, 3, 1024, 1024) ->
    # [(2, 192, 256, 256), (2, 384, 128, 128),
    #  (2, 768, 64, 64), (2, 1536, 32, 32)]

    # Wrap each level with the padding mask downsampled to its size.
    mask = tensor_list.mask
    assert mask is not None
    outs_dict = {}
    for level, fmap in enumerate(feature_maps):
        level_mask = F.interpolate(
            mask[None].float(), size=fmap.shape[-2:]
        ).to(torch.bool)[0]
        outs_dict[level] = NestedTensor(fmap, level_mask)

    return outs_dict
|
| 755 |
+
|
| 756 |
+
def train(self, mode=True):
    """Switch train/eval mode while keeping the frozen stages frozen.

    Args:
        mode (bool): True for training mode, False for evaluation mode.
    """
    super(SwinTransformer, self).train(mode)
    # nn.Module.train() recursively flips every submodule to `mode`,
    # which would un-freeze the stages frozen in __init__ — re-apply.
    self._freeze_stages()
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
def build_swin_transformer(modelname, pretrain_img_size, **kw):
    """Build a SwinTransformer backbone from a named preset.

    Args:
        modelname: one of the preset keys listed below
            (e.g. "swin_T_224_1k", "swin_L_384_22k").
        pretrain_img_size: image size the checkpoint was pretrained with.
        **kw: extra keyword arguments forwarded to SwinTransformer;
            they override the preset values.

    Returns:
        A configured SwinTransformer instance.
    """
    presets = {
        "swin_T_224_1k": dict(
            embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7
        ),
        "swin_B_224_22k": dict(
            embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=7
        ),
        "swin_B_384_22k": dict(
            embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12
        ),
        "swin_L_224_22k": dict(
            embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=7
        ),
        "swin_L_384_22k": dict(
            embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12
        ),
    }
    assert modelname in [
        "swin_T_224_1k",
        "swin_B_224_22k",
        "swin_B_384_22k",
        "swin_L_224_22k",
        "swin_L_384_22k",
    ]

    # Caller-supplied kwargs take precedence over the preset.
    cfg = dict(presets[modelname])
    cfg.update(kw)
    return SwinTransformer(pretrain_img_size=pretrain_img_size, **cfg)
|
| 792 |
+
|
| 793 |
+
|
| 794 |
+
if __name__ == "__main__":
    # Smoke test: build the largest preset and run it at two resolutions.
    # The original version dropped into ipdb.set_trace() between the two
    # runs, which blocks execution and needs the non-stdlib ipdb package;
    # print the output shapes instead.
    model = build_swin_transformer("swin_L_384_22k", 384, dilation=True)

    for size in (1024, 384):
        x = torch.rand(2, 3, size, size)
        y = model.forward_raw(x)
        print(f"input {tuple(x.shape)} ->", [tuple(t.shape) for t in y])
|