roll-ai committed · verified
Commit e8bdafd · 1 Parent(s): 241ec33

Upload 333 files

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +22 -0
  2. finetune/Metric3D/.idea/Metric3D.iml +12 -0
  3. finetune/Metric3D/.idea/inspectionProfiles/profiles_settings.xml +6 -0
  4. finetune/Metric3D/.idea/modules.xml +8 -0
  5. finetune/Metric3D/.idea/vcs.xml +6 -0
  6. finetune/Metric3D/LICENSE +24 -0
  7. finetune/Metric3D/README.md +396 -0
  8. finetune/Metric3D/data/gene_annos_kitti_demo.py +32 -0
  9. finetune/Metric3D/data/gene_annos_nyu_demo.py +31 -0
  10. finetune/Metric3D/data/kitti_demo/depth/0000000005.png +3 -0
  11. finetune/Metric3D/data/kitti_demo/depth/0000000050.png +3 -0
  12. finetune/Metric3D/data/kitti_demo/depth/0000000100.png +3 -0
  13. finetune/Metric3D/data/kitti_demo/rgb/0000000005.png +3 -0
  14. finetune/Metric3D/data/kitti_demo/rgb/0000000050.png +3 -0
  15. finetune/Metric3D/data/kitti_demo/rgb/0000000100.png +3 -0
  16. finetune/Metric3D/data/kitti_demo/test_annotations.json +1 -0
  17. finetune/Metric3D/data/nyu_demo/depth/sync_depth_00000.png +3 -0
  18. finetune/Metric3D/data/nyu_demo/depth/sync_depth_00050.png +3 -0
  19. finetune/Metric3D/data/nyu_demo/depth/sync_depth_00100.png +3 -0
  20. finetune/Metric3D/data/nyu_demo/rgb/rgb_00000.jpg +0 -0
  21. finetune/Metric3D/data/nyu_demo/rgb/rgb_00050.jpg +0 -0
  22. finetune/Metric3D/data/nyu_demo/rgb/rgb_00100.jpg +0 -0
  23. finetune/Metric3D/data/nyu_demo/test_annotations.json +1 -0
  24. finetune/Metric3D/data/wild_demo/david-kohler-VFRTXGw1VjU-unsplash.jpg +3 -0
  25. finetune/Metric3D/data/wild_demo/jonathan-borba-CnthDZXCdoY-unsplash.jpg +3 -0
  26. finetune/Metric3D/data/wild_demo/randy-fath-G1yhU1Ej-9A-unsplash.jpg +3 -0
  27. finetune/Metric3D/data_info/__init__.py +2 -0
  28. finetune/Metric3D/data_info/pretrained_weight.py +16 -0
  29. finetune/Metric3D/data_info/public_datasets.py +7 -0
  30. finetune/Metric3D/hubconf.py +224 -0
  31. finetune/Metric3D/media/gifs/demo_1.gif +3 -0
  32. finetune/Metric3D/media/gifs/demo_12.gif +3 -0
  33. finetune/Metric3D/media/gifs/demo_2.gif +3 -0
  34. finetune/Metric3D/media/gifs/demo_22.gif +3 -0
  35. finetune/Metric3D/media/screenshots/challenge.PNG +3 -0
  36. finetune/Metric3D/media/screenshots/depth_normal.jpg +3 -0
  37. finetune/Metric3D/media/screenshots/metrology.jpg +3 -0
  38. finetune/Metric3D/media/screenshots/page2.png +3 -0
  39. finetune/Metric3D/media/screenshots/pipeline.png +3 -0
  40. finetune/Metric3D/mono/configs/HourglassDecoder/convlarge.0.3_150.py +25 -0
  41. finetune/Metric3D/mono/configs/HourglassDecoder/convtiny.0.3_150.py +25 -0
  42. finetune/Metric3D/mono/configs/HourglassDecoder/test_kitti_convlarge.0.3_150.py +25 -0
  43. finetune/Metric3D/mono/configs/HourglassDecoder/test_nyu_convlarge.0.3_150.py +25 -0
  44. finetune/Metric3D/mono/configs/HourglassDecoder/vit.raft5.giant2.py +33 -0
  45. finetune/Metric3D/mono/configs/HourglassDecoder/vit.raft5.large.py +33 -0
  46. finetune/Metric3D/mono/configs/HourglassDecoder/vit.raft5.small.py +33 -0
  47. finetune/Metric3D/mono/configs/__init__.py +1 -0
  48. finetune/Metric3D/mono/configs/_base_/_data_base_.py +13 -0
  49. finetune/Metric3D/mono/configs/_base_/datasets/_data_base_.py +12 -0
  50. finetune/Metric3D/mono/configs/_base_/default_runtime.py +4 -0
.gitattributes CHANGED
@@ -75,3 +75,25 @@ static/videos/various_types/food.mp4 filter=lfs diff=lfs merge=lfs -text
75
  static/videos/various_types/human.mp4 filter=lfs diff=lfs merge=lfs -text
76
  static/videos/various_types/landscape.mp4 filter=lfs diff=lfs merge=lfs -text
77
  static/videos/various_types/pets.mp4 filter=lfs diff=lfs merge=lfs -text
78
+ finetune/Metric3D/data/kitti_demo/depth/0000000005.png filter=lfs diff=lfs merge=lfs -text
79
+ finetune/Metric3D/data/kitti_demo/depth/0000000050.png filter=lfs diff=lfs merge=lfs -text
80
+ finetune/Metric3D/data/kitti_demo/depth/0000000100.png filter=lfs diff=lfs merge=lfs -text
81
+ finetune/Metric3D/data/kitti_demo/rgb/0000000005.png filter=lfs diff=lfs merge=lfs -text
82
+ finetune/Metric3D/data/kitti_demo/rgb/0000000050.png filter=lfs diff=lfs merge=lfs -text
83
+ finetune/Metric3D/data/kitti_demo/rgb/0000000100.png filter=lfs diff=lfs merge=lfs -text
84
+ finetune/Metric3D/data/nyu_demo/depth/sync_depth_00000.png filter=lfs diff=lfs merge=lfs -text
85
+ finetune/Metric3D/data/nyu_demo/depth/sync_depth_00050.png filter=lfs diff=lfs merge=lfs -text
86
+ finetune/Metric3D/data/nyu_demo/depth/sync_depth_00100.png filter=lfs diff=lfs merge=lfs -text
87
+ finetune/Metric3D/data/wild_demo/david-kohler-VFRTXGw1VjU-unsplash.jpg filter=lfs diff=lfs merge=lfs -text
88
+ finetune/Metric3D/data/wild_demo/jonathan-borba-CnthDZXCdoY-unsplash.jpg filter=lfs diff=lfs merge=lfs -text
89
+ finetune/Metric3D/data/wild_demo/randy-fath-G1yhU1Ej-9A-unsplash.jpg filter=lfs diff=lfs merge=lfs -text
90
+ finetune/Metric3D/media/gifs/demo_1.gif filter=lfs diff=lfs merge=lfs -text
91
+ finetune/Metric3D/media/gifs/demo_12.gif filter=lfs diff=lfs merge=lfs -text
92
+ finetune/Metric3D/media/gifs/demo_2.gif filter=lfs diff=lfs merge=lfs -text
93
+ finetune/Metric3D/media/gifs/demo_22.gif filter=lfs diff=lfs merge=lfs -text
94
+ finetune/Metric3D/media/screenshots/challenge.PNG filter=lfs diff=lfs merge=lfs -text
95
+ finetune/Metric3D/media/screenshots/depth_normal.jpg filter=lfs diff=lfs merge=lfs -text
96
+ finetune/Metric3D/media/screenshots/metrology.jpg filter=lfs diff=lfs merge=lfs -text
97
+ finetune/Metric3D/media/screenshots/page2.png filter=lfs diff=lfs merge=lfs -text
98
+ finetune/Metric3D/media/screenshots/pipeline.png filter=lfs diff=lfs merge=lfs -text
99
+ finetune/Metric3D/training/kitti_json_files/eigen_train.json filter=lfs diff=lfs merge=lfs -text
finetune/Metric3D/.idea/Metric3D.iml ADDED
@@ -0,0 +1,12 @@
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$" />
5
+ <orderEntry type="inheritedJdk" />
6
+ <orderEntry type="sourceFolder" forTests="false" />
7
+ </component>
8
+ <component name="PyDocumentationSettings">
9
+ <option name="format" value="PLAIN" />
10
+ <option name="myDocStringFormat" value="Plain" />
11
+ </component>
12
+ </module>
finetune/Metric3D/.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
finetune/Metric3D/.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/Metric3D.iml" filepath="$PROJECT_DIR$/.idea/Metric3D.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
finetune/Metric3D/.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="VcsDirectoryMappings">
4
+ <mapping directory="" vcs="Git" />
5
+ </component>
6
+ </project>
finetune/Metric3D/LICENSE ADDED
@@ -0,0 +1,24 @@
1
+ BSD 2-Clause License
2
+
3
+ Copyright (c) 2024, Wei Yin and Mu Hu
4
+
5
+ Redistribution and use in source and binary forms, with or without
6
+ modification, are permitted provided that the following conditions are met:
7
+
8
+ 1. Redistributions of source code must retain the above copyright notice, this
9
+ list of conditions and the following disclaimer.
10
+
11
+ 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ this list of conditions and the following disclaimer in the documentation
13
+ and/or other materials provided with the distribution.
14
+
15
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
finetune/Metric3D/README.md ADDED
@@ -0,0 +1,396 @@
1
+ # 🚀 Metric3D Project 🚀
2
+
3
+ **Official PyTorch implementation of Metric3Dv1 and Metric3Dv2:**
4
+
5
+ [1] [Metric3D: Towards Zero-shot Metric 3D Prediction from A Single Image](https://arxiv.org/abs/2307.10984)
6
+
7
+ [2] [Metric3Dv2: A Versatile Monocular Geometric Foundation Model for Zero-shot Metric Depth and Surface Normal Estimation](https://arxiv.org/abs/2404.15506)
8
+
9
+ <a href='https://jugghm.github.io/Metric3Dv2'><img src='https://img.shields.io/badge/project%[email protected]'></a>
10
+ <a href='https://arxiv.org/abs/2307.10984'><img src='https://img.shields.io/badge/arxiv-@Metric3Dv1-green'></a>
11
+ <a href='https://arxiv.org/abs/2404.15506'><img src='https://img.shields.io/badge/arxiv-@Metric3Dv2-red'></a>
12
+ <a href='https://huggingface.co/spaces/JUGGHM/Metric3D'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
13
+
14
+ [//]: # (### [Project Page]&#40;https://arxiv.org/abs/2307.08695&#41; | [v2 Paper]&#40;https://arxiv.org/abs/2307.10984&#41; | [v1 Arxiv]&#40;https://arxiv.org/abs/2307.10984&#41; | [Video]&#40;https://www.youtube.com/playlist?list=PLEuyXJsWqUNd04nwfm9gFBw5FVbcaQPl3&#41; | [Hugging Face 🤗]&#40;https://huggingface.co/spaces/JUGGHM/Metric3D&#41; )
15
+
16
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/metric3d-v2-a-versatile-monocular-geometric-1/monocular-depth-estimation-on-nyu-depth-v2)](https://paperswithcode.com/sota/monocular-depth-estimation-on-nyu-depth-v2?p=metric3d-v2-a-versatile-monocular-geometric-1)
17
+
18
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/metric3d-v2-a-versatile-monocular-geometric-1/monocular-depth-estimation-on-kitti-eigen)](https://paperswithcode.com/sota/monocular-depth-estimation-on-kitti-eigen?p=metric3d-v2-a-versatile-monocular-geometric-1)
19
+
20
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/metric3d-v2-a-versatile-monocular-geometric-1/surface-normals-estimation-on-nyu-depth-v2-1)](https://paperswithcode.com/sota/surface-normals-estimation-on-nyu-depth-v2-1?p=metric3d-v2-a-versatile-monocular-geometric-1)
21
+
22
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/metric3d-v2-a-versatile-monocular-geometric-1/surface-normals-estimation-on-ibims-1)](https://paperswithcode.com/sota/surface-normals-estimation-on-ibims-1?p=metric3d-v2-a-versatile-monocular-geometric-1)
23
+
24
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/metric3d-v2-a-versatile-monocular-geometric-1/surface-normals-estimation-on-scannetv2)](https://paperswithcode.com/sota/surface-normals-estimation-on-scannetv2?p=metric3d-v2-a-versatile-monocular-geometric-1)
25
+
26
+ 🏆 **Champion in [CVPR2023 Monocular Depth Estimation Challenge](https://jspenmar.github.io/MDEC)**
27
+
28
+ ## News
29
+ - `[2024/8]` Metric3Dv2 is accepted by TPAMI!
30
+ - `[2024/7/5]` Our stable-diffusion alternative GeoWizard has now been accepted by ECCV 2024! Check out the [repository](https://github.com/fuxiao0719/GeoWizard) and [paper](https://arxiv.org/abs/2403.12013) now for the finest-grained geometry ever! 🎉🎉🎉
31
+ - `[2024/6/25]` JSON files for the KITTI dataset are now available! Refer to [Training](./training/README.md) for more details.
32
+ - `[2024/6/3]` ONNX is supported! We appreciate [@xenova](https://github.com/xenova) for their remarkable efforts!
33
+ - `[2024/4/25]` Weights for ViT-giant2 model released!
34
+ - `[2024/4/11]` Training codes are released!
35
+ - `[2024/3/18]` [HuggingFace 🤗](https://huggingface.co/spaces/JUGGHM/Metric3D) GPU version updated!
36
+ - `[2024/3/18]` [Project page](https://jugghm.github.io/Metric3Dv2/) released!
37
+ - `[2024/3/18]` Metric3D V2 models released, supporting metric depth and surface normal now!
38
+ - `[2023/8/10]` Inference codes, pre-trained weights, and demo released.
39
+ - `[2023/7]` Metric3D accepted by ICCV 2023!
40
+ - `[2023/4]` The Champion of [2nd Monocular Depth Estimation Challenge](https://jspenmar.github.io/MDEC) in CVPR 2023
41
+
42
+ ## 🌼 Abstract
43
+ Metric3D is a strong and robust geometry foundation model for high-quality and zero-shot **metric depth** and **surface normal** estimation from a single image. It excels at solving in-the-wild scene reconstruction. It can directly help you measure the size of structures from a single image. Now it achieves SOTA performance on over 10 depth and normal benchmarks.
44
+
45
+ ![depth_normal](media/screenshots/depth_normal.jpg)
46
+
47
+ ![metrology](media/screenshots/metrology.jpg)
48
+
49
+
50
+ ## 📝 Benchmarks
51
+
52
+ ### Metric Depth
53
+
54
+ [//]: # (#### Zero-shot Testing)
55
+
56
+ [//]: # (Our models work well on both indoor and outdoor scenarios, compared with other zero-shot metric depth estimation methods.)
57
+
58
+ [//]: # ()
59
+ [//]: # (| | Backbone | KITTI $\delta 1$ ↑ | KITTI $\delta 2$ ↑ | KITTI $\delta 3$ ↑ | KITTI AbsRel ↓ | KITTI RMSE ↓ | KITTI RMS_log ↓ | NYU $\delta 1$ ↑ | NYU $\delta 2$ ↑ | NYU $\delta 3$ ↑ | NYU AbsRel ↓ | NYU RMSE ↓ | NYU log10 ↓ |)
60
+
61
+ [//]: # (|-----------------|------------|--------------------|---------------------|--------------------|-----------------|---------------|------------------|------------------|------------------|------------------|---------------|-------------|--------------|)
62
+
63
+ [//]: # (| ZeroDepth | ResNet-18 | 0.910 | 0.980 | 0.996 | 0.057 | 4.044 | 0.083 | 0.901 | 0.961 | - | 0.100 | 0.380 | - |)
64
+
65
+ [//]: # (| PolyMax | ConvNeXt-L | - | - | - | - | - | - | 0.969 | 0.996 | 0.999 | 0.067 | 0.250 | 0.033 |)
66
+
67
+ [//]: # (| Ours | ViT-L | 0.985 | 0.995 | 0.999 | 0.052 | 2.511 | 0.074 | 0.975 | 0.994 | 0.998 | 0.063 | 0.251 | 0.028 |)
68
+
69
+ [//]: # (| Ours | ViT-g2 | 0.989 | 0.996 | 0.999 | 0.051 | 2.403 | 0.080 | 0.980 | 0.997 | 0.999 | 0.067 | 0.260 | 0.030 |)
70
+
71
+ [//]: # ()
72
+ [//]: # ([//]: # &#40;| Adabins | Efficient-B5 | 0.964 | 0.995 | 0.999 | 0.058 | 2.360 | 0.088 | 0.903 | 0.984 | 0.997 | 0.103 | 0.0444 | 0.364 |&#41;)
73
+ [//]: # ([//]: # &#40;| NewCRFs | SwinT-L | 0.974 | 0.997 | 0.999 | 0.052 | 2.129 | 0.079 | 0.922 | 0.983 | 0.994 | 0.095 | 0.041 | 0.334 |&#41;)
74
+ [//]: # ([//]: # &#40;| Ours &#40;CSTM_label&#41; | ConvNeXt-L | 0.964 | 0.993 | 0.998 | 0.058 | 2.770 | 0.092 | 0.944 | 0.986 | 0.995 | 0.083 | 0.035 | 0.310 |&#41;)
75
+
76
+ [//]: # (#### Finetuned)
77
+ Our models rank 1st on the standard KITTI and NYU benchmarks.
78
+
79
+ | | Backbone | KITTI δ1 ↑ | KITTI δ2 ↑ | KITTI AbsRel ↓ | KITTI RMSE ↓ | KITTI RMS_log ↓ | NYU δ1 ↑ | NYU δ2 ↑ | NYU AbsRel ↓ | NYU RMSE ↓ | NYU log10 ↓ |
80
+ |---------------|-------------|------------|-------------|-----------------|---------------|------------------|----------|----------|---------------|-------------|--------------|
81
+ | ZoeDepth | ViT-Large | 0.971 | 0.995 | 0.053 | 2.281 | 0.082 | 0.953 | 0.995 | 0.077 | 0.277 | 0.033 |
82
+ | ZeroDepth | ResNet-18 | 0.968 | 0.996 | 0.057 | 2.087 | 0.083 | 0.954 | 0.995 | 0.074 | 0.269 | 0.103 |
83
+ | IEBins | SwinT-Large | 0.978 | 0.998 | 0.050 | 2.011 | 0.075 | 0.936 | 0.992 | 0.087 | 0.314 | 0.031 |
84
+ | DepthAnything | ViT-Large | 0.982 | 0.998 | 0.046 | 1.985 | 0.069 | 0.984 | 0.998 | 0.056 | 0.206 | 0.024 |
85
+ | Ours | ViT-Large | 0.985 | 0.998 | 0.044 | 1.985 | 0.064 | 0.989 | 0.998 | 0.047 | 0.183 | 0.020 |
86
+ | Ours | ViT-giant2 | 0.989 | 0.998 | 0.039 | 1.766 | 0.060 | 0.987 | 0.997 | 0.045 | 0.187 | 0.015 |
87
+
88
+ ### Affine-invariant Depth
89
+ Even compared to recent affine-invariant depth methods (Marigold and Depth Anything), our metric-depth (and normal) models still show superior performance.
90
+
91
+ | | #Data for Pretrain and Train | KITTI Absrel ↓ | KITTI δ1 ↑ | NYUv2 AbsRel ↓ | NYUv2 δ1 ↑ | DIODE-Full AbsRel ↓ | DIODE-Full δ1 ↑ | Eth3d AbsRel ↓ | Eth3d δ1 ↑ |
92
+ |-----------------------|----------------------------------------------|----------------|------------|-----------------|------------|---------------------|-----------------|----------------------|------------|
93
+ | OmniData (v2, ViT-L) | 1.3M + 12.2M | 0.069 | 0.948 | 0.074 | 0.945 | 0.149 | 0.835 | 0.166 | 0.778 |
94
+ | MariGold (LDMv2) | 5B + 74K | 0.099 | 0.916 | 0.055 | 0.961 | 0.308 | 0.773 | 0.127 | 0.960 |
95
+ | DepthAnything (ViT-L) | 142M + 63M | 0.076 | 0.947 | 0.043 | 0.981 | 0.277 | 0.759 | 0.065 | 0.882 |
96
+ | Ours (ViT-L) | 142M + 16M | 0.042 | 0.979 | 0.042 | 0.980 | 0.141 | 0.882 | 0.042 | 0.987 |
97
+ | Ours (ViT-g) | 142M + 16M | 0.043 | 0.982 | 0.043 | 0.981 | 0.136 | 0.895 | 0.042 | 0.983 |
98
+
99
+
100
+ ### Surface Normal
101
+ Our models also show powerful performance on normal benchmarks.
102
+
103
+ | | NYU 11.25° ↑ | NYU Mean ↓ | NYU RMS ↓ | ScanNet 11.25° ↑ | ScanNet Mean ↓ | ScanNet RMS ↓ | iBims 11.25° ↑ | iBims Mean ↓ | iBims RMS ↓ |
104
+ |--------------|----------|----------|-----------|-----------------|----------------|--------------|---------------|--------------|-------------|
105
+ | EESNU | 0.597 | 16.0 | 24.7 | 0.711 | 11.8 | 20.3 | 0.585 | 20.0 | - |
106
+ | IronDepth | - | - | - | - | - | - | 0.431 | 25.3 | 37.4 |
107
+ | PolyMax | 0.656 | 13.1 | 20.4 | - | - | - | - | - | - |
108
+ | Ours (ViT-L) | 0.688 | 12.0 | 19.2 | 0.760 | 9.9 | 16.4 | 0.694 | 19.4 | 34.9 |
109
+ | Ours (ViT-g) | 0.662 | 13.2 | 20.2 | 0.778 | 9.2 | 15.3 | 0.697 | 19.6 | 35.2 |
110
+
111
+
112
+
113
+ ## 🌈 DEMOs
114
+
115
+ ### Zero-shot monocular metric depth & surface normal
116
+ <img src="media/gifs/demo_1.gif" width="600" height="337">
117
+ <img src="media/gifs/demo_12.gif" width="600" height="337">
118
+
119
+ ### Zero-shot metric 3D recovery
120
+ <img src="media/gifs/demo_2.gif" width="600" height="337">
121
+
122
+ ### Improving monocular SLAM
123
+ <img src="media/gifs/demo_22.gif" width="600" height="337">
124
+
125
+ [//]: # (https://github.com/YvanYin/Metric3D/assets/35299633/f95815ef-2506-4193-a6d9-1163ea821268)
126
+
127
+ [//]: # (https://github.com/YvanYin/Metric3D/assets/35299633/ed00706c-41cc-49ea-accb-ad0532633cc2)
128
+
129
+ [//]: # (### Zero-shot metric 3D recovery)
130
+
131
+ [//]: # (https://github.com/YvanYin/Metric3D/assets/35299633/26cd7ae1-dd5a-4446-b275-54c5ca7ef945)
132
+
133
+ [//]: # (https://github.com/YvanYin/Metric3D/assets/35299633/21e5484b-c304-4fe3-b1d3-8eebc4e26e42)
134
+ [//]: # (### Monocular reconstruction for a Sequence)
135
+
136
+ [//]: # ()
137
+ [//]: # (### In-the-wild 3D reconstruction)
138
+
139
+ [//]: # ()
140
+ [//]: # (| | Image | Reconstruction | Pointcloud File |)
141
+
142
+ [//]: # (|:---------:|:------------------:|:------------------:|:--------:|)
143
+
144
+ [//]: # (| room | <img src="data/wild_demo/jonathan-borba-CnthDZXCdoY-unsplash.jpg" width="300" height="335"> | <img src="media/gifs/room.gif" width="300" height="335"> | [Download]&#40;https://drive.google.com/file/d/1P1izSegH2c4LUrXGiUksw037PVb0hjZr/view?usp=drive_link&#41; |)
145
+
146
+ [//]: # (| Colosseum | <img src="data/wild_demo/david-kohler-VFRTXGw1VjU-unsplash.jpg" width="300" height="169"> | <img src="media/gifs/colo.gif" width="300" height="169"> | [Download]&#40;https://drive.google.com/file/d/1jJCXe5IpxBhHDr0TZtNZhjxKTRUz56Hg/view?usp=drive_link&#41; |)
147
+
148
+ [//]: # (| chess | <img src="data/wild_demo/randy-fath-G1yhU1Ej-9A-unsplash.jpg" width="300" height="169" align=center> | <img src="media/gifs/chess.gif" width="300" height="169"> | [Download]&#40;https://drive.google.com/file/d/1oV_Foq25_p-tTDRTcyO2AzXEdFJQz-Wm/view?usp=drive_link&#41; |)
149
+
150
+ [//]: # ()
151
+ [//]: # (All three images are downloaded from [unplash]&#40;https://unsplash.com/&#41; and put in the data/wild_demo directory.)
152
+
153
+ [//]: # ()
154
+ [//]: # (### 3D metric reconstruction, Metric3D × DroidSLAM)
155
+
156
+ [//]: # (Metric3D can also provide scale information for DroidSLAM, help to solve the scale drift problem for better trajectories. )
157
+
158
+ [//]: # ()
159
+ [//]: # (#### Bird Eyes' View &#40;Left: Droid-SLAM &#40;mono&#41;. Right: Droid-SLAM with Metric-3D&#41;)
160
+
161
+ [//]: # ()
162
+ [//]: # (<div align=center>)
163
+
164
+ [//]: # (<img src="media/gifs/0028.gif"> )
165
+
166
+ [//]: # (</div>)
167
+
168
+ [//]: # ()
169
+ [//]: # (### Front View)
170
+
171
+ [//]: # ()
172
+ [//]: # (<div align=center>)
173
+
174
+ [//]: # (<img src="media/gifs/0028_fv.gif"> )
175
+
176
+ [//]: # (</div>)
177
+
178
+ [//]: # ()
179
+ [//]: # (#### KITTI odemetry evaluation &#40;Translational RMS drift &#40;t_rel, ↓&#41; / Rotational RMS drift &#40;r_rel, ↓&#41;&#41;)
180
+
181
+ [//]: # (| | Modality | seq 00 | seq 02 | seq 05 | seq 06 | seq 08 | seq 09 | seq 10 |)
182
+
183
+ [//]: # (|:----------:|:--------:|:----------:|:----------:|:---------:|:----------:|:----------:|:---------:|:---------:|)
184
+
185
+ [//]: # (| ORB-SLAM2 | Mono | 11.43/0.58 | 10.34/0.26 | 9.04/0.26 | 14.56/0.26 | 11.46/0.28 | 9.3/0.26 | 2.57/0.32 |)
186
+
187
+ [//]: # (| Droid-SLAM | Mono | 33.9/0.29 | 34.88/0.27 | 23.4/0.27 | 17.2/0.26 | 39.6/0.31 | 21.7/0.23 | 7/0.25 |)
188
+
189
+ [//]: # (| Droid+Ours | Mono | 1.44/0.37 | 2.64/0.29 | 1.44/0.25 | 0.6/0.2 | 2.2/0.3 | 1.63/0.22 | 2.73/0.23 |)
190
+
191
+ [//]: # (| ORB-SLAM2 | Stereo | 0.88/0.31 | 0.77/0.28 | 0.62/0.26 | 0.89/0.27 | 1.03/0.31 | 0.86/0.25 | 0.62/0.29 |)
192
+
193
+ [//]: # ()
194
+ [//]: # (Metric3D makes the mono-SLAM scale-aware, like stereo systems.)
195
+
196
+ [//]: # ()
197
+ [//]: # (#### KITTI sequence videos - Youtube)
198
+
199
+ [//]: # ([2011_09_30_drive_0028]&#40;https://youtu.be/gcTB4MgVCLQ&#41; /)
200
+
201
+ [//]: # ([2011_09_30_drive_0033]&#40;https://youtu.be/He581fmoPP4&#41; /)
202
+
203
+ [//]: # ([2011_09_30_drive_0034]&#40;https://youtu.be/I3PkukQ3_F8&#41;)
204
+
205
+ [//]: # ()
206
+ [//]: # (#### Estimated pose)
207
+
208
+ [//]: # ([2011_09_30_drive_0033]&#40;https://drive.google.com/file/d/1SMXWzLYrEdmBe6uYMR9ShtDXeFDewChv/view?usp=drive_link&#41; / )
209
+
210
+ [//]: # ([2011_09_30_drive_0034]&#40;https://drive.google.com/file/d/1ONU4GxpvTlgW0TjReF1R2i-WFxbbjQPG/view?usp=drive_link&#41; /)
211
+
212
+ [//]: # ([2011_10_03_drive_0042]&#40;https://drive.google.com/file/d/19fweg6p1Q6TjJD2KlD7EMA_aV4FIeQUD/view?usp=drive_link&#41;)
213
+
214
+ [//]: # ()
215
+ [//]: # (#### Pointcloud files)
216
+
217
+ [//]: # ([2011_09_30_drive_0033]&#40;https://drive.google.com/file/d/1K0o8DpUmLf-f_rue0OX1VaHlldpHBAfw/view?usp=drive_link&#41; /)
218
+
219
+ [//]: # ([2011_09_30_drive_0034]&#40;https://drive.google.com/file/d/1bvZ6JwMRyvi07H7Z2VD_0NX1Im8qraZo/view?usp=drive_link&#41; /)
220
+
221
+ [//]: # ([2011_10_03_drive_0042]&#40;https://drive.google.com/file/d/1Vw59F8nN5ApWdLeGKXvYgyS9SNKHKy4x/view?usp=drive_link&#41;)
222
+
223
+ ## 🔨 Installation
224
+ ### One-line Installation
225
+ For the ViT models, use the following environment:
226
+ ```bash
227
+ pip install -r requirements_v2.txt
228
+ ```
229
+
230
+ For the ConvNeXt-L model, use:
231
+ ```bash
232
+ pip install -r requirements_v1.txt
233
+ ```
234
+
235
+ ### Dataset annotation components
236
+ With off-the-shelf depth datasets, we need to generate JSON annotations compatible with this toolbox, organized as follows:
237
+ ```
238
+ dict(
239
+ 'files':list(
240
+ dict(
241
+ 'rgb': 'data/kitti_demo/rgb/xxx.png',
242
+ 'depth': 'data/kitti_demo/depth/xxx.png',
243
+ 'depth_scale': 1000.0, # the depth scale of the GT depth image
244
+ 'cam_in': [fx, fy, cx, cy],
245
+ ),
246
+
247
+ dict(
248
+ ...
249
+ ),
250
+
251
+ ...
252
+ )
253
+ )
254
+ ```
255
+ To generate such annotations, please refer to the "Inference" section.
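
For instance, here is a minimal sketch of generating such an annotation file (the root path, intrinsics, and depth scale below are placeholders; see ```data/gene_annos_kitti_demo.py``` in this repo for the full demo script):

```python
import json
import os
import os.path as osp

data_root = 'data/kitti_demo'                       # placeholder dataset root
cam_in = [707.0493, 707.0493, 604.0814, 180.5066]   # [fx, fy, cx, cy] of your camera (placeholder)
depth_scale = 256.0                                  # scale of the GT depth PNGs (placeholder)

files = []
for rgb_file in sorted(os.listdir(osp.join(data_root, 'rgb'))):
    rgb_path = osp.join(data_root, 'rgb', rgb_file)
    depth_path = rgb_path.replace('/rgb/', '/depth/')
    files.append(dict(rgb=rgb_path, depth=depth_path,
                      depth_scale=depth_scale, cam_in=cam_in))

with open(osp.join(data_root, 'test_annotations.json'), 'w') as f:
    json.dump(dict(files=files), f)
```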
256
+
257
+ ### Configs
258
+ In ```mono/configs``` we provide different config setups.
259
+
260
+ The intrinsics of the canonical camera are set as below:
261
+ ```
262
+ canonical_space = dict(
263
+ img_size=(512, 960),
264
+ focal_length=1000.0,
265
+ ),
266
+ ```
267
+ where cx and cy are set to half of the image width and height, respectively.
268
+
269
+ Inference settings are defined as
270
+ ```
271
+ depth_range=(0, 1),
272
+ depth_normalize=(0.3, 150),
273
+ crop_size = (512, 1088),
274
+ ```
275
+ where the images are first resized to the ```crop_size``` and then fed into the model.
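
Predictions are made in this canonical camera space and then rescaled back to metric depth with the real focal length. A minimal sketch of that de-canonical transform (mirroring the one in ```hubconf.py```; the example focal length is a placeholder):

```python
def to_metric_depth(canonical_depth, fx, canonical_focal=1000.0):
    # depth predicted under the canonical focal length is rescaled by the real focal length
    return canonical_depth * fx / canonical_focal

# example with placeholder values: 10 m canonical depth, fx = 707.05 px
metric_depth = to_metric_depth(10.0, fx=707.05)
```

Note that ```fx``` should be the focal length after any resizing of the input image, as done in ```hubconf.py```.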
276
+
277
+ ## ✈️ Training
278
+ Please refer to [training/README.md](./training/README.md).
279
+ Now we provide complete json files for KITTI fine-tuning.
280
+
281
+ ## ✈️ Inference
282
+ ### News: Improved ONNX support with dynamic shapes (feature contributed by [@xenova](https://github.com/xenova); we appreciate this outstanding contribution 🚩🚩🚩)
283
+
284
+ ONNX support is now available for all three ViT models with varying input shapes. Refer to [issue117](https://github.com/YvanYin/Metric3D/issues/117) for more details.
285
+
286
+ ### Improved ONNX Checkpoints Available now
287
+ | | Encoder | Decoder | Link |
288
+ |:----:|:-------------------:|:-----------------:|:-------------------------------------------------------------------------------------------------:|
289
+ | v2-S-ONNX | DINO2reg-ViT-Small | RAFT-4iter | [Download 🤗](https://huggingface.co/onnx-community/metric3d-vit-small) |
290
+ | v2-L-ONNX | DINO2reg-ViT-Large | RAFT-8iter | [Download 🤗](https://huggingface.co/onnx-community/metric3d-vit-large) |
291
+ | v2-g-ONNX | DINO2reg-ViT-giant2 | RAFT-8iter | [Download 🤗](https://huggingface.co/onnx-community/metric3d-vit-giant2) |
292
+
293
+ One additional [reminder](https://github.com/YvanYin/Metric3D/issues/143#issue-2444506808) about using these ONNX models was reported by @norbertlink.
294
+
295
+ ### News: PyTorch Hub is supported
296
+ Now you can use Metric3D via PyTorch Hub with just a few lines of code:
297
+ ```python
298
+ import torch
299
+ model = torch.hub.load('yvanyin/metric3d', 'metric3d_vit_small', pretrain=True)
300
+ pred_depth, confidence, output_dict = model.inference({'input': rgb})
301
+ pred_normal = output_dict['prediction_normal'][:, :3, :, :] # only available for Metric3Dv2 i.e., ViT models
302
+ normal_confidence = output_dict['prediction_normal'][:, 3, :, :] # see https://arxiv.org/abs/2109.09881 for details
303
+ ```
304
+ Supported models: `metric3d_convnext_tiny`, `metric3d_convnext_large`, `metric3d_vit_small`, `metric3d_vit_large`, `metric3d_vit_giant2`.
305
+
306
+ We also provide a minimal working example in [hubconf.py](https://github.com/YvanYin/Metric3D/blob/main/hubconf.py#L145), which hopefully makes everything clearer.
307
+
308
+ ### News: ONNX Exportation and Inference are supported
309
+
310
+ We also provide a flexible working example in [metric3d_onnx_export.py](./onnx/metric3d_onnx_export.py) for exporting the PyTorch Hub model to the ONNX format. You can test it with the following commands:
311
+
312
+ ```bash
313
+ # Export the model to ONNX model
314
+ python3 onnx/metric_3d_onnx_export.py metric3d_vit_small # metric3d_vit_large/metric3d_convnext_large
315
+
316
+ # Test the inference of the ONNX model
317
+ python3 onnx/test_onnx.py metric3d_vit_small.onnx
318
+ ```
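
For a rough idea of what ONNX inference looks like, here is a sketch using ```onnxruntime``` (this is not the actual ```onnx/test_onnx.py```; the dummy input and its size are assumptions based on the ViT input size above, and the real input name is queried from the session):

```python
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('metric3d_vit_small.onnx', providers=['CPUExecutionProvider'])
input_name = sess.get_inputs()[0].name                       # query the graph instead of guessing the name
dummy = np.random.rand(1, 3, 616, 1064).astype(np.float32)   # assumed ViT input size, dummy data
outputs = sess.run(None, {input_name: dummy})
print([o.shape for o in outputs])
```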
319
+
320
+ [ros2_vision_inference](https://github.com/Owen-Liuyuxuan/ros2_vision_inference) provides a Python example showcasing a pipeline from images to point clouds, integrated into ROS2 systems.
321
+
322
+ ### Download Checkpoint
323
+ | | Encoder | Decoder | Link |
324
+ |:----:|:-------------------:|:-----------------:|:-------------------------------------------------------------------------------------------------:|
325
+ | v1-T | ConvNeXt-Tiny | Hourglass-Decoder | [Download 🤗](https://huggingface.co/JUGGHM/Metric3D/blob/main/convtiny_hourglass_v1.pth) |
326
+ | v1-L | ConvNeXt-Large | Hourglass-Decoder | [Download](https://drive.google.com/file/d/1KVINiBkVpJylx_6z1lAC7CQ4kmn-RJRN/view?usp=drive_link) |
327
+ | v2-S | DINO2reg-ViT-Small | RAFT-4iter | [Download](https://drive.google.com/file/d/1YfmvXwpWmhLg3jSxnhT7LvY0yawlXcr_/view?usp=drive_link) |
328
+ | v2-L | DINO2reg-ViT-Large | RAFT-8iter | [Download](https://drive.google.com/file/d/1eT2gG-kwsVzNy5nJrbm4KC-9DbNKyLnr/view?usp=drive_link) |
329
+ | v2-g | DINO2reg-ViT-giant2 | RAFT-8iter | [Download 🤗](https://huggingface.co/JUGGHM/Metric3D/blob/main/metric_depth_vit_giant2_800k.pth) |
330
+
331
+ ### Dataset Mode
332
+ 1. Put the trained checkpoint file ```model.pth``` in ```weight/```.
333
+ 2. Generate the data annotation by following the code in ```data/gene_annos_kitti_demo.py```, which includes 'rgb', (optional) 'intrinsic', (optional) 'depth', and (optional) 'depth_scale'.
334
+ 3. Change the 'test_data_path' in ```test_*.sh``` to the ```*.json``` path.
335
+ 4. Run ```source test_kitti.sh``` or ```source test_nyu.sh```.
336
+
337
+ ### In-the-Wild Mode
338
+ 1. Put the trained checkpoint file ```model.pth``` in ```weight/```.
339
+ 2. Change the 'test_data_path' in ```test.sh``` to the image folder path.
340
+ 3. Run ```source test_vit.sh``` for transformers and ```source test.sh``` for convnets.
341
+ As no intrinsics are provided, we fall back to 9 default focal-length settings.
342
+
343
+ ### Metric3D and Droid-SLAM
344
+ If you are interested in combining Metric3D with a monocular visual SLAM system to achieve metric SLAM, you can refer to this [repo](https://github.com/Jianxff/droid_metric).
345
+
346
+ ## ❓ Q & A
347
+ ### Q1: Why do the depth maps look good but the point clouds are distorted?
348
+ Because the focal length is not set properly! Please find a proper focal length by modifying the code [here](mono/utils/do_test.py#309) yourself.
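
To see why the focal length matters, here is a minimal pinhole-camera back-projection sketch (standard geometry, not the repo's exact code): a wrong ```fx```/```fy``` stretches or squashes X and Y relative to Z, which is exactly the distortion you observe.

```python
import numpy as np

def depth_to_pointcloud(depth, fx, fy, cx, cy):
    # back-project an HxW metric depth map into an HxWx3 point cloud
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    x = (u - cx) * depth / fx
    y = (v - cy) * depth / fy
    return np.stack([x, y, depth], axis=-1)
```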
349
+
350
+ ### Q2: Why does point-cloud generation take so long?
351
+ Because the images are too large! Use smaller ones instead.
352
+
353
+ ### Q3: Why are the predicted depth maps not satisfactory?
354
+ First, make sure all black padding regions at the image boundaries are cropped out, then try again.
355
+ Besides, Metric3D is not almighty: some objects (chandeliers, drones, ...) and camera views (aerial view, BEV, ...) do not occur frequently in the training datasets. We will dig deeper into this and release more powerful solutions.
356
+
357
+ ## 📧 Citation
358
+ If you use this toolbox in your research or wish to refer to the baseline results published here, please use the following BibTeX entries:
359
+ ```
360
+ @misc{Metric3D,
361
+ author = {Yin, Wei and Hu, Mu},
362
+ title = {OpenMetric3D: An Open Toolbox for Monocular Depth Estimation},
363
+ howpublished = {\url{https://github.com/YvanYin/Metric3D}},
364
+ year = {2024}
365
+ }
366
+ ```
367
+ <!-- ```
368
+ @article{hu2024metric3dv2,
369
+ title={Metric3D v2: A Versatile Monocular Geometric Foundation Model for Zero-shot Metric Depth and Surface Normal Estimation},
370
+ author={Hu, Mu and Yin, Wei and Zhang, Chi and Cai, Zhipeng and Long, Xiaoxiao and Chen, Hao and Wang, Kaixuan and Yu, Gang and Shen, Chunhua and Shen, Shaojie},
371
+ journal={arXiv preprint arXiv:2404.15506},
372
+ year={2024}
373
+ }
374
+ ``` -->
375
+ Please also cite our papers if they help your research.
376
+ ```
377
+ @article{hu2024metric3dv2,
378
+ title={Metric3d v2: A versatile monocular geometric foundation model for zero-shot metric depth and surface normal estimation},
379
+ author={Hu, Mu and Yin, Wei and Zhang, Chi and Cai, Zhipeng and Long, Xiaoxiao and Chen, Hao and Wang, Kaixuan and Yu, Gang and Shen, Chunhua and Shen, Shaojie},
380
+ journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
381
+ year={2024},
382
+ publisher={IEEE}
383
+ }
384
+ ```
385
+ ```
386
+ @inproceedings{yin2023metric,
387
+ title={Metric3D: Towards Zero-shot Metric 3D Prediction from A Single Image},
388
+ author={Wei Yin and Chi Zhang and Hao Chen and Zhipeng Cai and Gang Yu and Kaixuan Wang and Xiaozhi Chen and Chunhua Shen},
389
+ booktitle={ICCV},
390
+ year={2023}
391
+ }
392
+ ```
393
+
394
+ ## License and Contact
395
+
396
+ The *Metric 3D* code is under a 2-clause BSD License. For further commercial inquiries, please contact Dr. Wei Yin [[email protected]] and Mr. Mu Hu [[email protected]].
finetune/Metric3D/data/gene_annos_kitti_demo.py ADDED
@@ -0,0 +1,32 @@
1
+ if __name__=='__main__':
2
+ import os
3
+ import os.path as osp
4
+ import numpy as np
5
+ import cv2
6
+ import json
7
+
8
+ code_root = '/mnt/nas/share/home/xugk/MetricDepth_test/'
9
+
10
+ data_root = osp.join(code_root, 'data/kitti_demo')
11
+ split_root = code_root
12
+
13
+ files = []
14
+ rgb_root = osp.join(data_root, 'rgb')
15
+ depth_root = osp.join(data_root, 'depth')
16
+ for rgb_file in os.listdir(rgb_root):
17
+ rgb_path = osp.join(rgb_root, rgb_file).split(split_root)[-1]
18
+ depth_path = rgb_path.replace('/rgb/', '/depth/')
19
+ cam_in = [707.0493, 707.0493, 604.0814, 180.5066]
20
+ depth_scale = 256.
21
+
22
+ meta_data = {}
23
+ meta_data['cam_in'] = cam_in
24
+ meta_data['rgb'] = rgb_path
25
+ meta_data['depth'] = depth_path
26
+ meta_data['depth_scale'] = depth_scale
27
+ files.append(meta_data)
28
+ files_dict = dict(files=files)
29
+
30
+ with open(osp.join(code_root, 'data/kitti_demo/test_annotations.json'), 'w') as f:
31
+ json.dump(files_dict, f)
32
+
finetune/Metric3D/data/gene_annos_nyu_demo.py ADDED
@@ -0,0 +1,31 @@
1
+ if __name__=='__main__':
2
+ import os
3
+ import os.path as osp
4
+ import numpy as np
5
+ import cv2
6
+ import json
7
+
8
+ code_root = '/mnt/nas/share/home/xugk/MetricDepth_test/'
9
+
10
+ data_root = osp.join(code_root, 'data/nyu_demo')
11
+ split_root = code_root
12
+
13
+ files = []
14
+ rgb_root = osp.join(data_root, 'rgb')
15
+ depth_root = osp.join(data_root, 'depth')
16
+ for rgb_file in os.listdir(rgb_root):
17
+ rgb_path = osp.join(rgb_root, rgb_file).split(split_root)[-1]
18
+ depth_path = rgb_path.replace('.jpg', '.png').replace('/rgb_', '/sync_depth_').replace('/rgb/', '/depth/')
19
+ cam_in = [518.8579, 519.46961, 325.58245, 253.73617]
20
+ depth_scale = 1000.
21
+
22
+ meta_data = {}
23
+ meta_data['cam_in'] = cam_in
24
+ meta_data['rgb'] = rgb_path
25
+ meta_data['depth'] = depth_path
26
+ meta_data['depth_scale'] = depth_scale
27
+ files.append(meta_data)
28
+ files_dict = dict(files=files)
29
+
30
+ with open(osp.join(code_root, 'data/nyu_demo/test_annotations.json'), 'w') as f:
31
+ json.dump(files_dict, f)
finetune/Metric3D/data/kitti_demo/depth/0000000005.png ADDED

Git LFS Details

  • SHA256: eb0d83fc93bcf235384c690ae405e0b24b3bfc6a05e1220a4c902bed3b5ba113
  • Pointer size: 131 Bytes
  • Size of remote file: 192 kB
finetune/Metric3D/data/kitti_demo/depth/0000000050.png ADDED

Git LFS Details

  • SHA256: 3eef554b3b312829e7d1e76a1acd13e7261024eb3c4d6e176328be377ff9216e
  • Pointer size: 131 Bytes
  • Size of remote file: 201 kB
finetune/Metric3D/data/kitti_demo/depth/0000000100.png ADDED

Git LFS Details

  • SHA256: 4b7e9c85e2b4f8131019fe93e0c1cf36f5058b30d040998a8199c4bb2d97e9b1
  • Pointer size: 131 Bytes
  • Size of remote file: 182 kB
finetune/Metric3D/data/kitti_demo/rgb/0000000005.png ADDED

Git LFS Details

  • SHA256: a9754dcadc8b3ace31a368500af3e382e2c0763242a7b054d424650cec67646a
  • Pointer size: 131 Bytes
  • Size of remote file: 873 kB
finetune/Metric3D/data/kitti_demo/rgb/0000000050.png ADDED

Git LFS Details

  • SHA256: 19e4f8f377521c8e28aca9addf2b695f9e374e5f44ee38d58970d12a21fbc4bf
  • Pointer size: 131 Bytes
  • Size of remote file: 874 kB
finetune/Metric3D/data/kitti_demo/rgb/0000000100.png ADDED

Git LFS Details

  • SHA256: 1f216c6fa51fb640c6cfb8a16cc91f60b20b1d2775def3d86c52c2bba1388365
  • Pointer size: 131 Bytes
  • Size of remote file: 916 kB
finetune/Metric3D/data/kitti_demo/test_annotations.json ADDED
@@ -0,0 +1 @@
1
+ {"files": [{"cam_in": [707.0493, 707.0493, 604.0814, 180.5066], "rgb": "data/kitti_demo/rgb/0000000050.png", "depth": "data/kitti_demo/depth/0000000050.png", "depth_scale": 256.0}, {"cam_in": [707.0493, 707.0493, 604.0814, 180.5066], "rgb": "data/kitti_demo/rgb/0000000100.png", "depth": "data/kitti_demo/depth/0000000100.png", "depth_scale": 256.0}, {"cam_in": [707.0493, 707.0493, 604.0814, 180.5066], "rgb": "data/kitti_demo/rgb/0000000005.png", "depth": "data/kitti_demo/depth/0000000005.png", "depth_scale": 256.0}]}
finetune/Metric3D/data/nyu_demo/depth/sync_depth_00000.png ADDED

Git LFS Details

  • SHA256: 043e9c8bee7af97afff01e451da3f5e9cd1591995f415944dd0dc91036a35b5a
  • Pointer size: 131 Bytes
  • Size of remote file: 166 kB
finetune/Metric3D/data/nyu_demo/depth/sync_depth_00050.png ADDED

Git LFS Details

  • SHA256: 53c764e869f61cf4240586395bc7374dcc02e65b8442801b53b74ffa563d30fe
  • Pointer size: 131 Bytes
  • Size of remote file: 182 kB
finetune/Metric3D/data/nyu_demo/depth/sync_depth_00100.png ADDED

Git LFS Details

  • SHA256: dc0c16d56bfdcc958f37fa28bcf39b110a14c317bfe3c221b3c3bc6d73dec67d
  • Pointer size: 131 Bytes
  • Size of remote file: 142 kB
finetune/Metric3D/data/nyu_demo/rgb/rgb_00000.jpg ADDED
finetune/Metric3D/data/nyu_demo/rgb/rgb_00050.jpg ADDED
finetune/Metric3D/data/nyu_demo/rgb/rgb_00100.jpg ADDED
finetune/Metric3D/data/nyu_demo/test_annotations.json ADDED
@@ -0,0 +1 @@
1
+ {"files": [{"cam_in": [518.8579, 519.46961, 325.58245, 253.73617], "rgb": "data/nyu_demo/rgb/rgb_00000.jpg", "depth": "data/nyu_demo/depth/sync_depth_00000.png", "depth_scale": 1000.0}, {"cam_in": [518.8579, 519.46961, 325.58245, 253.73617], "rgb": "data/nyu_demo/rgb/rgb_00050.jpg", "depth": "data/nyu_demo/depth/sync_depth_00050.png", "depth_scale": 1000.0}, {"cam_in": [518.8579, 519.46961, 325.58245, 253.73617], "rgb": "data/nyu_demo/rgb/rgb_00100.jpg", "depth": "data/nyu_demo/depth/sync_depth_00100.png", "depth_scale": 1000.0}]}
finetune/Metric3D/data/wild_demo/david-kohler-VFRTXGw1VjU-unsplash.jpg ADDED

Git LFS Details

  • SHA256: 78a2041d375f24e00cabbb88a6b0199230c4d07777a3722b4a04828babbf30f0
  • Pointer size: 131 Bytes
  • Size of remote file: 127 kB
finetune/Metric3D/data/wild_demo/jonathan-borba-CnthDZXCdoY-unsplash.jpg ADDED

Git LFS Details

  • SHA256: bdb629d9a64804955adeab654b0b9a28298b73603d9eb2ae75c88dc3d106a412
  • Pointer size: 131 Bytes
  • Size of remote file: 180 kB
finetune/Metric3D/data/wild_demo/randy-fath-G1yhU1Ej-9A-unsplash.jpg ADDED

Git LFS Details

  • SHA256: 95501e0df0690f6ba0423da026cf33b86952774e1727c5ef4e03ae3b4333c90f
  • Pointer size: 131 Bytes
  • Size of remote file: 100 kB
finetune/Metric3D/data_info/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from .public_datasets import *
2
+ from .pretrained_weight import *
finetune/Metric3D/data_info/pretrained_weight.py ADDED
@@ -0,0 +1,16 @@
1
+
2
+ mldb_info={}
3
+
4
+ mldb_info['checkpoint']={
5
+ 'mldb_root': '/mnt/nas/share/home/xugk/ckpt', # NOTE: modify it to the pretrained ckpt root
6
+
7
+ # pretrained weight for convnext
8
+ 'convnext_tiny': 'convnext/convnext_tiny_22k_1k_384.pth',
9
+ 'convnext_small': 'convnext/convnext_small_22k_1k_384.pth',
10
+ 'convnext_base': 'convnext/convnext_base_22k_1k_384.pth',
11
+ 'convnext_large': 'convnext/convnext_large_22k_1k_384.pth',
12
+ 'vit_large': 'vit/dinov2_vitl14_pretrain.pth',
13
+ 'vit_small_reg': 'vit/dinov2_vits14_reg4_pretrain.pth',
14
+ 'vit_large_reg': 'vit/dinov2_vitl14_reg4_pretrain.pth',
15
+ 'vit_giant2_reg': 'vit/dinov2_vitg14_reg4_pretrain.pth',
16
+ }
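
Presumably these relative paths are joined with ```mldb_root``` when the toolbox loads pretrained backbones; as an illustrative assumption (not the toolbox's exact loading code):

```python
import os.path as osp

mldb_root = '/path/to/your/ckpt'                   # placeholder: your pretrained checkpoint root
rel_path = 'vit/dinov2_vitl14_reg4_pretrain.pth'   # the 'vit_large_reg' entry above
ckpt_path = osp.join(mldb_root, rel_path)          # assumed full checkpoint path
```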
finetune/Metric3D/data_info/public_datasets.py ADDED
@@ -0,0 +1,7 @@
1
+ mldb_info = {}
2
+
3
+ mldb_info['NYU']={
4
+ 'mldb_root': '/mnt/nas/share/home/xugk/data/',
5
+ 'data_root': 'nyu',
6
+ 'test_annotations_path': 'nyu/test_annotation.json',
7
+ }
finetune/Metric3D/hubconf.py ADDED
@@ -0,0 +1,224 @@
1
+ dependencies = ['torch', 'torchvision']
2
+
3
+ import os
4
+ import torch
5
+ try:
6
+ from mmcv.utils import Config, DictAction
7
+ except ImportError:
8
+ from mmengine import Config, DictAction
9
+
10
+ from mono.model.monodepth_model import get_configured_monodepth_model
11
+ metric3d_dir = os.path.dirname(__file__)
12
+
13
+ MODEL_TYPE = {
14
+ 'ConvNeXt-Tiny': {
15
+ 'cfg_file': f'{metric3d_dir}/mono/configs/HourglassDecoder/convtiny.0.3_150.py',
16
+ 'ckpt_file': 'https://huggingface.co/JUGGHM/Metric3D/resolve/main/convtiny_hourglass_v1.pth',
17
+ },
18
+ 'ConvNeXt-Large': {
19
+ 'cfg_file': f'{metric3d_dir}/mono/configs/HourglassDecoder/convlarge.0.3_150.py',
20
+ 'ckpt_file': 'https://huggingface.co/JUGGHM/Metric3D/resolve/main/convlarge_hourglass_0.3_150_step750k_v1.1.pth',
21
+ },
22
+ 'ViT-Small': {
23
+ 'cfg_file': f'{metric3d_dir}/mono/configs/HourglassDecoder/vit.raft5.small.py',
24
+ 'ckpt_file': 'https://huggingface.co/JUGGHM/Metric3D/resolve/main/metric_depth_vit_small_800k.pth',
25
+ },
26
+ 'ViT-Large': {
27
+ 'cfg_file': f'{metric3d_dir}/mono/configs/HourglassDecoder/vit.raft5.large.py',
28
+ 'ckpt_file': 'https://huggingface.co/JUGGHM/Metric3D/resolve/main/metric_depth_vit_large_800k.pth',
29
+ },
30
+ 'ViT-giant2': {
31
+ 'cfg_file': f'{metric3d_dir}/mono/configs/HourglassDecoder/vit.raft5.giant2.py',
32
+ 'ckpt_file': 'https://huggingface.co/JUGGHM/Metric3D/resolve/main/metric_depth_vit_giant2_800k.pth',
33
+ },
34
+ }
35
+
36
+
37
+
38
+ def metric3d_convnext_tiny(pretrain=False, **kwargs):
39
+ '''
40
+ Return a Metric3D model with ConvNeXt-Tiny backbone and Hourglass-Decoder head.
41
+ For usage examples, refer to: https://github.com/YvanYin/Metric3D/blob/main/hubconf.py
42
+ Args:
43
+ pretrain (bool): whether to load pretrained weights.
44
+ Returns:
45
+ model (nn.Module): a Metric3D model.
46
+ '''
47
+ cfg_file = MODEL_TYPE['ConvNeXt-Tiny']['cfg_file']
48
+ ckpt_file = MODEL_TYPE['ConvNeXt-Tiny']['ckpt_file']
49
+
50
+ cfg = Config.fromfile(cfg_file)
51
+ model = get_configured_monodepth_model(cfg)
52
+ if pretrain:
53
+ model.load_state_dict(
54
+ torch.hub.load_state_dict_from_url(ckpt_file)['model_state_dict'],
55
+ strict=False,
56
+ )
57
+ return model
58
+
59
+ def metric3d_convnext_large(pretrain=False, **kwargs):
60
+ '''
61
+ Return a Metric3D model with ConvNeXt-Large backbone and Hourglass-Decoder head.
62
+ For usage examples, refer to: https://github.com/YvanYin/Metric3D/blob/main/hubconf.py
63
+ Args:
64
+ pretrain (bool): whether to load pretrained weights.
65
+ Returns:
66
+ model (nn.Module): a Metric3D model.
67
+ '''
68
+ cfg_file = MODEL_TYPE['ConvNeXt-Large']['cfg_file']
69
+ ckpt_file = MODEL_TYPE['ConvNeXt-Large']['ckpt_file']
70
+
71
+ cfg = Config.fromfile(cfg_file)
72
+ model = get_configured_monodepth_model(cfg)
73
+ if pretrain:
74
+ model.load_state_dict(
75
+ torch.hub.load_state_dict_from_url(ckpt_file)['model_state_dict'],
76
+ strict=False,
77
+ )
78
+ return model
79
+
80
+ def metric3d_vit_small(pretrain=False, **kwargs):
81
+ '''
82
+ Return a Metric3D model with ViT-Small backbone and RAFT-4iter head.
83
+ For usage examples, refer to: https://github.com/YvanYin/Metric3D/blob/main/hubconf.py
84
+ Args:
85
+ pretrain (bool): whether to load pretrained weights.
86
+ Returns:
87
+ model (nn.Module): a Metric3D model.
88
+ '''
89
+ cfg_file = MODEL_TYPE['ViT-Small']['cfg_file']
90
+ ckpt_file = MODEL_TYPE['ViT-Small']['ckpt_file']
91
+
92
+ cfg = Config.fromfile(cfg_file)
93
+ model = get_configured_monodepth_model(cfg)
94
+ if pretrain:
95
+ model.load_state_dict(
96
+ torch.hub.load_state_dict_from_url(ckpt_file)['model_state_dict'],
97
+ strict=False,
98
+ )
99
+ return model
100
+
101
+ def metric3d_vit_large(pretrain=False, **kwargs):
102
+ '''
103
+ Return a Metric3D model with ViT-Large backbone and RAFT-8iter head.
104
+ For usage examples, refer to: https://github.com/YvanYin/Metric3D/blob/main/hubconf.py
105
+ Args:
106
+ pretrain (bool): whether to load pretrained weights.
107
+ Returns:
108
+ model (nn.Module): a Metric3D model.
109
+ '''
110
+ cfg_file = MODEL_TYPE['ViT-Large']['cfg_file']
111
+ ckpt_file = MODEL_TYPE['ViT-Large']['ckpt_file']
112
+
113
+ cfg = Config.fromfile(cfg_file)
114
+ model = get_configured_monodepth_model(cfg)
115
+ if pretrain:
116
+ model.load_state_dict(
117
+ torch.hub.load_state_dict_from_url(ckpt_file)['model_state_dict'],
118
+ strict=False,
119
+ )
120
+ return model
121
+
122
+ def metric3d_vit_giant2(pretrain=False, **kwargs):
123
+ '''
124
+ Return a Metric3D model with ViT-Giant2 backbone and RAFT-8iter head.
125
+ For usage examples, refer to: https://github.com/YvanYin/Metric3D/blob/main/hubconf.py
126
+ Args:
127
+ pretrain (bool): whether to load pretrained weights.
128
+ Returns:
129
+ model (nn.Module): a Metric3D model.
130
+ '''
131
+ cfg_file = MODEL_TYPE['ViT-giant2']['cfg_file']
132
+ ckpt_file = MODEL_TYPE['ViT-giant2']['ckpt_file']
133
+
134
+ cfg = Config.fromfile(cfg_file)
135
+ model = get_configured_monodepth_model(cfg)
136
+ if pretrain:
137
+ model.load_state_dict(
138
+ torch.hub.load_state_dict_from_url(ckpt_file)['model_state_dict'],
139
+ strict=False,
140
+ )
141
+ return model
142
+
143
+
144
+
145
+ if __name__ == '__main__':
146
+ import cv2
147
+ import numpy as np
148
+ #### prepare data
149
+ rgb_file = 'data/kitti_demo/rgb/0000000050.png'
150
+ depth_file = 'data/kitti_demo/depth/0000000050.png'
151
+ intrinsic = [707.0493, 707.0493, 604.0814, 180.5066]
152
+ gt_depth_scale = 256.0
153
+ rgb_origin = cv2.imread(rgb_file)[:, :, ::-1]
154
+
155
+ #### adjust input size to fit the pretrained model
156
+ # keep ratio resize
157
+ input_size = (616, 1064) # for vit model
158
+ # input_size = (544, 1216) # for convnext model
159
+ h, w = rgb_origin.shape[:2]
160
+ scale = min(input_size[0] / h, input_size[1] / w)
161
+ rgb = cv2.resize(rgb_origin, (int(w * scale), int(h * scale)), interpolation=cv2.INTER_LINEAR)
162
+ # remember to scale the intrinsics accordingly; the depth values stay unchanged
163
+ intrinsic = [intrinsic[0] * scale, intrinsic[1] * scale, intrinsic[2] * scale, intrinsic[3] * scale]
164
+ # padding to input_size
165
+ padding = [123.675, 116.28, 103.53]
166
+ h, w = rgb.shape[:2]
167
+ pad_h = input_size[0] - h
168
+ pad_w = input_size[1] - w
169
+ pad_h_half = pad_h // 2
170
+ pad_w_half = pad_w // 2
171
+ rgb = cv2.copyMakeBorder(rgb, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=padding)
172
+ pad_info = [pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half]
173
+
174
+ #### normalize
175
+ mean = torch.tensor([123.675, 116.28, 103.53]).float()[:, None, None]
176
+ std = torch.tensor([58.395, 57.12, 57.375]).float()[:, None, None]
177
+ rgb = torch.from_numpy(rgb.transpose((2, 0, 1))).float()
178
+ rgb = torch.div((rgb - mean), std)
179
+ rgb = rgb[None, :, :, :].cuda()
180
+
181
+ ###################### canonical camera space ######################
182
+ # inference
183
+ model = torch.hub.load('yvanyin/metric3d', 'metric3d_vit_small', pretrain=True)
184
+ model.cuda().eval()
185
+ with torch.no_grad():
186
+ pred_depth, confidence, output_dict = model.inference({'input': rgb})
187
+
188
+ # un pad
189
+ pred_depth = pred_depth.squeeze()
190
+ pred_depth = pred_depth[pad_info[0] : pred_depth.shape[0] - pad_info[1], pad_info[2] : pred_depth.shape[1] - pad_info[3]]
191
+
192
+ # upsample to original size
193
+ pred_depth = torch.nn.functional.interpolate(pred_depth[None, None, :, :], rgb_origin.shape[:2], mode='bilinear').squeeze()
194
+ ###################### canonical camera space ######################
195
+
196
+ #### de-canonical transform
197
+ canonical_to_real_scale = intrinsic[0] / 1000.0 # 1000.0 is the focal length of canonical camera
198
+ pred_depth = pred_depth * canonical_to_real_scale # now the depth is metric
199
+ pred_depth = torch.clamp(pred_depth, 0, 300)
200
+
201
+ #### you can now do anything with the metric depth
202
+ # such as evaluate predicted depth
203
+ if depth_file is not None:
204
+ gt_depth = cv2.imread(depth_file, -1)
205
+ gt_depth = gt_depth / gt_depth_scale
206
+ gt_depth = torch.from_numpy(gt_depth).float().cuda()
207
+ assert gt_depth.shape == pred_depth.shape
208
+
209
+ mask = (gt_depth > 1e-8)
210
+ abs_rel_err = (torch.abs(pred_depth[mask] - gt_depth[mask]) / gt_depth[mask]).mean()
211
+ print('abs_rel_err:', abs_rel_err.item())
212
+
213
+ #### normals are also available
214
+ if 'prediction_normal' in output_dict: # only available for Metric3Dv2, i.e. vit model
215
+ pred_normal = output_dict['prediction_normal'][:, :3, :, :]
216
+ normal_confidence = output_dict['prediction_normal'][:, 3, :, :] # see https://arxiv.org/abs/2109.09881 for details
217
+ # un pad and resize to some size if needed
218
+ pred_normal = pred_normal.squeeze()
219
+ pred_normal = pred_normal[:, pad_info[0] : pred_normal.shape[1] - pad_info[1], pad_info[2] : pred_normal.shape[2] - pad_info[3]]
220
+ # you can now do anything with the normal
221
+ # such as visualize pred_normal
222
+ pred_normal_vis = pred_normal.cpu().numpy().transpose((1, 2, 0))
223
+ pred_normal_vis = (pred_normal_vis + 1) / 2
224
+ cv2.imwrite('normal_vis.png', (pred_normal_vis * 255).astype(np.uint8))
finetune/Metric3D/media/gifs/demo_1.gif ADDED

Git LFS Details

  • SHA256: f07ee050ca8b76991966f45bb74eae6e61e6b11eeb9466b524c6ab5164711d36
  • Pointer size: 133 Bytes
  • Size of remote file: 10.7 MB
finetune/Metric3D/media/gifs/demo_12.gif ADDED

Git LFS Details

  • SHA256: c1886d6dff7714d015e6b7c004d88d3014057b1cefe1dc5544fa9bedb81383bc
  • Pointer size: 132 Bytes
  • Size of remote file: 9.41 MB
finetune/Metric3D/media/gifs/demo_2.gif ADDED

Git LFS Details

  • SHA256: d11e3f9a11374166fc363a3fed17928957de546a548eccc4c7efa4d9317cf4c5
  • Pointer size: 132 Bytes
  • Size of remote file: 9.02 MB
finetune/Metric3D/media/gifs/demo_22.gif ADDED

Git LFS Details

  • SHA256: c56b0785a5991126d02b349f8801980f31b2ef7b661cad07be4888ff42dc29d0
  • Pointer size: 132 Bytes
  • Size of remote file: 6.39 MB
finetune/Metric3D/media/screenshots/challenge.PNG ADDED

Git LFS Details

  • SHA256: cde4895922d3bd0b4ced1ef80352c2ad154e6225efc403335dff15d42c6974b2
  • Pointer size: 131 Bytes
  • Size of remote file: 139 kB
finetune/Metric3D/media/screenshots/depth_normal.jpg ADDED

Git LFS Details

  • SHA256: b79635e71ff5409592302067bc7b6347db3447f99f62bd6393b674b2556cbac6
  • Pointer size: 132 Bytes
  • Size of remote file: 2.99 MB
finetune/Metric3D/media/screenshots/metrology.jpg ADDED

Git LFS Details

  • SHA256: 09e1fb9d08df85528e10a30b147b939f704c712b7e4b823cb54325f4f3cdd8c4
  • Pointer size: 132 Bytes
  • Size of remote file: 1.24 MB
finetune/Metric3D/media/screenshots/page2.png ADDED

Git LFS Details

  • SHA256: c46a332e0f9f868c767f65f70c0fa11ec4f7da2dfe69d47046dff5c37964c171
  • Pointer size: 132 Bytes
  • Size of remote file: 4.35 MB
finetune/Metric3D/media/screenshots/pipeline.png ADDED

Git LFS Details

  • SHA256: 19a7b36e83761aae0ecd27e1215e31fded8c9ef3d308734e690456921703f662
  • Pointer size: 131 Bytes
  • Size of remote file: 399 kB
finetune/Metric3D/mono/configs/HourglassDecoder/convlarge.0.3_150.py ADDED
@@ -0,0 +1,25 @@
1
+ _base_=[
2
+ '../_base_/models/encoder_decoder/convnext_large.hourglassdecoder.py',
3
+ '../_base_/datasets/_data_base_.py',
4
+ '../_base_/default_runtime.py',
5
+ ]
6
+
7
+ model = dict(
8
+ backbone=dict(
9
+ pretrained=False,
10
+ )
11
+ )
12
+
13
+ # configs of the canonical space
14
+ data_basic=dict(
15
+ canonical_space = dict(
16
+ img_size=(512, 960),
17
+ focal_length=1000.0,
18
+ ),
19
+ depth_range=(0, 1),
20
+ depth_normalize=(0.3, 150),
21
+ crop_size = (544, 1216),
22
+ )
23
+
24
+ batchsize_per_gpu = 2
25
+ thread_per_gpu = 4
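
These configs use the mmcv/mmengine ```_base_``` inheritance convention: the files listed in ```_base_``` are merged first and the values here override them. A minimal sketch of loading one, mirroring ```hubconf.py```:

```python
try:
    from mmcv.utils import Config
except ImportError:
    from mmengine import Config

cfg = Config.fromfile('mono/configs/HourglassDecoder/convlarge.0.3_150.py')
print(cfg.data_basic['crop_size'])   # (544, 1216), as set above
```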
finetune/Metric3D/mono/configs/HourglassDecoder/convtiny.0.3_150.py ADDED
@@ -0,0 +1,25 @@
1
+ _base_=[
2
+ '../_base_/models/encoder_decoder/convnext_tiny.hourglassdecoder.py',
3
+ '../_base_/datasets/_data_base_.py',
4
+ '../_base_/default_runtime.py',
5
+ ]
6
+
7
+ model = dict(
8
+ backbone=dict(
9
+ pretrained=False,
10
+ )
11
+ )
12
+
13
+ # configs of the canonical space
14
+ data_basic=dict(
15
+ canonical_space = dict(
16
+ img_size=(512, 960),
17
+ focal_length=1000.0,
18
+ ),
19
+ depth_range=(0, 1),
20
+ depth_normalize=(0.3, 150),
21
+ crop_size = (544, 1216),
22
+ )
23
+
24
+ batchsize_per_gpu = 2
25
+ thread_per_gpu = 4
finetune/Metric3D/mono/configs/HourglassDecoder/test_kitti_convlarge.0.3_150.py ADDED
@@ -0,0 +1,25 @@
1
+ _base_=[
2
+ '../_base_/models/encoder_decoder/convnext_large.hourglassdecoder.py',
3
+ '../_base_/datasets/_data_base_.py',
4
+ '../_base_/default_runtime.py',
5
+ ]
6
+
7
+ model = dict(
8
+ backbone=dict(
9
+ pretrained=False,
10
+ )
11
+ )
12
+
13
+ # configs of the canonical space
14
+ data_basic=dict(
15
+ canonical_space = dict(
16
+ img_size=(512, 960),
17
+ focal_length=1000.0,
18
+ ),
19
+ depth_range=(0, 1),
20
+ depth_normalize=(0.3, 150),
21
+ crop_size = (512, 1088),
22
+ )
23
+
24
+ batchsize_per_gpu = 2
25
+ thread_per_gpu = 4
finetune/Metric3D/mono/configs/HourglassDecoder/test_nyu_convlarge.0.3_150.py ADDED
@@ -0,0 +1,25 @@
1
+ _base_=[
2
+ '../_base_/models/encoder_decoder/convnext_large.hourglassdecoder.py',
3
+ '../_base_/datasets/_data_base_.py',
4
+ '../_base_/default_runtime.py',
5
+ ]
6
+
7
+ model = dict(
8
+ backbone=dict(
9
+ pretrained=False,
10
+ )
11
+ )
12
+
13
+ # configs of the canonical space
14
+ data_basic=dict(
15
+ canonical_space = dict(
16
+ img_size=(512, 960),
17
+ focal_length=1000.0,
18
+ ),
19
+ depth_range=(0, 1),
20
+ depth_normalize=(0.3, 150),
21
+ crop_size = (480, 1216),
22
+ )
23
+
24
+ batchsize_per_gpu = 2
25
+ thread_per_gpu = 4
finetune/Metric3D/mono/configs/HourglassDecoder/vit.raft5.giant2.py ADDED
@@ -0,0 +1,33 @@
1
+ _base_=[
2
+ '../_base_/models/encoder_decoder/dino_vit_giant2_reg.dpt_raft.py',
3
+ '../_base_/datasets/_data_base_.py',
4
+ '../_base_/default_runtime.py',
5
+ ]
6
+
7
+ import numpy as np
8
+ model=dict(
9
+ decode_head=dict(
10
+ type='RAFTDepthNormalDPT5',
11
+ iters=8,
12
+ n_downsample=2,
13
+ detach=False,
14
+ )
15
+ )
16
+
17
+
18
+ max_value = 200
19
+ # configs of the canonical space
20
+ data_basic=dict(
21
+ canonical_space = dict(
22
+ # img_size=(540, 960),
23
+ focal_length=1000.0,
24
+ ),
25
+ depth_range=(0, 1),
26
+ depth_normalize=(0.1, max_value),
27
+ crop_size = (616, 1064), # %28 = 0
28
+ clip_depth_range=(0.1, 200),
29
+ vit_size=(616,1064)
30
+ )
31
+
32
+ batchsize_per_gpu = 1
33
+ thread_per_gpu = 1
finetune/Metric3D/mono/configs/HourglassDecoder/vit.raft5.large.py ADDED
@@ -0,0 +1,33 @@
1
+ _base_=[
2
+ '../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
3
+ '../_base_/datasets/_data_base_.py',
4
+ '../_base_/default_runtime.py',
5
+ ]
6
+
7
+ import numpy as np
8
+ model=dict(
9
+ decode_head=dict(
10
+ type='RAFTDepthNormalDPT5',
11
+ iters=8,
12
+ n_downsample=2,
13
+ detach=False,
14
+ )
15
+ )
16
+
17
+
18
+ max_value = 200
19
+ # configs of the canonical space
20
+ data_basic=dict(
21
+ canonical_space = dict(
22
+ # img_size=(540, 960),
23
+ focal_length=1000.0,
24
+ ),
25
+ depth_range=(0, 1),
26
+ depth_normalize=(0.1, max_value),
27
+ crop_size = (616, 1064), # %28 = 0
28
+ clip_depth_range=(0.1, 200),
29
+ vit_size=(616,1064)
30
+ )
31
+
32
+ batchsize_per_gpu = 1
33
+ thread_per_gpu = 1
finetune/Metric3D/mono/configs/HourglassDecoder/vit.raft5.small.py ADDED
@@ -0,0 +1,33 @@
1
+ _base_=[
2
+ '../_base_/models/encoder_decoder/dino_vit_small_reg.dpt_raft.py',
3
+ '../_base_/datasets/_data_base_.py',
4
+ '../_base_/default_runtime.py',
5
+ ]
6
+
7
+ import numpy as np
8
+ model=dict(
9
+ decode_head=dict(
10
+ type='RAFTDepthNormalDPT5',
11
+ iters=4,
12
+ n_downsample=2,
13
+ detach=False,
14
+ )
15
+ )
16
+
17
+
18
+ max_value = 200
19
+ # configs of the canonical space
20
+ data_basic=dict(
21
+ canonical_space = dict(
22
+ # img_size=(540, 960),
23
+ focal_length=1000.0,
24
+ ),
25
+ depth_range=(0, 1),
26
+ depth_normalize=(0.1, max_value),
27
+ crop_size = (616, 1064), # %28 = 0
28
+ clip_depth_range=(0.1, 200),
29
+ vit_size=(616,1064)
30
+ )
31
+
32
+ batchsize_per_gpu = 1
33
+ thread_per_gpu = 1
finetune/Metric3D/mono/configs/__init__.py ADDED
@@ -0,0 +1 @@
1
+
finetune/Metric3D/mono/configs/_base_/_data_base_.py ADDED
@@ -0,0 +1,13 @@
1
+ # canonical camera setting and basic data setting
2
+ # we set it same as the E300 camera (crop version)
3
+ #
4
+ data_basic=dict(
5
+ canonical_space = dict(
6
+ img_size=(540, 960),
7
+ focal_length=1196.0,
8
+ ),
9
+ depth_range=(0.9, 150),
10
+ depth_normalize=(0.006, 1.001),
11
+ crop_size = (512, 960),
12
+ clip_depth_range=(0.9, 150),
13
+ )
finetune/Metric3D/mono/configs/_base_/datasets/_data_base_.py ADDED
@@ -0,0 +1,12 @@
1
+ # canonical camera setting and basic data setting
2
+ #
3
+ data_basic=dict(
4
+ canonical_space = dict(
5
+ img_size=(540, 960),
6
+ focal_length=1196.0,
7
+ ),
8
+ depth_range=(0.9, 150),
9
+ depth_normalize=(0.006, 1.001),
10
+ crop_size = (512, 960),
11
+ clip_depth_range=(0.9, 150),
12
+ )
finetune/Metric3D/mono/configs/_base_/default_runtime.py ADDED
@@ -0,0 +1,4 @@
1
+
2
+ load_from = None
3
+ cudnn_benchmark = True
4
+ test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3','rmse_log', 'log10', 'sq_rel']
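
For reference, a minimal sketch of how two of these test metrics are conventionally defined (standard formulas; the repo's own implementation may differ in masking and edge cases):

```python
import numpy as np

def abs_rel(pred, gt):
    # mean absolute relative error over valid pixels
    return float(np.mean(np.abs(pred - gt) / gt))

def delta1(pred, gt):
    # fraction of pixels with max(pred/gt, gt/pred) < 1.25
    ratio = np.maximum(pred / gt, gt / pred)
    return float(np.mean(ratio < 1.25))
```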