Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
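For context, a commit titled like this one is typically produced with the `upload_folder` helper from `huggingface_hub`. A minimal sketch — the repo ID and local path are hypothetical placeholders, not taken from this commit:

```python
from huggingface_hub import HfApi

# Minimal sketch: push a local folder to the Hub as a single commit.
# "user/space-or-model" and "demo" are hypothetical placeholders.
api = HfApi()
api.upload_folder(
    folder_path="demo",                 # local directory to upload
    path_in_repo="demo",                # destination prefix inside the repo
    repo_id="user/space-or-model",
    commit_message="Upload folder using huggingface_hub",
)
```

The diff below — the `.gitattributes` update, the `demo/.config` gcloud logs, and the `demo/dust3r` tree — is consistent with such a single-commit folder upload.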
- .gitattributes +3 -0
- demo/.config/.last_opt_in_prompt.yaml +1 -0
- demo/.config/.last_survey_prompt.yaml +1 -0
- demo/.config/.last_update_check.json +1 -0
- demo/.config/active_config +1 -0
- demo/.config/config_sentinel +0 -0
- demo/.config/configurations/config_default +6 -0
- demo/.config/default_configs.db +0 -0
- demo/.config/gce +1 -0
- demo/.config/logs/2024.04.15/13.24.57.449262.log +596 -0
- demo/.config/logs/2024.04.15/13.25.24.511550.log +5 -0
- demo/.config/logs/2024.04.15/13.25.35.658011.log +169 -0
- demo/.config/logs/2024.04.15/13.25.45.199675.log +5 -0
- demo/.config/logs/2024.04.15/13.25.59.817323.log +8 -0
- demo/.config/logs/2024.04.15/13.26.00.519914.log +8 -0
- demo/.gitattributes +38 -0
- demo/README.md +6 -0
- demo/app.py +7 -0
- demo/dust3r/.gitignore +132 -0
- demo/dust3r/.gitmodules +3 -0
- demo/dust3r/LICENSE +7 -0
- demo/dust3r/NOTICE +13 -0
- demo/dust3r/README.md +299 -0
- demo/dust3r/assets/demo.jpg +0 -0
- demo/dust3r/assets/dust3r_archi.jpg +0 -0
- demo/dust3r/assets/matching.jpg +0 -0
- demo/dust3r/assets/pipeline1.jpg +0 -0
- demo/dust3r/checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth +3 -0
- demo/dust3r/checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth.1 +3 -0
- demo/dust3r/croco/LICENSE +52 -0
- demo/dust3r/croco/NOTICE +21 -0
- demo/dust3r/croco/README.MD +124 -0
- demo/dust3r/croco/assets/Chateau1.png +0 -0
- demo/dust3r/croco/assets/Chateau2.png +0 -0
- demo/dust3r/croco/assets/arch.jpg +0 -0
- demo/dust3r/croco/croco-stereo-flow-demo.ipynb +191 -0
- demo/dust3r/croco/datasets/__init__.py +0 -0
- demo/dust3r/croco/datasets/crops/README.MD +104 -0
- demo/dust3r/croco/datasets/crops/extract_crops_from_images.py +159 -0
- demo/dust3r/croco/datasets/habitat_sim/README.MD +76 -0
- demo/dust3r/croco/datasets/habitat_sim/__init__.py +0 -0
- demo/dust3r/croco/datasets/habitat_sim/generate_from_metadata.py +92 -0
- demo/dust3r/croco/datasets/habitat_sim/generate_from_metadata_files.py +27 -0
- demo/dust3r/croco/datasets/habitat_sim/generate_multiview_images.py +177 -0
- demo/dust3r/croco/datasets/habitat_sim/multiview_habitat_sim_generator.py +390 -0
- demo/dust3r/croco/datasets/habitat_sim/pack_metadata_files.py +69 -0
- demo/dust3r/croco/datasets/habitat_sim/paths.py +129 -0
- demo/dust3r/croco/datasets/pairs_dataset.py +109 -0
- demo/dust3r/croco/datasets/transforms.py +95 -0
- demo/dust3r/croco/demo.py +55 -0
.gitattributes
CHANGED
@@ -36,3 +36,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 dust3r/checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth.1 filter=lfs diff=lfs merge=lfs -text
 sample_data/mnist_test.csv filter=lfs diff=lfs merge=lfs -text
 sample_data/mnist_train_small.csv filter=lfs diff=lfs merge=lfs -text
+demo/dust3r/checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth.1 filter=lfs diff=lfs merge=lfs -text
+demo/sample_data/mnist_test.csv filter=lfs diff=lfs merge=lfs -text
+demo/sample_data/mnist_train_small.csv filter=lfs diff=lfs merge=lfs -text
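Rules in this `filter=lfs diff=lfs merge=lfs -text` form are what `git lfs track` appends to `.gitattributes`. A minimal sketch of producing the same three entries programmatically, assuming Git LFS is installed (paths taken from the hunk above):

```python
import subprocess

# The three paths added to .gitattributes in this commit.
lfs_paths = [
    "demo/dust3r/checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth.1",
    "demo/sample_data/mnist_test.csv",
    "demo/sample_data/mnist_train_small.csv",
]
for path in lfs_paths:
    # Each call appends '<path> filter=lfs diff=lfs merge=lfs -text'.
    subprocess.run(["git", "lfs", "track", path], check=True)
```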
demo/.config/.last_opt_in_prompt.yaml
ADDED
@@ -0,0 +1 @@
+{}
demo/.config/.last_survey_prompt.yaml
ADDED
@@ -0,0 +1 @@
+last_prompt_time: 1713187535.105859
demo/.config/.last_update_check.json
ADDED
@@ -0,0 +1 @@
+{"last_update_check_time": 1713187544.7223938, "last_update_check_revision": 20240329151455, "notifications": [], "last_nag_times": {}}
demo/.config/active_config
ADDED
@@ -0,0 +1 @@
+default
demo/.config/config_sentinel
ADDED
File without changes
demo/.config/configurations/config_default
ADDED
@@ -0,0 +1,6 @@
+[component_manager]
+disable_update_check = true
+
+[compute]
+gce_metadata_read_timeout_sec = 0
+
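The file above is plain INI, so it can be inspected with Python's standard `configparser`; a minimal sketch (the path is the one added in this diff):

```python
import configparser

# Parse the gcloud configuration file added in this commit.
cfg = configparser.ConfigParser()
cfg.read("demo/.config/configurations/config_default")
assert cfg["component_manager"]["disable_update_check"] == "true"
assert cfg["compute"]["gce_metadata_read_timeout_sec"] == "0"
```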
demo/.config/default_configs.db
ADDED
Binary file (12.3 kB)
demo/.config/gce
ADDED
@@ -0,0 +1 @@
+False
demo/.config/logs/2024.04.15/13.24.57.449262.log
ADDED
@@ -0,0 +1,596 @@
+2024-04-15 13:25:09,477 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2024-04-15 13:25:09,481 DEBUG root Loaded Command Group: ['gcloud', 'components', 'update']
+2024-04-15 13:25:09,483 DEBUG root Running [gcloud.components.update] with arguments: [--allow-no-backup: "True", --compile-python: "True", --quiet: "True", COMPONENT-IDS:7: "['core', 'gcloud-deps', 'bq', 'gcloud', 'gcloud-crc32c', 'gsutil', 'anthoscli']"]
+2024-04-15 13:25:09,485 INFO ___FILE_ONLY___ Beginning update. This process may take several minutes.
+2024-04-15 13:25:09,509 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-04-15 13:25:09,581 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components-2.json HTTP/1.1" 200 214439
+2024-04-15 13:25:09,598 INFO ___FILE_ONLY___ Your current Google Cloud CLI version is: 471.0.0
+2024-04-15 13:25:09,598 INFO ___FILE_ONLY___ Installing components from version: 471.0.0
+2024-04-15 13:25:09,599 DEBUG root Chosen display Format:table[box,title="These components will be removed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2024-04-15 13:25:09,600 DEBUG root Chosen display Format:table[box,title="These components will be updated."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2024-04-15 13:25:09,600 DEBUG root Chosen display Format:table[box,title="These components will be installed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+┌─────────────────────────────────────────────────────────────────────────────┐
+│                     These components will be installed.                     │
+├─────────────────────────────────────────────────────┬────────────┬──────────┤
+│                         Name                        │   Version  │   Size   │
+├─────────────────────────────────────────────────────┼────────────┼──────────┤
+│ BigQuery Command Line Tool                          │      2.1.3 │  1.7 MiB │
+│ BigQuery Command Line Tool (Platform Specific)      │    2.0.101 │  < 1 MiB │
+│ Bundled Python 3.11                                 │     3.11.8 │ 74.9 MiB │
+│ Cloud Storage Command Line Tool                     │       5.27 │ 11.3 MiB │
+│ Cloud Storage Command Line Tool (Platform Specific) │       5.27 │  < 1 MiB │
+│ Google Cloud CLI Core Libraries (Platform Specific) │ 2024.01.06 │  < 1 MiB │
+│ Google Cloud CRC32C Hash Tool                       │      1.0.0 │  1.2 MiB │
+│ anthoscli                                           │     0.2.48 │ 68.9 MiB │
+│ gcloud cli dependencies                             │ 2021.04.16 │  < 1 MiB │
+└─────────────────────────────────────────────────────┴────────────┴──────────┘
+2024-04-15 13:25:09,743 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-04-15 13:25:09,806 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/RELEASE_NOTES HTTP/1.1" 200 1186568
+2024-04-15 13:25:09,879 INFO ___FILE_ONLY___ For the latest full release notes, please visit:
+  https://cloud.google.com/sdk/release_notes
+2024-04-15 13:25:09,882 INFO ___FILE_ONLY___ ╠═ Creating update staging area ═╣
+[progress-bar frame and tick lines ("═", 13:25:09,882–13:25:12,971) omitted]
+2024-04-15 13:25:13,063 INFO ___FILE_ONLY___ ╠═ Installing: BigQuery Command Line Tool ═╣
+2024-04-15 13:25:13,126 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-bq-20240329151455.tar.gz HTTP/1.1" 200 1743971
+[progress-bar tick lines (13:25:13,138–13:25:13,411) omitted]
+2024-04-15 13:25:13,429 INFO ___FILE_ONLY___ ╠═ Installing: BigQuery Command Line Tool (Platform Spec... ═╣
+2024-04-15 13:25:13,492 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-bq-nix-20240106004423.tar.gz HTTP/1.1" 200 2026
+2024-04-15 13:25:13,503 INFO ___FILE_ONLY___ ╠═ Installing: Bundled Python 3.11 ═╣
+2024-04-15 13:25:13,567 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-bundled-python3-unix-linux-x86_64-20240229170130.tar.gz HTTP/1.1" 200 78486918
+[progress-bar tick lines (13:25:13,861–13:25:19,129) omitted]
+2024-04-15 13:25:19,249 INFO ___FILE_ONLY___ ╠═ Installing: Bundled Python 3.11 ═╣
+2024-04-15 13:25:19,257 INFO ___FILE_ONLY___ ╠═ Installing: Cloud Storage Command Line Tool ═╣
+2024-04-15 13:25:19,326 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gsutil-20231025210228.tar.gz HTTP/1.1" 200 11833901
+[progress-bar tick lines (13:25:19,376–13:25:20,932) omitted]
+2024-04-15 13:25:21,005 INFO ___FILE_ONLY___ ╠═ Installing: Cloud Storage Command Line Tool (Platform... ═╣
+2024-04-15 13:25:21,069 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gsutil-nix-20240106004423.tar.gz HTTP/1.1" 200 2042
+2024-04-15 13:25:21,081 INFO ___FILE_ONLY___ ╠═ Installing: Default set of gcloud commands ═╣
+2024-04-15 13:25:21,089 INFO ___FILE_ONLY___ ╠═ Installing: Google Cloud CLI Core Libraries (Platform... ═╣
+2024-04-15 13:25:21,157 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-core-nix-20240106004423.tar.gz HTTP/1.1" 200 2410
+2024-04-15 13:25:21,168 INFO ___FILE_ONLY___ ╠═ Installing: Google Cloud CRC32C Hash Tool ═╣
+2024-04-15 13:25:21,229 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gcloud-crc32c-linux-x86_64-20231215195722.tar.gz HTTP/1.1" 200 1287877
+[progress-bar tick lines (13:25:21,240–13:25:21,279) omitted]
+2024-04-15 13:25:21,289 INFO ___FILE_ONLY___ ╠═ Installing: Google Cloud CRC32C Hash Tool ═╣
+2024-04-15 13:25:21,296 INFO ___FILE_ONLY___ ╠═ Installing: anthoscli ═╣
+2024-04-15 13:25:21,360 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-anthoscli-linux-x86_64-20240209195330.tar.gz HTTP/1.1" 200 72231225
+[progress-bar tick lines (13:25:21,643–13:25:23,972) omitted]
+2024-04-15 13:25:23,994 INFO ___FILE_ONLY___ ╠═ Installing: anthoscli ═╣
+2024-04-15 13:25:24,001 INFO ___FILE_ONLY___ ╠═ Installing: gcloud cli dependencies ═╣
+2024-04-15 13:25:24,067 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gcloud-deps-linux-x86_64-20210416153011.tar.gz HTTP/1.1" 200 104
+2024-04-15 13:25:24,076 INFO ___FILE_ONLY___ ╠═ Creating backup and activating new installation ═╣
+2024-04-15 13:25:24,076 DEBUG root Attempting to move directory [/tools/google-cloud-sdk] to [/tools/google-cloud-sdk.staging/.install/.backup]
+2024-04-15 13:25:24,076 DEBUG root Attempting to move directory [/tools/google-cloud-sdk.staging] to [/tools/google-cloud-sdk]
+2024-04-15 13:25:24,080 DEBUG root Updating notification cache...
+2024-04-15 13:25:24,083 INFO ___FILE_ONLY___ Performing post processing steps...
+2024-04-15 13:25:24,083 DEBUG root Executing command: ['/tools/google-cloud-sdk/bin/gcloud', 'components', 'post-process']
+2024-04-15 13:25:35,100 INFO ___FILE_ONLY___ Update done!
+2024-04-15 13:25:35,104 DEBUG root Chosen display Format:none
+2024-04-15 13:25:35,105 INFO root Display format: "none"
demo/.config/logs/2024.04.15/13.25.24.511550.log
ADDED
@@ -0,0 +1,5 @@
+2024-04-15 13:25:24,512 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2024-04-15 13:25:24,514 DEBUG root Loaded Command Group: ['gcloud', 'components', 'post_process']
+2024-04-15 13:25:24,516 DEBUG root Running [gcloud.components.post-process] with arguments: []
+2024-04-15 13:25:34,997 DEBUG root Chosen display Format:none
+2024-04-15 13:25:34,997 INFO root Display format: "none"
demo/.config/logs/2024.04.15/13.25.35.658011.log
ADDED
@@ -0,0 +1,169 @@
+2024-04-15 13:25:35,659 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2024-04-15 13:25:35,661 DEBUG root Loaded Command Group: ['gcloud', 'components', 'update']
+2024-04-15 13:25:35,664 DEBUG root Running [gcloud.components.update] with arguments: [--quiet: "True", COMPONENT-IDS:8: "['gcloud', 'core', 'bq', 'gsutil', 'compute', 'preview', 'alpha', 'beta']"]
+2024-04-15 13:25:35,665 INFO ___FILE_ONLY___ Beginning update. This process may take several minutes.
+2024-04-15 13:25:35,673 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-04-15 13:25:35,731 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components-2.json HTTP/1.1" 200 214439
+2024-04-15 13:25:35,751 WARNING root Component [compute] no longer exists.
+2024-04-15 13:25:35,751 WARNING root Component [preview] no longer exists.
+2024-04-15 13:25:35,753 INFO ___FILE_ONLY___ Your current Google Cloud CLI version is: 471.0.0
+2024-04-15 13:25:35,753 INFO ___FILE_ONLY___ Installing components from version: 471.0.0
+2024-04-15 13:25:35,753 DEBUG root Chosen display Format:table[box,title="These components will be removed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2024-04-15 13:25:35,754 DEBUG root Chosen display Format:table[box,title="These components will be updated."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2024-04-15 13:25:35,755 DEBUG root Chosen display Format:table[box,title="These components will be installed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+┌──────────────────────────────────────────────┐
+│     These components will be installed.      │
+├───────────────────────┬────────────┬─────────┤
+│          Name         │   Version  │   Size  │
+├───────────────────────┼────────────┼─────────┤
+│ gcloud Alpha Commands │ 2024.03.29 │ < 1 MiB │
+│ gcloud Beta Commands  │ 2024.03.29 │ < 1 MiB │
+└───────────────────────┴────────────┴─────────┘
+2024-04-15 13:25:35,800 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-04-15 13:25:35,860 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/RELEASE_NOTES HTTP/1.1" 200 1186568
+2024-04-15 13:25:35,931 INFO ___FILE_ONLY___ For the latest full release notes, please visit:
+  https://cloud.google.com/sdk/release_notes
+2024-04-15 13:25:35,934 INFO ___FILE_ONLY___ ╠═ Creating update staging area ═╣
+[progress-bar frame and tick lines ("═", 13:25:35,934–13:25:40,549) omitted]
+2024-04-15 13:25:44,559 INFO ___FILE_ONLY___ ╠═ Installing: gcloud Alpha Commands ═╣
+2024-04-15 13:25:44,626 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-alpha-20240329151455.tar.gz HTTP/1.1" 200 800
+2024-04-15 13:25:44,637 INFO ___FILE_ONLY___ ╠═ Installing: gcloud Beta Commands ═╣
+2024-04-15 13:25:44,704 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-beta-20240329151455.tar.gz HTTP/1.1" 200 797
+2024-04-15 13:25:44,716 INFO ___FILE_ONLY___ ╠═ Creating backup and activating new installation ═╣
+2024-04-15 13:25:44,717 DEBUG root Attempting to move directory [/tools/google-cloud-sdk] to [/tools/google-cloud-sdk.staging/.install/.backup]
+2024-04-15 13:25:44,717 DEBUG root Attempting to move directory [/tools/google-cloud-sdk.staging] to [/tools/google-cloud-sdk]
+2024-04-15 13:25:44,722 DEBUG root Updating notification cache...
+2024-04-15 13:25:44,724 INFO ___FILE_ONLY___ Performing post processing steps...
+2024-04-15 13:25:44,725 DEBUG root Executing command: ['/tools/google-cloud-sdk/bin/gcloud', 'components', 'post-process']
+2024-04-15 13:25:59,247 INFO ___FILE_ONLY___ Update done!
+2024-04-15 13:25:59,251 DEBUG root Chosen display Format:none
+2024-04-15 13:25:59,252 INFO root Display format: "none"
demo/.config/logs/2024.04.15/13.25.45.199675.log
ADDED
@@ -0,0 +1,5 @@
+2024-04-15 13:25:45,200 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2024-04-15 13:25:45,202 DEBUG root Loaded Command Group: ['gcloud', 'components', 'post_process']
+2024-04-15 13:25:45,205 DEBUG root Running [gcloud.components.post-process] with arguments: []
+2024-04-15 13:25:59,091 DEBUG root Chosen display Format:none
+2024-04-15 13:25:59,092 INFO root Display format: "none"
demo/.config/logs/2024.04.15/13.25.59.817323.log
ADDED
@@ -0,0 +1,8 @@
+2024-04-15 13:25:59,819 DEBUG root Loaded Command Group: ['gcloud', 'config']
+2024-04-15 13:25:59,944 DEBUG root Loaded Command Group: ['gcloud', 'config', 'set']
+2024-04-15 13:25:59,947 DEBUG root Running [gcloud.config.set] with arguments: [SECTION/PROPERTY: "component_manager/disable_update_check", VALUE: "true"]
+2024-04-15 13:25:59,948 INFO ___FILE_ONLY___ Updated property [component_manager/disable_update_check].
+
+2024-04-15 13:25:59,949 DEBUG root Chosen display Format:default
+2024-04-15 13:25:59,950 INFO root Display format: "default"
+2024-04-15 13:25:59,950 DEBUG root SDK update checks are disabled.
demo/.config/logs/2024.04.15/13.26.00.519914.log
ADDED
@@ -0,0 +1,8 @@
2024-04-15 13:26:00,522 DEBUG root Loaded Command Group: ['gcloud', 'config']
2024-04-15 13:26:00,649 DEBUG root Loaded Command Group: ['gcloud', 'config', 'set']
2024-04-15 13:26:00,652 DEBUG root Running [gcloud.config.set] with arguments: [SECTION/PROPERTY: "compute/gce_metadata_read_timeout_sec", VALUE: "0"]
2024-04-15 13:26:00,653 INFO ___FILE_ONLY___ Updated property [compute/gce_metadata_read_timeout_sec].

2024-04-15 13:26:00,654 DEBUG root Chosen display Format:default
2024-04-15 13:26:00,655 INFO root Display format: "default"
2024-04-15 13:26:00,655 DEBUG root SDK update checks are disabled.
demo/.gitattributes
ADDED
@@ -0,0 +1,38 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
dust3r/checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth.1 filter=lfs diff=lfs merge=lfs -text
sample_data/mnist_test.csv filter=lfs diff=lfs merge=lfs -text
sample_data/mnist_train_small.csv filter=lfs diff=lfs merge=lfs -text
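These rules route matching files through Git LFS. For reference, a rule like the checkpoint entry above is what `git lfs track` emits (a sketch, assuming Git LFS is installed; the path is taken verbatim from the file above):

```bash
git lfs install
# appends a "filter=lfs diff=lfs merge=lfs -text" rule for this path to .gitattributes
git lfs track "dust3r/checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth.1"
```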
demo/README.md
ADDED
@@ -0,0 +1,6 @@
---
title: demo
app_file: /content/dust3r/demo.py
sdk: gradio
sdk_version: 4.26.0
---
demo/app.py
ADDED
@@ -0,0 +1,7 @@
import gradio as gr

def greet(name):
    return "Hello " + name + "!!"

iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
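This is the stock Gradio placeholder app. To smoke-test it locally (a sketch; Gradio serves on http://127.0.0.1:7860 by default):

```bash
pip install gradio
python app.py
# then open http://127.0.0.1:7860 in a browser
```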
demo/dust3r/.gitignore
ADDED
@@ -0,0 +1,132 @@
data/
checkpoints/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
demo/dust3r/.gitmodules
ADDED
@@ -0,0 +1,3 @@
[submodule "croco"]
    path = croco
    url = https://github.com/naver/croco
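This file maps the `croco` directory to its upstream repository. If the repo was cloned without `--recursive`, the submodule can be fetched afterwards with:

```bash
git submodule update --init --recursive
```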
demo/dust3r/LICENSE
ADDED
@@ -0,0 +1,7 @@
DUSt3R, Copyright (c) 2024-present Naver Corporation, is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 license.

A summary of the CC BY-NC-SA 4.0 license is located here:
https://creativecommons.org/licenses/by-nc-sa/4.0/

The CC BY-NC-SA 4.0 license is located here:
https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
demo/dust3r/NOTICE
ADDED
@@ -0,0 +1,13 @@
DUSt3R
Copyright 2024-present NAVER Corp.

This project contains subcomponents with separate copyright notices and license terms.
Your use of the source code for these subcomponents is subject to the terms and conditions of the following licenses.

====

naver/croco
https://github.com/naver/croco/

Creative Commons Attribution-NonCommercial-ShareAlike 4.0

demo/dust3r/README.md
ADDED
@@ -0,0 +1,299 @@
# DUSt3R

Official implementation of `DUSt3R: Geometric 3D Vision Made Easy`
[[Project page](https://dust3r.europe.naverlabs.com/)], [[DUSt3R arxiv](https://arxiv.org/abs/2312.14132)]

![Example of reconstruction from two images](assets/pipeline1.jpg)

![High level overview of DUSt3R capabilities](assets/dust3r_archi.jpg)

```bibtex
@misc{wang2023dust3r,
      title={DUSt3R: Geometric 3D Vision Made Easy},
      author={Shuzhe Wang and Vincent Leroy and Yohann Cabon and Boris Chidlovskii and Jerome Revaud},
      year={2023},
      eprint={2312.14132},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```

## Table of Contents

- [DUSt3R](#dust3r)
- [License](#license)
- [Get Started](#get-started)
  - [Installation](#installation)
  - [Checkpoints](#checkpoints)
  - [Interactive demo](#interactive-demo)
- [Usage](#usage)
- [Training](#training)
  - [Demo](#demo)
  - [Our Hyperparameters](#our-hyperparameters)

## License
The code is distributed under the CC BY-NC-SA 4.0 License. See [LICENSE](LICENSE) for more information.
```python
# Copyright (C) 2024-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
```

## Get Started

### Installation

1. Clone DUSt3R
```bash
git clone --recursive https://github.com/naver/dust3r
cd dust3r
# if you have already cloned dust3r:
# git submodule update --init --recursive
```

2. Create the environment; here we show an example using conda.
```bash
conda create -n dust3r python=3.11 cmake=3.14.0
conda activate dust3r
conda install pytorch torchvision pytorch-cuda=12.1 -c pytorch -c nvidia  # use the correct version of cuda for your system
pip install -r requirements.txt
```


3. Optional: compile the cuda kernels for RoPE (as in CroCo v2)
```bash
# DUST3R relies on RoPE positional embeddings for which you can compile some cuda kernels for faster runtime.
cd croco/models/curope/
python setup.py build_ext --inplace
cd ../../../
```

4. Download the pre-trained model
```bash
mkdir -p checkpoints/
wget https://download.europe.naverlabs.com/ComputerVision/DUSt3R/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth -P checkpoints/
```

### Checkpoints

We provide several pre-trained models:

| Modelname | Training resolutions | Head | Encoder | Decoder |
|-------------|----------------------|------|---------|---------|
| [`DUSt3R_ViTLarge_BaseDecoder_224_linear.pth`](https://download.europe.naverlabs.com/ComputerVision/DUSt3R/DUSt3R_ViTLarge_BaseDecoder_224_linear.pth) | 224x224 | Linear | ViT-L | ViT-B |
| [`DUSt3R_ViTLarge_BaseDecoder_512_linear.pth`](https://download.europe.naverlabs.com/ComputerVision/DUSt3R/DUSt3R_ViTLarge_BaseDecoder_512_linear.pth) | 512x384, 512x336, 512x288, 512x256, 512x160 | Linear | ViT-L | ViT-B |
| [`DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth`](https://download.europe.naverlabs.com/ComputerVision/DUSt3R/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth) | 512x384, 512x336, 512x288, 512x256, 512x160 | DPT | ViT-L | ViT-B |

You can check the hyperparameters we used to train these models in the [section: Our Hyperparameters](#our-hyperparameters)

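For example, to try the lighter 224-resolution checkpoint from the table above, the same download-and-run pattern should work (a sketch combining the table URL with the demo flags documented below):

```bash
mkdir -p checkpoints/
wget https://download.europe.naverlabs.com/ComputerVision/DUSt3R/DUSt3R_ViTLarge_BaseDecoder_224_linear.pth -P checkpoints/
python3 demo.py --weights checkpoints/DUSt3R_ViTLarge_BaseDecoder_224_linear.pth --image_size 224
```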
### Interactive demo
In this demo, you should be able to run DUSt3R on your machine to reconstruct a scene.
First select images that depict the same scene.

You can adjust the global alignment schedule and its number of iterations.
Note: if you selected one or two images, the global alignment procedure will be skipped (mode=GlobalAlignerMode.PairViewer)
Hit "Run" and wait.
When the global alignment ends, the reconstruction appears.
Use the slider "min_conf_thr" to show or remove low confidence areas.

```bash
python3 demo.py --weights checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth

# Use --image_size to select the correct resolution for your checkpoint. 512 (default) or 224
# Use --local_network to make it accessible on the local network, or --server_name to specify the url manually
# Use --server_port to change the port, by default it will search for an available port starting at 7860
# Use --device to use a different device, by default it's "cuda"
```

![demo](assets/demo.jpg)

## Usage

```python
from dust3r.inference import inference, load_model
from dust3r.utils.image import load_images
from dust3r.image_pairs import make_pairs
from dust3r.cloud_opt import global_aligner, GlobalAlignerMode

if __name__ == '__main__':
    model_path = "checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth"
    device = 'cuda'
    batch_size = 1
    schedule = 'cosine'
    lr = 0.01
    niter = 300

    model = load_model(model_path, device)
    # load_images can take a list of images or a directory
    images = load_images(['croco/assets/Chateau1.png', 'croco/assets/Chateau2.png'], size=512)
    pairs = make_pairs(images, scene_graph='complete', prefilter=None, symmetrize=True)
    output = inference(pairs, model, device, batch_size=batch_size)

    # at this stage, you have the raw dust3r predictions
    view1, pred1 = output['view1'], output['pred1']
    view2, pred2 = output['view2'], output['pred2']
    # here, view1, pred1, view2, pred2 are dicts of lists of len(2)
    #  -> because we symmetrize we have (im1, im2) and (im2, im1) pairs
    # in each view you have:
    # an integer image identifier: view1['idx'] and view2['idx']
    # the img: view1['img'] and view2['img']
    # the image shape: view1['true_shape'] and view2['true_shape']
    # an instance string output by the dataloader: view1['instance'] and view2['instance']
    # pred1 and pred2 contain the confidence values: pred1['conf'] and pred2['conf']
    # pred1 contains 3D points for view1['img'] in view1['img'] space: pred1['pts3d']
    # pred2 contains 3D points for view2['img'] in view1['img'] space: pred2['pts3d_in_other_view']

    # next we'll use the global_aligner to align the predictions
    # depending on your task, you may be fine with the raw output and not need it
    # with only two input images, you could use GlobalAlignerMode.PairViewer: it would just convert the output
    # if using GlobalAlignerMode.PairViewer, no need to run compute_global_alignment
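    # (editorial sketch, not part of the original file) that two-image path would be:
    #   scene = global_aligner(output, device=device, mode=GlobalAlignerMode.PairViewer)
    # and the compute_global_alignment call below would then be skipped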
    scene = global_aligner(output, device=device, mode=GlobalAlignerMode.PointCloudOptimizer)
    loss = scene.compute_global_alignment(init="mst", niter=niter, schedule=schedule, lr=lr)

    # retrieve useful values from scene:
    imgs = scene.imgs
    focals = scene.get_focals()
    poses = scene.get_im_poses()
    pts3d = scene.get_pts3d()
    confidence_masks = scene.get_masks()

    # visualize reconstruction
    scene.show()

    # find 2D-2D matches between the two images
    from dust3r.utils.geometry import find_reciprocal_matches, xy_grid
    pts2d_list, pts3d_list = [], []
    for i in range(2):
        conf_i = confidence_masks[i].cpu().numpy()
        pts2d_list.append(xy_grid(*imgs[i].shape[:2][::-1])[conf_i])  # imgs[i].shape[:2] = (H, W)
        pts3d_list.append(pts3d[i].detach().cpu().numpy()[conf_i])
    reciprocal_in_P2, nn2_in_P1, num_matches = find_reciprocal_matches(*pts3d_list)
    print(f'found {num_matches} matches')
    matches_im1 = pts2d_list[1][reciprocal_in_P2]
    matches_im0 = pts2d_list[0][nn2_in_P1][reciprocal_in_P2]

    # visualize a few matches
    import numpy as np
    from matplotlib import pyplot as pl
    n_viz = 10
    match_idx_to_viz = np.round(np.linspace(0, num_matches-1, n_viz)).astype(int)
    viz_matches_im0, viz_matches_im1 = matches_im0[match_idx_to_viz], matches_im1[match_idx_to_viz]

    H0, W0, H1, W1 = *imgs[0].shape[:2], *imgs[1].shape[:2]
    img0 = np.pad(imgs[0], ((0, max(H1 - H0, 0)), (0, 0), (0, 0)), 'constant', constant_values=0)
    img1 = np.pad(imgs[1], ((0, max(H0 - H1, 0)), (0, 0), (0, 0)), 'constant', constant_values=0)
    img = np.concatenate((img0, img1), axis=1)
    pl.figure()
    pl.imshow(img)
    cmap = pl.get_cmap('jet')
    for i in range(n_viz):
        (x0, y0), (x1, y1) = viz_matches_im0[i].T, viz_matches_im1[i].T
        pl.plot([x0, x1 + W0], [y0, y1], '-+', color=cmap(i / (n_viz - 1)), scalex=False, scaley=False)
    pl.show(block=True)

```
![matching example on croco pair](assets/matching.jpg)

## Training
In this section, we present a short demonstration to get started with training DUSt3R. At the moment, we have not released the training datasets, so we're going to download and prepare a subset of [CO3Dv2](https://github.com/facebookresearch/co3d) - [Creative Commons Attribution-NonCommercial 4.0 International](https://github.com/facebookresearch/co3d/blob/main/LICENSE) and launch the training code on it.
The demo model will be trained for a few epochs on a very small dataset. It will not be very good.

### Demo

```bash

# download and prepare the co3d subset
mkdir -p data/co3d_subset
cd data/co3d_subset
git clone https://github.com/facebookresearch/co3d
cd co3d
python3 ./co3d/download_dataset.py --download_folder ../ --single_sequence_subset
rm ../*.zip
cd ../../..

python3 datasets_preprocess/preprocess_co3d.py --co3d_dir data/co3d_subset --output_dir data/co3d_subset_processed --single_sequence_subset

# download the pretrained croco v2 checkpoint
mkdir -p checkpoints/
wget https://download.europe.naverlabs.com/ComputerVision/CroCo/CroCo_V2_ViTLarge_BaseDecoder.pth -P checkpoints/

# the training of dust3r is done in 3 steps.
# for this example we'll do fewer epochs, for the actual hyperparameters we used in the paper, see the next section: "Our Hyperparameters"
# step 1 - train dust3r for 224 resolution
torchrun --nproc_per_node=4 train.py \
    --train_dataset "1000 @ Co3d(split='train', ROOT='data/co3d_subset_processed', aug_crop=16, mask_bg='rand', resolution=224, transform=ColorJitter)" \
    --test_dataset "100 @ Co3d(split='test', ROOT='data/co3d_subset_processed', resolution=224, seed=777)" \
    --model "AsymmetricCroCo3DStereo(pos_embed='RoPE100', img_size=(224, 224), head_type='linear', output_mode='pts3d', depth_mode=('exp', -inf, inf), conf_mode=('exp', 1, inf), enc_embed_dim=1024, enc_depth=24, enc_num_heads=16, dec_embed_dim=768, dec_depth=12, dec_num_heads=12)" \
    --train_criterion "ConfLoss(Regr3D(L21, norm_mode='avg_dis'), alpha=0.2)" \
    --test_criterion "Regr3D_ScaleShiftInv(L21, gt_scale=True)" \
    --pretrained checkpoints/CroCo_V2_ViTLarge_BaseDecoder.pth \
    --lr 0.0001 --min_lr 1e-06 --warmup_epochs 1 --epochs 10 --batch_size 16 --accum_iter 1 \
    --save_freq 1 --keep_freq 5 --eval_freq 1 \
    --output_dir checkpoints/dust3r_demo_224

# step 2 - train dust3r for 512 resolution
torchrun --nproc_per_node=4 train.py \
    --train_dataset "1000 @ Co3d(split='train', ROOT='data/co3d_subset_processed', aug_crop=16, mask_bg='rand', resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter)" \
    --test_dataset="100 @ Co3d(split='test', ROOT='data/co3d_subset_processed', resolution=(512,384), seed=777)" \
    --model="AsymmetricCroCo3DStereo(pos_embed='RoPE100', patch_embed_cls='ManyAR_PatchEmbed', img_size=(512, 512), head_type='linear', output_mode='pts3d', depth_mode=('exp', -inf, inf), conf_mode=('exp', 1, inf), enc_embed_dim=1024, enc_depth=24, enc_num_heads=16, dec_embed_dim=768, dec_depth=12, dec_num_heads=12)" \
    --train_criterion "ConfLoss(Regr3D(L21, norm_mode='avg_dis'), alpha=0.2)" \
    --test_criterion "Regr3D_ScaleShiftInv(L21, gt_scale=True)" \
    --pretrained='checkpoints/dust3r_demo_224/checkpoint-best.pth' \
    --lr=0.0001 --min_lr=1e-06 --warmup_epochs 1 --epochs 10 --batch_size 4 --accum_iter 4 \
    --save_freq 1 --keep_freq 5 --eval_freq 1 \
    --output_dir checkpoints/dust3r_demo_512

# step 3 - train dust3r for 512 resolution with dpt
torchrun --nproc_per_node=4 train.py \
    --train_dataset "1000 @ Co3d(split='train', ROOT='data/co3d_subset_processed', aug_crop=16, mask_bg='rand', resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter)" \
    --test_dataset="100 @ Co3d(split='test', ROOT='data/co3d_subset_processed', resolution=(512,384), seed=777)" \
    --model="AsymmetricCroCo3DStereo(pos_embed='RoPE100', patch_embed_cls='ManyAR_PatchEmbed', img_size=(512, 512), head_type='dpt', output_mode='pts3d', depth_mode=('exp', -inf, inf), conf_mode=('exp', 1, inf), enc_embed_dim=1024, enc_depth=24, enc_num_heads=16, dec_embed_dim=768, dec_depth=12, dec_num_heads=12)" \
    --train_criterion "ConfLoss(Regr3D(L21, norm_mode='avg_dis'), alpha=0.2)" \
    --test_criterion "Regr3D_ScaleShiftInv(L21, gt_scale=True)" \
    --pretrained='checkpoints/dust3r_demo_512/checkpoint-best.pth' \
    --lr=0.0001 --min_lr=1e-06 --warmup_epochs 1 --epochs 10 --batch_size 2 --accum_iter 8 \
    --save_freq 1 --keep_freq 5 --eval_freq 1 \
    --output_dir checkpoints/dust3r_demo_512dpt

```
### Our Hyperparameters
We didn't release the training datasets, but here are the commands we used for training our models:

```bash
# NOTE: ROOT path omitted for datasets
# 224 linear
torchrun --nproc_per_node 4 train.py \
    --train_dataset=" + 100_000 @ Habitat512(1_000_000, split='train', aug_crop=16, resolution=224, transform=ColorJitter) + 100_000 @ BlendedMVS(split='train', aug_crop=16, resolution=224, transform=ColorJitter) + 100_000 @ MegaDepthDense(split='train', aug_crop=16, resolution=224, transform=ColorJitter) + 100_000 @ ARKitScenes(aug_crop=256, resolution=224, transform=ColorJitter) + 100_000 @ Co3d_v3(split='train', aug_crop=16, mask_bg='rand', resolution=224, transform=ColorJitter) + 100_000 @ StaticThings3D(aug_crop=256, mask_bg='rand', resolution=224, transform=ColorJitter) + 100_000 @ ScanNetpp(split='train', aug_crop=256, resolution=224, transform=ColorJitter) + 100_000 @ Waymo(aug_crop=128, resolution=224, transform=ColorJitter) " \
    --test_dataset=" Habitat512(1_000, split='val', resolution=224, seed=777) + 1_000 @ BlendedMVS(split='val', resolution=224, seed=777) + 1_000 @ MegaDepthDense(split='val', resolution=224, seed=777) + 1_000 @ Co3d_v3(split='test', mask_bg='rand', resolution=224, seed=777) " \
    --train_criterion="ConfLoss(Regr3D(L21, norm_mode='avg_dis'), alpha=0.2)" \
    --test_criterion='Regr3D_ScaleShiftInv(L21, gt_scale=True)' \
    --model="AsymmetricCroCo3DStereo(pos_embed='RoPE100', img_size=(224, 224), head_type='linear', output_mode='pts3d', depth_mode=('exp', -inf, inf), conf_mode=('exp', 1, inf), enc_embed_dim=1024, enc_depth=24, enc_num_heads=16, dec_embed_dim=768, dec_depth=12, dec_num_heads=12)" \
    --pretrained="checkpoints/CroCo_V2_ViTLarge_BaseDecoder.pth" \
    --lr=0.0001 --min_lr=1e-06 --warmup_epochs=10 --epochs=100 --batch_size=16 --accum_iter=1 \
    --save_freq=5 --keep_freq=10 --eval_freq=1 \
    --output_dir='checkpoints/dust3r_224'

# 512 linear
torchrun --nproc_per_node 8 train.py \
    --train_dataset=" + 10_000 @ Habitat512(1_000_000, split='train', aug_crop=16, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ BlendedMVS(split='train', aug_crop=16, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ MegaDepthDense(split='train', aug_crop=16, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ ARKitScenes(aug_crop=256, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ Co3d_v3(split='train', aug_crop=16, mask_bg='rand', resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ StaticThings3D(aug_crop=256, mask_bg='rand', resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ ScanNetpp(split='train', aug_crop=256, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ Waymo(aug_crop=128, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) " \
    --test_dataset=" Habitat512(1_000, split='val', resolution=(512,384), seed=777) + 1_000 @ BlendedMVS(split='val', resolution=(512,384), seed=777) + 1_000 @ MegaDepthDense(split='val', resolution=(512,336), seed=777) + 1_000 @ Co3d_v3(split='test', resolution=(512,384), seed=777) " \
    --train_criterion="ConfLoss(Regr3D(L21, norm_mode='avg_dis'), alpha=0.2)" \
    --test_criterion='Regr3D_ScaleShiftInv(L21, gt_scale=True)' \
    --model="AsymmetricCroCo3DStereo(pos_embed='RoPE100', patch_embed_cls='ManyAR_PatchEmbed', img_size=(512, 512), head_type='linear', output_mode='pts3d', depth_mode=('exp', -inf, inf), conf_mode=('exp', 1, inf), enc_embed_dim=1024, enc_depth=24, enc_num_heads=16, dec_embed_dim=768, dec_depth=12, dec_num_heads=12)" \
    --pretrained='checkpoints/dust3r_224/checkpoint-best.pth' \
    --lr=0.0001 --min_lr=1e-06 --warmup_epochs=20 --epochs=200 --batch_size=4 --accum_iter=2 \
    --save_freq=10 --keep_freq=10 --eval_freq=1 --print_freq=10 \
    --output_dir='checkpoints/dust3r_512'

# 512 dpt
torchrun --nproc_per_node 8 train.py \
    --train_dataset=" + 10_000 @ Habitat512(1_000_000, split='train', aug_crop=16, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ BlendedMVS(split='train', aug_crop=16, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ MegaDepthDense(split='train', aug_crop=16, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ ARKitScenes(aug_crop=256, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ Co3d_v3(split='train', aug_crop=16, mask_bg='rand', resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ StaticThings3D(aug_crop=256, mask_bg='rand', resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ ScanNetpp(split='train', aug_crop=256, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) + 10_000 @ Waymo(aug_crop=128, resolution=[(512, 384), (512, 336), (512, 288), (512, 256), (512, 160)], transform=ColorJitter) " \
    --test_dataset=" Habitat512(1_000, split='val', resolution=(512,384), seed=777) + 1_000 @ BlendedMVS(split='val', resolution=(512,384), seed=777) + 1_000 @ MegaDepthDense(split='val', resolution=(512,336), seed=777) + 1_000 @ Co3d_v3(split='test', resolution=(512,384), seed=777) " \
    --train_criterion="ConfLoss(Regr3D(L21, norm_mode='avg_dis'), alpha=0.2)" \
    --test_criterion='Regr3D_ScaleShiftInv(L21, gt_scale=True)' \
    --model="AsymmetricCroCo3DStereo(pos_embed='RoPE100', patch_embed_cls='ManyAR_PatchEmbed', img_size=(512, 512), head_type='dpt', output_mode='pts3d', depth_mode=('exp', -inf, inf), conf_mode=('exp', 1, inf), enc_embed_dim=1024, enc_depth=24, enc_num_heads=16, dec_embed_dim=768, dec_depth=12, dec_num_heads=12)" \
    --pretrained='checkpoints/dust3r_512/checkpoint-best.pth' \
    --lr=0.0001 --min_lr=1e-06 --warmup_epochs=15 --epochs=90 --batch_size=2 --accum_iter=4 \
    --save_freq=5 --keep_freq=10 --eval_freq=1 --print_freq=10 \
    --output_dir='checkpoints/dust3r_512dpt'

```
demo/dust3r/assets/demo.jpg
ADDED
(binary image file)
demo/dust3r/assets/dust3r_archi.jpg
ADDED
(binary image file)
demo/dust3r/assets/matching.jpg
ADDED
(binary image file)
demo/dust3r/assets/pipeline1.jpg
ADDED
(binary image file)
demo/dust3r/checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:15934abef92c7071953f65cb858b23b52737f1dee7934198f4abca67f6eb8949
size 407543808
demo/dust3r/checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth.1
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e8bbf0c4d1d6007f5343f3f45814b956ddc5bbb4d00cb66beaf73afe5c53b34
size 2285019929
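These two entries are Git LFS pointer files, not the weights themselves. After cloning, the actual payloads (roughly 0.4 GB and 2.3 GB per the `size` fields above) can be fetched with something like (a sketch, assuming Git LFS is set up):

```bash
git lfs pull --include="demo/dust3r/checkpoints/*"
```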
demo/dust3r/croco/LICENSE
ADDED
@@ -0,0 +1,52 @@
CroCo, Copyright (c) 2022-present Naver Corporation, is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 license.

A summary of the CC BY-NC-SA 4.0 license is located here:
https://creativecommons.org/licenses/by-nc-sa/4.0/

The CC BY-NC-SA 4.0 license is located here:
https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode


SEE NOTICE BELOW WITH RESPECT TO THE FILE: models/pos_embed.py, models/blocks.py

***************************

NOTICE WITH RESPECT TO THE FILE: models/pos_embed.py

This software is being redistributed in a modified form. The original form is available here:

https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py

The software in this file incorporates parts of the following software available here:

Transformer: https://github.com/tensorflow/models/blob/master/official/legacy/transformer/model_utils.py
available under the following license: https://github.com/tensorflow/models/blob/master/LICENSE

MoCo v3: https://github.com/facebookresearch/moco-v3
available under the following license: https://github.com/facebookresearch/moco-v3/blob/main/LICENSE

DeiT: https://github.com/facebookresearch/deit
available under the following license: https://github.com/facebookresearch/deit/blob/main/LICENSE


THE ORIGINAL COPYRIGHT NOTICE AND PERMISSION NOTICE AVAILABLE HERE IS REPRODUCED BELOW:

https://github.com/facebookresearch/mae/blob/main/LICENSE

Attribution-NonCommercial 4.0 International

***************************

NOTICE WITH RESPECT TO THE FILE: models/blocks.py

This software is being redistributed in a modified form. The original form is available here:

https://github.com/rwightman/pytorch-image-models

THE ORIGINAL COPYRIGHT NOTICE AND PERMISSION NOTICE AVAILABLE HERE IS REPRODUCED BELOW:

https://github.com/rwightman/pytorch-image-models/blob/master/LICENSE

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
demo/dust3r/croco/NOTICE
ADDED
@@ -0,0 +1,21 @@
CroCo
Copyright 2022-present NAVER Corp.

This project contains subcomponents with separate copyright notices and license terms.
Your use of the source code for these subcomponents is subject to the terms and conditions of the following licenses.

====

facebookresearch/mae
https://github.com/facebookresearch/mae

Attribution-NonCommercial 4.0 International

====

rwightman/pytorch-image-models
https://github.com/rwightman/pytorch-image-models

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
demo/dust3r/croco/README.MD
ADDED
@@ -0,0 +1,124 @@
# CroCo + CroCo v2 / CroCo-Stereo / CroCo-Flow

[[`CroCo arXiv`](https://arxiv.org/abs/2210.10716)] [[`CroCo v2 arXiv`](https://arxiv.org/abs/2211.10408)] [[`project page and demo`](https://croco.europe.naverlabs.com/)]

This repository contains the code for our CroCo model presented in our NeurIPS'22 paper [CroCo: Self-Supervised Pre-training for 3D Vision Tasks by Cross-View Completion](https://openreview.net/pdf?id=wZEfHUM5ri) and its follow-up extension published at ICCV'23 [Improved Cross-view Completion Pre-training for Stereo Matching and Optical Flow](https://openaccess.thecvf.com/content/ICCV2023/html/Weinzaepfel_CroCo_v2_Improved_Cross-view_Completion_Pre-training_for_Stereo_Matching_and_ICCV_2023_paper.html), referred to as CroCo v2:

![image showing the CroCo schema](assets/arch.jpg)

```bibtex
@inproceedings{croco,
  title={{CroCo: Self-Supervised Pre-training for 3D Vision Tasks by Cross-View Completion}},
  author={{Weinzaepfel, Philippe and Leroy, Vincent and Lucas, Thomas and Br\'egier, Romain and Cabon, Yohann and Arora, Vaibhav and Antsfeld, Leonid and Chidlovskii, Boris and Csurka, Gabriela and Revaud J\'er\^ome}},
  booktitle={{NeurIPS}},
  year={2022}
}

@inproceedings{croco_v2,
  title={{CroCo v2: Improved Cross-view Completion Pre-training for Stereo Matching and Optical Flow}},
  author={Weinzaepfel, Philippe and Lucas, Thomas and Leroy, Vincent and Cabon, Yohann and Arora, Vaibhav and Br{\'e}gier, Romain and Csurka, Gabriela and Antsfeld, Leonid and Chidlovskii, Boris and Revaud, J{\'e}r{\^o}me},
  booktitle={ICCV},
  year={2023}
}
```

## License

The code is distributed under the CC BY-NC-SA 4.0 License. See [LICENSE](LICENSE) for more information.
Some components are based on code from [MAE](https://github.com/facebookresearch/mae) released under the CC BY-NC-SA 4.0 License and [timm](https://github.com/rwightman/pytorch-image-models) released under the Apache 2.0 License.
Some components for stereo matching and optical flow are based on code from [unimatch](https://github.com/autonomousvision/unimatch) released under the MIT license.

## Preparation

1. Install dependencies on a machine with an NVIDIA GPU using e.g. conda. Note that `habitat-sim` is required only for the interactive demo and the synthetic pre-training data generation. If you don't plan to use it, you can ignore the line installing it and use a more recent python version.

```bash
conda create -n croco python=3.7 cmake=3.14.0
conda activate croco
conda install habitat-sim headless -c conda-forge -c aihabitat
conda install pytorch torchvision -c pytorch
conda install notebook ipykernel matplotlib
conda install ipywidgets widgetsnbextension
conda install scikit-learn tqdm quaternion opencv  # only for pretraining / habitat data generation

```

2. Compile cuda kernels for RoPE

CroCo v2 relies on RoPE positional embeddings for which you need to compile some cuda kernels.
```bash
cd models/curope/
python setup.py build_ext --inplace
cd ../../
```

This can take a while, as we compile for all cuda architectures; feel free to update L9 of `models/curope/setup.py` to compile for specific architectures only.
You might also need to set the environment variable `CUDA_HOME` in case you use a custom cuda installation.

In case you cannot compile the kernels, we also provide a slow pytorch version, which will be automatically loaded.
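If editing `setup.py` is inconvenient, one alternative sketch is PyTorch's standard `TORCH_CUDA_ARCH_LIST` variable, which `torch.utils.cpp_extension` honors when no architectures are hard-coded (whether it takes effect here depends on how this particular `setup.py` lists architectures):

```bash
# build only for, e.g., Ampere GPUs instead of every architecture
TORCH_CUDA_ARCH_LIST="8.0;8.6" python setup.py build_ext --inplace
```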

3. Download pre-trained model

We provide several pre-trained models:

| modelname | pre-training data | pos. embed. | Encoder | Decoder |
|------------------------------------------------------------------------------------------------------------------------------------|-------------------|-------------|---------|---------|
| [`CroCo.pth`](https://download.europe.naverlabs.com/ComputerVision/CroCo/CroCo.pth) | Habitat | cosine | ViT-B | Small |
| [`CroCo_V2_ViTBase_SmallDecoder.pth`](https://download.europe.naverlabs.com/ComputerVision/CroCo/CroCo_V2_ViTBase_SmallDecoder.pth) | Habitat + real | RoPE | ViT-B | Small |
| [`CroCo_V2_ViTBase_BaseDecoder.pth`](https://download.europe.naverlabs.com/ComputerVision/CroCo/CroCo_V2_ViTBase_BaseDecoder.pth) | Habitat + real | RoPE | ViT-B | Base |
| [`CroCo_V2_ViTLarge_BaseDecoder.pth`](https://download.europe.naverlabs.com/ComputerVision/CroCo/CroCo_V2_ViTLarge_BaseDecoder.pth) | Habitat + real | RoPE | ViT-L | Base |

To download a specific model, e.g., the first one (`CroCo.pth`)
```bash
mkdir -p pretrained_models/
wget https://download.europe.naverlabs.com/ComputerVision/CroCo/CroCo.pth -P pretrained_models/
```

## Reconstruction example

Simply run after downloading the `CroCo_V2_ViTLarge_BaseDecoder` pretrained model (or update the corresponding line in `demo.py`)
```bash
python demo.py
```

## Interactive demonstration of cross-view completion reconstruction on the Habitat simulator

First download the test scene from Habitat:
```bash
python -m habitat_sim.utils.datasets_download --uids habitat_test_scenes --data-path habitat-sim-data/
```

Then, run the Notebook demo `interactive_demo.ipynb`.

In this demo, you should be able to sample a random reference viewpoint from a [Habitat](https://github.com/facebookresearch/habitat-sim) test scene. Use the sliders to change viewpoint and select a masked target view to reconstruct using CroCo.
![croco_interactive_demo](https://croco.europe.naverlabs.com/croco/assets/interactive_demo.gif)

## Pre-training

### CroCo

To pre-train CroCo, please first generate the pre-training data from the Habitat simulator, following the instructions in [datasets/habitat_sim/README.MD](datasets/habitat_sim/README.MD) and then run the following command:
```
torchrun --nproc_per_node=4 pretrain.py --output_dir ./output/pretraining/
```

Our CroCo pre-training was launched on a single server with 4 GPUs.
It should take around 10 days with A100 or 15 days with V100 to do the 400 pre-training epochs, but decent performance is obtained earlier in training.
Note that, while the code contains the same scaling rule of the learning rate as MAE when changing the effective batch size, we did not experiment to check whether it remains valid in our case.
The first run can take a few minutes to start, to parse all available pre-training pairs.

### CroCo v2

For CroCo v2 pre-training, in addition to the generation of the pre-training data from the Habitat simulator above, please pre-extract the crops from the real datasets following the instructions in [datasets/crops/README.MD](datasets/crops/README.MD).
Then, run the following command for the largest model (ViT-L encoder, Base decoder):
```
torchrun --nproc_per_node=8 pretrain.py --model "CroCoNet(enc_embed_dim=1024, enc_depth=24, enc_num_heads=16, dec_embed_dim=768, dec_num_heads=12, dec_depth=12, pos_embed='RoPE100')" --dataset "habitat_release+ARKitScenes+MegaDepth+3DStreetView+IndoorVL" --warmup_epochs 12 --max_epoch 125 --epochs 250 --amp 0 --keep_freq 5 --output_dir ./output/pretraining_crocov2/
```

Our CroCo v2 pre-training was launched on a single server with 8 GPUs for the largest model, and on a single server with 4 GPUs for the smaller ones, keeping a batch size of 64 per gpu in all cases.
The largest model should take around 12 days on A100.
Note that, while the code contains the same scaling rule of the learning rate as MAE when changing the effective batch size, we did not experiment to check whether it remains valid in our case.

## Stereo matching and Optical flow downstream tasks

For CroCo-Stereo and CroCo-Flow, please refer to [stereoflow/README.MD](stereoflow/README.MD).
demo/dust3r/croco/assets/Chateau1.png
ADDED
(binary image file)
demo/dust3r/croco/assets/Chateau2.png
ADDED
(binary image file)
demo/dust3r/croco/assets/arch.jpg
ADDED
(binary image file)
demo/dust3r/croco/croco-stereo-flow-demo.ipynb
ADDED
@@ -0,0 +1,191 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "9bca0f41",
   "metadata": {},
   "source": [
    "# Simple inference example with CroCo-Stereo or CroCo-Flow"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "80653ef7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Copyright (C) 2022-present Naver Corporation. All rights reserved.\n",
    "# Licensed under CC BY-NC-SA 4.0 (non-commercial use only)."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4f033862",
   "metadata": {},
   "source": [
    "First download the model(s) of your choice by running\n",
    "```\n",
    "bash stereoflow/download_model.sh crocostereo.pth\n",
    "bash stereoflow/download_model.sh crocoflow.pth\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1fb2e392",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "use_gpu = torch.cuda.is_available() and torch.cuda.device_count()>0\n",
    "device = torch.device('cuda:0' if use_gpu else 'cpu')\n",
    "import matplotlib.pylab as plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e0e25d77",
   "metadata": {},
   "outputs": [],
   "source": [
    "from stereoflow.test import _load_model_and_criterion\n",
    "from stereoflow.engine import tiled_pred\n",
    "from stereoflow.datasets_stereo import img_to_tensor, vis_disparity\n",
    "from stereoflow.datasets_flow import flowToColor\n",
    "tile_overlap=0.7 # recommended value, higher value can be slightly better but slower"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "86a921f5",
   "metadata": {},
   "source": [
    "### CroCo-Stereo example"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "64e483cb",
   "metadata": {},
   "outputs": [],
   "source": [
    "image1 = np.asarray(Image.open('<path_to_left_image>'))\n",
    "image2 = np.asarray(Image.open('<path_to_right_image>'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f0d04303",
   "metadata": {},
   "outputs": [],
   "source": [
    "model, _, cropsize, with_conf, task, tile_conf_mode = _load_model_and_criterion('stereoflow_models/crocostereo.pth', None, device)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "47dc14b5",
   "metadata": {},
   "outputs": [],
   "source": [
    "im1 = img_to_tensor(image1).to(device).unsqueeze(0)\n",
    "im2 = img_to_tensor(image2).to(device).unsqueeze(0)\n",
    "with torch.inference_mode():\n",
    "    pred, _, _ = tiled_pred(model, None, im1, im2, None, conf_mode=tile_conf_mode, overlap=tile_overlap, crop=cropsize, with_conf=with_conf, return_time=False)\n",
    "pred = pred.squeeze(0).squeeze(0).cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "583b9f16",
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.imshow(vis_disparity(pred))\n",
    "plt.axis('off')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d2df5d70",
   "metadata": {},
   "source": [
    "### CroCo-Flow example"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9ee257a7",
   "metadata": {},
   "outputs": [],
   "source": [
    "image1 = np.asarray(Image.open('<path_to_first_image>'))\n",
    "image2 = np.asarray(Image.open('<path_to_second_image>'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d5edccf0",
   "metadata": {},
   "outputs": [],
   "source": [
    "model, _, cropsize, with_conf, task, tile_conf_mode = _load_model_and_criterion('stereoflow_models/crocoflow.pth', None, device)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b19692c3",
   "metadata": {},
   "outputs": [],
   "source": [
    "im1 = img_to_tensor(image1).to(device).unsqueeze(0)\n",
    "im2 = img_to_tensor(image2).to(device).unsqueeze(0)\n",
    "with torch.inference_mode():\n",
    "    pred, _, _ = tiled_pred(model, None, im1, im2, None, conf_mode=tile_conf_mode, overlap=tile_overlap, crop=cropsize, with_conf=with_conf, return_time=False)\n",
    "pred = pred.squeeze(0).permute(1,2,0).cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "26f79db3",
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.imshow(flowToColor(pred))\n",
    "plt.axis('off')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
demo/dust3r/croco/datasets/__init__.py
ADDED
File without changes
demo/dust3r/croco/datasets/crops/README.MD
ADDED
@@ -0,0 +1,104 @@
## Generation of crops from the real datasets

The instructions below allow you to generate the crops used for pre-training CroCo v2 from the following real-world datasets: ARKitScenes, MegaDepth, 3DStreetView and IndoorVL.

### Download the metadata of the crops to generate

First, download the metadata and put them in `./data/`:
```
mkdir -p data
cd data/
wget https://download.europe.naverlabs.com/ComputerVision/CroCo/data/crop_metadata.zip
unzip crop_metadata.zip
rm crop_metadata.zip
cd ..
```

### Prepare the original datasets

Second, download the original datasets in `./data/original_datasets/`.
```
mkdir -p data/original_datasets
```

##### ARKitScenes

Download the `raw` dataset from https://github.com/apple/ARKitScenes/blob/main/DATA.md and put it in `./data/original_datasets/ARKitScenes/`.
The resulting file structure should be like:
```
./data/original_datasets/ARKitScenes/
└───Training
    └───40753679
    │   │ ultrawide
    │   │ ...
    └───40753686
        │
        ...
```

##### MegaDepth

Download `MegaDepth v1 Dataset` from https://www.cs.cornell.edu/projects/megadepth/ and put it in `./data/original_datasets/MegaDepth/`.
The resulting file structure should be like:

```
./data/original_datasets/MegaDepth/
└───0000
│   └───images
│   │   │ 1000557903_87fa96b8a4_o.jpg
│   │   └ ...
│   └─── ...
└───0001
│   │
│   └ ...
└─── ...
```

##### 3DStreetView

Download `3D_Street_View` dataset from https://github.com/amir32002/3D_Street_View and put it in `./data/original_datasets/3DStreetView/`.
The resulting file structure should be like:

```
./data/original_datasets/3DStreetView/
└───dataset_aligned
│   └───0002
│   │   │ 0000002_0000001_0000002_0000001.jpg
│   │   └ ...
│   └─── ...
└───dataset_unaligned
│   └───0003
│   │   │ 0000003_0000001_0000002_0000001.jpg
│   │   └ ...
│   └─── ...
```

##### IndoorVL

Download the `IndoorVL` datasets using [Kapture](https://github.com/naver/kapture).

```
pip install kapture
mkdir -p ./data/original_datasets/IndoorVL
cd ./data/original_datasets/IndoorVL
kapture_download_dataset.py update
kapture_download_dataset.py install "HyundaiDepartmentStore_*"
kapture_download_dataset.py install "GangnamStation_*"
cd -
```

### Extract the crops

Now, extract the crops for each of the datasets:
```
for dataset in ARKitScenes MegaDepth 3DStreetView IndoorVL;
do
    python3 datasets/crops/extract_crops_from_images.py --crops ./data/crop_metadata/${dataset}/crops_release.txt --root-dir ./data/original_datasets/${dataset}/ --output-dir ./data/${dataset}_crops/ --imsize 256 --nthread 8 --max-subdir-levels 5 --ideal-number-pairs-in-dir 500;
done
```

##### Note for IndoorVL

Due to some legal issues, we can only release 144,228 pairs out of the 1,593,689 pairs used in the paper.
To account for this in terms of number of pre-training iterations, the pre-training command in this repository uses 125 training epochs, including 12 warm-up epochs, and a learning-rate cosine schedule of 250 epochs, instead of 100, 10 and 200 respectively.
The impact on the performance is negligible.
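To process a single dataset rather than all four, the loop body above can be invoked directly, e.g. for MegaDepth only:

```bash
python3 datasets/crops/extract_crops_from_images.py --crops ./data/crop_metadata/MegaDepth/crops_release.txt --root-dir ./data/original_datasets/MegaDepth/ --output-dir ./data/MegaDepth_crops/ --imsize 256 --nthread 8 --max-subdir-levels 5 --ideal-number-pairs-in-dir 500
```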
demo/dust3r/croco/datasets/crops/extract_crops_from_images.py
ADDED
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
#
# --------------------------------------------------------
# Extracting crops for pre-training
# --------------------------------------------------------

import os
import argparse
from tqdm import tqdm
from PIL import Image
import functools
from multiprocessing import Pool
import math


def arg_parser():
    parser = argparse.ArgumentParser('Generate cropped image pairs from image crop list')

    parser.add_argument('--crops', type=str, required=True, help='crop file')
    parser.add_argument('--root-dir', type=str, required=True, help='root directory')
    parser.add_argument('--output-dir', type=str, required=True, help='output directory')
    parser.add_argument('--imsize', type=int, default=256, help='size of the crops')
    parser.add_argument('--nthread', type=int, required=True, help='number of simultaneous threads')
    parser.add_argument('--max-subdir-levels', type=int, default=5, help='maximum number of subdirectories')
    parser.add_argument('--ideal-number-pairs-in-dir', type=int, default=500, help='number of pairs stored in a dir')
    return parser


def main(args):
    listing_path = os.path.join(args.output_dir, 'listing.txt')

    print(f'Loading list of crops ... ({args.nthread} threads)')
    crops, num_crops_to_generate = load_crop_file(args.crops)

    print(f'Preparing jobs ({len(crops)} candidate image pairs)...')
    num_levels = min(math.ceil(math.log(num_crops_to_generate, args.ideal_number_pairs_in_dir)), args.max_subdir_levels)
    num_pairs_in_dir = math.ceil(num_crops_to_generate ** (1 / num_levels))

    jobs = prepare_jobs(crops, num_levels, num_pairs_in_dir)
    del crops

    os.makedirs(args.output_dir, exist_ok=True)
    mmap = Pool(args.nthread).imap_unordered if args.nthread > 1 else map
    call = functools.partial(save_image_crops, args)

    print(f"Generating cropped images to {args.output_dir} ...")
    with open(listing_path, 'w') as listing:
        listing.write('# pair_path\n')
        for results in tqdm(mmap(call, jobs), total=len(jobs)):
            for path in results:
                listing.write(f'{path}\n')
    print('Finished writing listing to', listing_path)


def load_crop_file(path):
    data = open(path).read().splitlines()
    pairs = []
    num_crops_to_generate = 0
    for line in tqdm(data):
        if line.startswith('#'):
            continue
        line = line.split(', ')
        if len(line) < 8:
            # Image pair header: img1, img2, rotation
            img1, img2, rotation = line
            pairs.append((img1, img2, int(rotation), []))
        else:
            # Crop rectangles for the latest pair: l1, r1, t1, b1, l2, r2, t2, b2
            l1, r1, t1, b1, l2, r2, t2, b2 = map(int, line)
            rect1, rect2 = (l1, t1, r1, b1), (l2, t2, r2, b2)
            pairs[-1][-1].append((rect1, rect2))
            num_crops_to_generate += 1
    return pairs, num_crops_to_generate


def prepare_jobs(pairs, num_levels, num_pairs_in_dir):
    jobs = []
    powers = [num_pairs_in_dir**level for level in reversed(range(num_levels))]

    def get_path(idx):
        # Spread pairs over a hierarchy of subdirectories with hexadecimal names
        idx_array = []
        d = idx
        for level in range(num_levels - 1):
            idx_array.append(idx // powers[level])
            idx = idx % powers[level]
        idx_array.append(d)
        return '/'.join(map(lambda x: hex(x)[2:], idx_array))

    idx = 0
    for pair_data in tqdm(pairs):
        img1, img2, rotation, crops = pair_data
        if -60 <= rotation <= 60:
            rotation = 0  # most likely not a true rotation
        paths = [get_path(idx + k) for k in range(len(crops))]
        idx += len(crops)
        jobs.append(((img1, img2), rotation, crops, paths))
    return jobs


def load_image(path):
    try:
        return Image.open(path).convert('RGB')
    except Exception as e:
        print('skipping', path, e)
        raise OSError()


def save_image_crops(args, data):
    # load images
    img_pair, rot, crops, paths = data
    try:
        img1, img2 = [load_image(os.path.join(args.root_dir, impath)) for impath in img_pair]
    except OSError:
        return []

    def area(sz):
        return sz[0] * sz[1]

    tgt_size = (args.imsize, args.imsize)

    def prepare_crop(img, rect, rot=0):
        # actual crop
        img = img.crop(rect)

        # resize to desired size
        interp = Image.Resampling.LANCZOS if area(img.size) > 4 * area(tgt_size) else Image.Resampling.BICUBIC
        img = img.resize(tgt_size, resample=interp)

        # rotate the image
        rot90 = (round(rot / 90) % 4) * 90
        if rot90 == 90:
            img = img.transpose(Image.Transpose.ROTATE_90)
        elif rot90 == 180:
            img = img.transpose(Image.Transpose.ROTATE_180)
        elif rot90 == 270:
            img = img.transpose(Image.Transpose.ROTATE_270)
        return img

    results = []
    for (rect1, rect2), path in zip(crops, paths):
        crop1 = prepare_crop(img1, rect1)
        crop2 = prepare_crop(img2, rect2, rot)

        fullpath1 = os.path.join(args.output_dir, path + '_1.jpg')
        fullpath2 = os.path.join(args.output_dir, path + '_2.jpg')
        os.makedirs(os.path.dirname(fullpath1), exist_ok=True)

        assert not os.path.isfile(fullpath1), fullpath1
        assert not os.path.isfile(fullpath2), fullpath2
        crop1.save(fullpath1)
        crop2.save(fullpath2)
        results.append(path)

    return results


if __name__ == '__main__':
    args = arg_parser().parse_args()
    main(args)
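As a sketch of what the script above produces (directory names are hexadecimal pair indices; the exact depth depends on the dataset size and `--max-subdir-levels`):

```
./data/MegaDepth_crops/
├── listing.txt      # '# pair_path' header, then one relative path per generated pair
└── 0/
    └── 0/
        ├── 0_1.jpg  # first crop of pair 0
        └── 0_2.jpg  # second crop of pair 0
```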
demo/dust3r/croco/datasets/habitat_sim/README.MD
ADDED
@@ -0,0 +1,76 @@
## Generation of synthetic image pairs using Habitat-Sim

These instructions allow you to generate pre-training pairs from the Habitat simulator.
As we did not save the metadata of the pairs used in the original paper, the generated pairs are not strictly the same, but they use the same settings and are equivalent.

### Download Habitat-Sim scenes
Download Habitat-Sim scenes:
- Download links can be found here: https://github.com/facebookresearch/habitat-sim/blob/main/DATASETS.md
- We used scenes from the HM3D, habitat-test-scenes, Replica, ReplicaCad and ScanNet datasets.
- Please put the scenes under `./data/habitat-sim-data/scene_datasets/` following the structure below, or manually update the paths in `paths.py`.
```
./data/
└──habitat-sim-data/
   └──scene_datasets/
      ├──hm3d/
      ├──gibson/
      ├──habitat-test-scenes/
      ├──replica_cad_baked_lighting/
      ├──replica_cad/
      ├──ReplicaDataset/
      └──scannet/
```

### Image pairs generation
We provide metadata to generate reproducible image pairs for pre-training and validation.
Experiments described in the paper used similar data, but their generation was not reproducible at the time.

Specifications:
- 256x256 resolution images, with 60 degrees field of view.
- Up to 1000 image pairs per scene.
- Number of scenes considered / number of image pairs per dataset:
  - Scannet: 1097 scenes / 985,209 pairs
  - HM3D:
    - hm3d/train: 800 scenes / 800k pairs
    - hm3d/val: 100 scenes / 100k pairs
    - hm3d/minival: 10 scenes / 10k pairs
  - habitat-test-scenes: 3 scenes / 3k pairs
  - replica_cad_baked_lighting: 13 scenes / 13k pairs
- Scenes from hm3d/val and hm3d/minival were not used for pre-training but kept for validation purposes.

Download the metadata and extract it:
```bash
mkdir -p data/habitat_release_metadata/
cd data/habitat_release_metadata/
wget https://download.europe.naverlabs.com/ComputerVision/CroCo/data/habitat_release_metadata/multiview_habitat_metadata.tar.gz
tar -xvf multiview_habitat_metadata.tar.gz
cd ../..
# Location of the metadata
METADATA_DIR="./data/habitat_release_metadata/multiview_habitat_metadata"
```

Generate image pairs from metadata:
- The following command will print a list of commandlines to generate image pairs for each scene:
```bash
# Target output directory
PAIRS_DATASET_DIR="./data/habitat_release/"
python datasets/habitat_sim/generate_from_metadata_files.py --input_dir=$METADATA_DIR --output_dir=$PAIRS_DATASET_DIR
```
- One can launch several such commands in parallel, e.g. using GNU Parallel:
```bash
python datasets/habitat_sim/generate_from_metadata_files.py --input_dir=$METADATA_DIR --output_dir=$PAIRS_DATASET_DIR | parallel -j 16
```

### Metadata generation

Image pairs were randomly sampled using the following commands, whose outputs contain randomness and are thus not exactly reproducible:
```bash
# Print commandlines to generate image pairs from the different scenes available.
PAIRS_DATASET_DIR=MY_CUSTOM_PATH
python datasets/habitat_sim/generate_multiview_images.py --list_commands --output_dir=$PAIRS_DATASET_DIR

# Once a dataset is generated, pack metadata files for reproducibility.
METADATA_DIR=MY_CUSTOM_PATH
python datasets/habitat_sim/pack_metadata_files.py $PAIRS_DATASET_DIR $METADATA_DIR
```
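For reference, each scene directory produced by the generation commands above should contain, per pair index (zero-padded to 8 digits) and per view index (starting from 1), files following the naming scheme used by the generation scripts below; a sketch:

```
<output_dir>/<scene>/
├── metadata.json                  # generation parameters and sampled poses
├── 00000000_1.jpeg                # RGB image of view 1 of pair 00000000
├── 00000000_1_depth.exr           # depth map (when depth generation is enabled)
├── 00000000_1_camera_params.json  # intrinsics and cam2world pose
├── 00000000_2.jpeg
└── ...
```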
demo/dust3r/croco/datasets/habitat_sim/__init__.py
ADDED
File without changes
demo/dust3r/croco/datasets/habitat_sim/generate_from_metadata.py
ADDED
@@ -0,0 +1,92 @@
# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).

"""
Script to generate image pairs for a given scene, reproducing the poses provided in a metadata file.
"""
import os
from datasets.habitat_sim.multiview_habitat_sim_generator import MultiviewHabitatSimGenerator
from datasets.habitat_sim.paths import SCENES_DATASET
import argparse
import quaternion
import PIL.Image
import cv2
import json
from tqdm import tqdm


def generate_multiview_images_from_metadata(metadata_filename,
                                            output_dir,
                                            overload_params=dict(),
                                            scene_datasets_paths=None,
                                            exist_ok=False):
    """
    Generate images from a metadata file, for reproducibility purposes.
    """
    # Reorder paths by decreasing label length, to avoid collisions when testing whether a path starts with such a label
    if scene_datasets_paths is not None:
        scene_datasets_paths = dict(sorted(scene_datasets_paths.items(), key=lambda x: len(x[0]), reverse=True))

    with open(metadata_filename, 'r') as f:
        input_metadata = json.load(f)
    metadata = dict()
    for key, value in input_metadata.items():
        # Optionally replace some paths
        if key in ("scene_dataset_config_file", "scene", "navmesh") and value != "":
            if scene_datasets_paths is not None:
                for dataset_label, dataset_path in scene_datasets_paths.items():
                    if value.startswith(dataset_label):
                        value = os.path.normpath(os.path.join(dataset_path, os.path.relpath(value, dataset_label)))
                        break
        metadata[key] = value

    # Overload some parameters
    for key, value in overload_params.items():
        metadata[key] = value

    generation_entries = dict([(key, value) for key, value in metadata.items() if not (key in ('multiviews', 'output_dir', 'generate_depth'))])
    generate_depth = metadata["generate_depth"]

    os.makedirs(output_dir, exist_ok=exist_ok)

    generator = MultiviewHabitatSimGenerator(**generation_entries)

    # Generate views
    for idx_label, data in tqdm(metadata['multiviews'].items()):
        positions = data["positions"]
        orientations = data["orientations"]
        n = len(positions)
        for oidx in range(n):
            observation = generator.render_viewpoint(positions[oidx], quaternion.from_float_array(orientations[oidx]))
            observation_label = f"{oidx + 1}"  # Leonid is indexing starting from 1
            # Color image saved using PIL
            img = PIL.Image.fromarray(observation['color'][:, :, :3])
            filename = os.path.join(output_dir, f"{idx_label}_{observation_label}.jpeg")
            img.save(filename)
            if generate_depth:
                # Depth image as EXR file
                filename = os.path.join(output_dir, f"{idx_label}_{observation_label}_depth.exr")
                cv2.imwrite(filename, observation['depth'], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
                # Camera parameters
                camera_params = dict([(key, observation[key].tolist()) for key in ("camera_intrinsics", "R_cam2world", "t_cam2world")])
                filename = os.path.join(output_dir, f"{idx_label}_{observation_label}_camera_params.json")
                with open(filename, "w") as f:
                    json.dump(camera_params, f)
    # Save metadata
    with open(os.path.join(output_dir, "metadata.json"), "w") as f:
        json.dump(metadata, f)

    generator.close()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--metadata_filename", required=True)
    parser.add_argument("--output_dir", required=True)
    args = parser.parse_args()

    generate_multiview_images_from_metadata(metadata_filename=args.metadata_filename,
                                            output_dir=args.output_dir,
                                            scene_datasets_paths=SCENES_DATASET,
                                            overload_params=dict(),
                                            exist_ok=True)
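For a single scene, the function above can also be called directly from Python; a minimal sketch (the metadata path is illustrative and depends on where the released archive was extracted):

```python
from datasets.habitat_sim.generate_from_metadata import generate_multiview_images_from_metadata
from datasets.habitat_sim.paths import SCENES_DATASET

generate_multiview_images_from_metadata(
    # Illustrative metadata file from the released archive
    metadata_filename="./data/habitat_release_metadata/multiview_habitat_metadata/habitat-test-scenes/skokloster-castle/metadata.json",
    output_dir="./data/habitat_release/habitat-test-scenes/skokloster-castle",
    scene_datasets_paths=SCENES_DATASET,  # maps dataset labels to local scene folders
    exist_ok=True)
```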
demo/dust3r/croco/datasets/habitat_sim/generate_from_metadata_files.py
ADDED
@@ -0,0 +1,27 @@
# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).

"""
Script generating commandlines to generate image pairs from metadata files.
"""
import os
import glob
from tqdm import tqdm
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_dir", required=True)
    parser.add_argument("--output_dir", required=True)
    parser.add_argument("--prefix", default="", help="Commandline prefix, useful e.g. to set up the environment.")
    args = parser.parse_args()

    input_metadata_filenames = glob.iglob(f"{args.input_dir}/**/metadata.json", recursive=True)

    for metadata_filename in tqdm(input_metadata_filenames):
        output_dir = os.path.join(args.output_dir, os.path.relpath(os.path.dirname(metadata_filename), args.input_dir))
        # Do not process the scene if the metadata file already exists
        if os.path.exists(os.path.join(output_dir, "metadata.json")):
            continue
        commandline = f"{args.prefix}python datasets/habitat_sim/generate_from_metadata.py --metadata_filename={metadata_filename} --output_dir={output_dir}"
        print(commandline)
demo/dust3r/croco/datasets/habitat_sim/generate_multiview_images.py
ADDED
@@ -0,0 +1,177 @@
# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).

import os
from tqdm import tqdm
import argparse
import PIL.Image
import numpy as np
import json
from datasets.habitat_sim.multiview_habitat_sim_generator import MultiviewHabitatSimGenerator, NoNaviguableSpaceError
from datasets.habitat_sim.paths import list_scenes_available
import cv2
import quaternion
import shutil


def generate_multiview_images_for_scene(scene_dataset_config_file,
                                        scene,
                                        navmesh,
                                        output_dir,
                                        views_count,
                                        size,
                                        exist_ok=False,
                                        generate_depth=False,
                                        **kwargs):
    """
    Generate tuples of overlapping views for a given scene.
    generate_depth: generate depth images and camera parameters.
    """
    if os.path.exists(output_dir) and not exist_ok:
        print(f"Scene {scene}: data already generated. Ignoring generation.")
        return
    try:
        print(f"Scene {scene}: {size} multiview acquisitions to generate...")
        os.makedirs(output_dir, exist_ok=exist_ok)

        metadata_filename = os.path.join(output_dir, "metadata.json")

        metadata_template = dict(scene_dataset_config_file=scene_dataset_config_file,
                                 scene=scene,
                                 navmesh=navmesh,
                                 views_count=views_count,
                                 size=size,
                                 generate_depth=generate_depth,
                                 **kwargs)
        metadata_template["multiviews"] = dict()

        if os.path.exists(metadata_filename):
            print("Metadata file already exists:", metadata_filename)
            print("Loading already generated metadata file...")
            with open(metadata_filename, "r") as f:
                metadata = json.load(f)

            for key in metadata_template.keys():
                if key != "multiviews":
                    assert metadata_template[key] == metadata[key], f"existing file is inconsistent with the input parameters:\nKey: {key}\nmetadata: {metadata[key]}\ntemplate: {metadata_template[key]}."
        else:
            print("No temporary file found. Starting generation from scratch...")
            metadata = metadata_template

        starting_id = len(metadata["multiviews"])
        print(f"Starting generation from index {starting_id}/{size}...")
        if starting_id >= size:
            print("Generation already done.")
            return

        generator = MultiviewHabitatSimGenerator(scene_dataset_config_file=scene_dataset_config_file,
                                                 scene=scene,
                                                 navmesh=navmesh,
                                                 views_count=views_count,
                                                 size=size,
                                                 **kwargs)

        for idx in tqdm(range(starting_id, size)):
            # Generate / re-generate the observations
            try:
                data = generator[idx]
                observations = data["observations"]
                positions = data["positions"]
                orientations = data["orientations"]

                idx_label = f"{idx:08}"
                for oidx, observation in enumerate(observations):
                    observation_label = f"{oidx + 1}"  # Leonid is indexing starting from 1
                    # Color image saved using PIL
                    img = PIL.Image.fromarray(observation['color'][:, :, :3])
                    filename = os.path.join(output_dir, f"{idx_label}_{observation_label}.jpeg")
                    img.save(filename)
                    if generate_depth:
                        # Depth image as EXR file
                        filename = os.path.join(output_dir, f"{idx_label}_{observation_label}_depth.exr")
                        cv2.imwrite(filename, observation['depth'], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
                        # Camera parameters
                        camera_params = dict([(key, observation[key].tolist()) for key in ("camera_intrinsics", "R_cam2world", "t_cam2world")])
                        filename = os.path.join(output_dir, f"{idx_label}_{observation_label}_camera_params.json")
                        with open(filename, "w") as f:
                            json.dump(camera_params, f)
                metadata["multiviews"][idx_label] = {"positions": positions.tolist(),
                                                     "orientations": orientations.tolist(),
                                                     "covisibility_ratios": data["covisibility_ratios"].tolist(),
                                                     "valid_fractions": data["valid_fractions"].tolist(),
                                                     "pairwise_visibility_ratios": data["pairwise_visibility_ratios"].tolist()}
            except RecursionError:
                print("Recursion error: unable to sample observations for this scene. We will stop there.")
                break

            # Regularly save a temporary metadata file, in case we need to restart the generation
            if idx % 10 == 0:
                with open(metadata_filename, "w") as f:
                    json.dump(metadata, f)

        # Save metadata
        with open(metadata_filename, "w") as f:
            json.dump(metadata, f)

        generator.close()
    except NoNaviguableSpaceError:
        pass


def create_commandline(scene_data, generate_depth, exist_ok=False):
    """
    Create a commandline string to generate a scene.
    """
    def my_formatting(val):
        if val is None or val == "":
            return '""'
        else:
            return val
    commandline = f"""python {__file__} --scene {my_formatting(scene_data.scene)}
    --scene_dataset_config_file {my_formatting(scene_data.scene_dataset_config_file)}
    --navmesh {my_formatting(scene_data.navmesh)}
    --output_dir {my_formatting(scene_data.output_dir)}
    --generate_depth {int(generate_depth)}
    --exist_ok {int(exist_ok)}
    """
    commandline = " ".join(commandline.split())
    return commandline


if __name__ == "__main__":
    os.umask(2)

    parser = argparse.ArgumentParser(description="""Example of use -- listing commands to generate data for scenes available:
    > python datasets/habitat_sim/generate_multiview_images.py --list_commands
    """)

    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument("--list_commands", action='store_true', help="list commandlines to run if true")
    parser.add_argument("--scene", type=str, default="")
    parser.add_argument("--scene_dataset_config_file", type=str, default="")
    parser.add_argument("--navmesh", type=str, default="")

    parser.add_argument("--generate_depth", type=int, default=1)
    parser.add_argument("--exist_ok", type=int, default=0)

    kwargs = dict(resolution=(256, 256), hfov=60, views_count=2, size=1000)

    args = parser.parse_args()
    generate_depth = bool(args.generate_depth)
    exist_ok = bool(args.exist_ok)

    if args.list_commands:
        # Listing scenes available...
        scenes_data = list_scenes_available(base_output_dir=args.output_dir)

        for scene_data in scenes_data:
            print(create_commandline(scene_data, generate_depth=generate_depth, exist_ok=exist_ok))
    else:
        if args.scene == "" or args.output_dir == "":
            print("Missing scene or output dir argument!")
            print(parser.format_help())
        else:
            generate_multiview_images_for_scene(scene=args.scene,
                                                scene_dataset_config_file=args.scene_dataset_config_file,
                                                navmesh=args.navmesh,
                                                output_dir=args.output_dir,
                                                exist_ok=exist_ok,
                                                generate_depth=generate_depth,
                                                **kwargs)
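For reference, the `metadata.json` file written above stores the generation parameters together with one entry per sampled multiview tuple; schematically (values elided):

```
{
  "scene_dataset_config_file": "...", "scene": "...", "navmesh": "...",
  "views_count": 2, "size": 1000, "generate_depth": true,
  "resolution": [256, 256], "hfov": 60,
  "multiviews": {
    "00000000": {
      "positions": [[...], [...]],         # one XYZ position per view
      "orientations": [[...], [...]],      # WXYZ quaternions
      "covisibility_ratios": [...],
      "valid_fractions": [...],
      "pairwise_visibility_ratios": [[...], [...]]
    },
    ...
  }
}
```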
demo/dust3r/croco/datasets/habitat_sim/multiview_habitat_sim_generator.py
ADDED
@@ -0,0 +1,390 @@
# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).

import os
import numpy as np
import quaternion
import habitat_sim
import json
from sklearn.neighbors import NearestNeighbors
import cv2

# OpenCV to Habitat camera convention transformation
R_OPENCV2HABITAT = np.stack((habitat_sim.geo.RIGHT, -habitat_sim.geo.UP, habitat_sim.geo.FRONT), axis=0)
R_HABITAT2OPENCV = R_OPENCV2HABITAT.T
DEG2RAD = np.pi / 180


def compute_camera_intrinsics(height, width, hfov):
    f = width / 2 / np.tan(hfov / 2 * np.pi / 180)
    cu, cv = width / 2, height / 2
    return f, cu, cv


def compute_camera_pose_opencv_convention(camera_position, camera_orientation):
    R_cam2world = quaternion.as_rotation_matrix(camera_orientation) @ R_OPENCV2HABITAT
    t_cam2world = np.asarray(camera_position)
    return R_cam2world, t_cam2world


def compute_pointmap(depthmap, hfov):
    """ Compute a HxWx3 pointmap in camera frame from a HxW depth map."""
    height, width = depthmap.shape
    f, cu, cv = compute_camera_intrinsics(height, width, hfov)
    # Cast depth map to points
    z_cam = depthmap
    u, v = np.meshgrid(range(width), range(height))
    x_cam = (u - cu) / f * z_cam
    y_cam = (v - cv) / f * z_cam
    X_cam = np.stack((x_cam, y_cam, z_cam), axis=-1)
    return X_cam


def compute_pointcloud(depthmap, hfov, camera_position, camera_rotation):
    """Return a 3D point cloud corresponding to valid pixels of the depth map"""
    R_cam2world, t_cam2world = compute_camera_pose_opencv_convention(camera_position, camera_rotation)

    X_cam = compute_pointmap(depthmap=depthmap, hfov=hfov)
    valid_mask = (X_cam[:, :, 2] != 0.0)

    X_cam = X_cam.reshape(-1, 3)[valid_mask.flatten()]
    X_world = X_cam @ R_cam2world.T + t_cam2world.reshape(1, 3)
    return X_world


def compute_pointcloud_overlaps_scikit(pointcloud1, pointcloud2, distance_threshold, compute_symmetric=False):
    """
    Compute 'overlapping' metrics based on a distance threshold between two point clouds.
    """
    nbrs = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(pointcloud2)
    distances, indices = nbrs.kneighbors(pointcloud1)
    intersection1 = np.count_nonzero(distances.flatten() < distance_threshold)

    data = {"intersection1": intersection1,
            "size1": len(pointcloud1)}
    if compute_symmetric:
        nbrs = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(pointcloud1)
        distances, indices = nbrs.kneighbors(pointcloud2)
        intersection2 = np.count_nonzero(distances.flatten() < distance_threshold)
        data["intersection2"] = intersection2
        data["size2"] = len(pointcloud2)

    return data


def _append_camera_parameters(observation, hfov, camera_location, camera_rotation):
    """
    Add camera parameters to the observation dictionary produced by Habitat-Sim.
    In-place modifications.
    """
    R_cam2world, t_cam2world = compute_camera_pose_opencv_convention(camera_location, camera_rotation)
    height, width = observation['depth'].shape
    f, cu, cv = compute_camera_intrinsics(height, width, hfov)
    K = np.asarray([[f, 0, cu],
                    [0, f, cv],
                    [0, 0, 1.0]])
    observation["camera_intrinsics"] = K
    observation["t_cam2world"] = t_cam2world
    observation["R_cam2world"] = R_cam2world


def look_at(eye, center, up, return_cam2world=True):
    """
    Return camera pose looking at a given center point.
    Analogous to the gluLookAt function, using OpenCV camera convention.
    """
    z = center - eye
    z /= np.linalg.norm(z, axis=-1, keepdims=True)
    y = -up
    y = y - np.sum(y * z, axis=-1, keepdims=True) * z
    y /= np.linalg.norm(y, axis=-1, keepdims=True)
    x = np.cross(y, z, axis=-1)

    if return_cam2world:
        R = np.stack((x, y, z), axis=-1)
        t = eye
    else:
        # World to camera transformation
        # Transposed matrix
        R = np.stack((x, y, z), axis=-2)
        t = - np.einsum('...ij, ...j', R, eye)
    return R, t


def look_at_for_habitat(eye, center, up, return_cam2world=True):
    R, t = look_at(eye, center, up)
    orientation = quaternion.from_rotation_matrix(R @ R_OPENCV2HABITAT.T)
    return orientation, t


def generate_orientation_noise(pan_range, tilt_range, roll_range):
    return (quaternion.from_rotation_vector(np.random.uniform(*pan_range) * DEG2RAD * habitat_sim.geo.UP)
            * quaternion.from_rotation_vector(np.random.uniform(*tilt_range) * DEG2RAD * habitat_sim.geo.RIGHT)
            * quaternion.from_rotation_vector(np.random.uniform(*roll_range) * DEG2RAD * habitat_sim.geo.FRONT))


class NoNaviguableSpaceError(RuntimeError):
    def __init__(self, *args):
        super().__init__(*args)


class MultiviewHabitatSimGenerator:
    def __init__(self,
                 scene,
                 navmesh,
                 scene_dataset_config_file,
                 resolution=(240, 320),
                 views_count=2,
                 hfov=60,
                 gpu_id=0,
                 size=10000,
                 minimum_covisibility=0.5,
                 transform=None):
        self.scene = scene
        self.navmesh = navmesh
        self.scene_dataset_config_file = scene_dataset_config_file
        self.resolution = resolution
        self.views_count = views_count
        assert self.views_count >= 1
        self.hfov = hfov
        self.gpu_id = gpu_id
        self.size = size
        self.transform = transform

        # Noise added to camera orientation
        self.pan_range = (-3, 3)
        self.tilt_range = (-10, 10)
        self.roll_range = (-5, 5)

        # Height range to sample cameras
        self.height_range = (1.2, 1.8)

        # Random steps between the camera views
        self.random_steps_count = 5
        self.random_step_variance = 2.0

        # Minimum fraction of the scene which should be valid (well-defined depth)
        self.minimum_valid_fraction = 0.7

        # Distance threshold used to select overlapping pairs
        self.distance_threshold = 0.05
        # Minimum covisibility of a view point cloud with respect to the reference view for a pair to be kept.
        self.minimum_covisibility = minimum_covisibility

        # Maximum number of retries.
        self.max_attempts_count = 100

        self.seed = None
        self._lazy_initialization()

    def _lazy_initialization(self):
        # Lazy random seeding and instantiation of the simulator to deal with multiprocessing properly
        if self.seed is None:
            # Re-seed numpy generator
            np.random.seed()
            self.seed = np.random.randint(2**32 - 1)
        sim_cfg = habitat_sim.SimulatorConfiguration()
        sim_cfg.scene_id = self.scene
        if self.scene_dataset_config_file is not None and self.scene_dataset_config_file != "":
            sim_cfg.scene_dataset_config_file = self.scene_dataset_config_file
        sim_cfg.random_seed = self.seed
        sim_cfg.load_semantic_mesh = False
        sim_cfg.gpu_device_id = self.gpu_id

        depth_sensor_spec = habitat_sim.CameraSensorSpec()
        depth_sensor_spec.uuid = "depth"
        depth_sensor_spec.sensor_type = habitat_sim.SensorType.DEPTH
        depth_sensor_spec.resolution = self.resolution
        depth_sensor_spec.hfov = self.hfov
        depth_sensor_spec.position = [0.0, 0.0, 0]

        rgb_sensor_spec = habitat_sim.CameraSensorSpec()
        rgb_sensor_spec.uuid = "color"
        rgb_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR
        rgb_sensor_spec.resolution = self.resolution
        rgb_sensor_spec.hfov = self.hfov
        rgb_sensor_spec.position = [0.0, 0.0, 0]
        agent_cfg = habitat_sim.agent.AgentConfiguration(sensor_specifications=[rgb_sensor_spec, depth_sensor_spec])

        cfg = habitat_sim.Configuration(sim_cfg, [agent_cfg])
        self.sim = habitat_sim.Simulator(cfg)
        if self.navmesh is not None and self.navmesh != "":
            # Use pre-computed navmesh when available (usually better than those generated automatically)
            self.sim.pathfinder.load_nav_mesh(self.navmesh)

        if not self.sim.pathfinder.is_loaded:
            # Try to compute a navmesh
            navmesh_settings = habitat_sim.NavMeshSettings()
            navmesh_settings.set_defaults()
            self.sim.recompute_navmesh(self.sim.pathfinder, navmesh_settings, True)

        # Ensure that the navmesh is not empty
        if not self.sim.pathfinder.is_loaded:
            raise NoNaviguableSpaceError(f"No navigable location (scene: {self.scene} -- navmesh: {self.navmesh})")

        self.agent = self.sim.initialize_agent(agent_id=0)

    def close(self):
        self.sim.close()

    def __del__(self):
        self.sim.close()

    def __len__(self):
        return self.size

    def sample_random_viewpoint(self):
        """ Sample a random viewpoint using the navmesh """
        nav_point = self.sim.pathfinder.get_random_navigable_point()

        # Sample a random viewpoint height
        viewpoint_height = np.random.uniform(*self.height_range)
        viewpoint_position = nav_point + viewpoint_height * habitat_sim.geo.UP
        viewpoint_orientation = quaternion.from_rotation_vector(np.random.uniform(0, 2 * np.pi) * habitat_sim.geo.UP) * generate_orientation_noise(self.pan_range, self.tilt_range, self.roll_range)
        return viewpoint_position, viewpoint_orientation, nav_point

    def sample_other_random_viewpoint(self, observed_point, nav_point):
        """ Sample a random viewpoint close to an existing one, using the navmesh and a reference observed point."""
        other_nav_point = nav_point

        walk_directions = self.random_step_variance * np.asarray([1, 0, 1])
        for i in range(self.random_steps_count):
            temp = self.sim.pathfinder.snap_point(other_nav_point + walk_directions * np.random.normal(size=3))
            # Snapping may return nan when it fails
            if not np.isnan(temp[0]):
                other_nav_point = temp

        other_viewpoint_height = np.random.uniform(*self.height_range)
        other_viewpoint_position = other_nav_point + other_viewpoint_height * habitat_sim.geo.UP

        # Set viewing direction towards the central point
        rotation, position = look_at_for_habitat(eye=other_viewpoint_position, center=observed_point, up=habitat_sim.geo.UP, return_cam2world=True)
        rotation = rotation * generate_orientation_noise(self.pan_range, self.tilt_range, self.roll_range)
        return position, rotation, other_nav_point

    def is_other_pointcloud_overlapping(self, ref_pointcloud, other_pointcloud):
        """ Check if a viewpoint is valid and overlaps significantly with a reference one. """
        # Observation
        pixels_count = self.resolution[0] * self.resolution[1]
        valid_fraction = len(other_pointcloud) / pixels_count
        assert valid_fraction <= 1.0 and valid_fraction >= 0.0
        overlap = compute_pointcloud_overlaps_scikit(ref_pointcloud, other_pointcloud, self.distance_threshold, compute_symmetric=True)
        covisibility = min(overlap["intersection1"] / pixels_count, overlap["intersection2"] / pixels_count)
        is_valid = (valid_fraction >= self.minimum_valid_fraction) and (covisibility >= self.minimum_covisibility)
        return is_valid, valid_fraction, covisibility

    def is_other_viewpoint_overlapping(self, ref_pointcloud, observation, position, rotation):
        """ Check if a viewpoint is valid and overlaps significantly with a reference one. """
        # Observation
        other_pointcloud = compute_pointcloud(observation['depth'], self.hfov, position, rotation)
        return self.is_other_pointcloud_overlapping(ref_pointcloud, other_pointcloud)

    def render_viewpoint(self, viewpoint_position, viewpoint_orientation):
        agent_state = habitat_sim.AgentState()
        agent_state.position = viewpoint_position
        agent_state.rotation = viewpoint_orientation
        self.agent.set_state(agent_state)
        viewpoint_observations = self.sim.get_sensor_observations(agent_ids=0)
        _append_camera_parameters(viewpoint_observations, self.hfov, viewpoint_position, viewpoint_orientation)
        return viewpoint_observations

    def __getitem__(self, useless_idx):
        ref_position, ref_orientation, nav_point = self.sample_random_viewpoint()
        ref_observations = self.render_viewpoint(ref_position, ref_orientation)
        # Extract point cloud
        ref_pointcloud = compute_pointcloud(depthmap=ref_observations['depth'], hfov=self.hfov,
                                            camera_position=ref_position, camera_rotation=ref_orientation)

        pixels_count = self.resolution[0] * self.resolution[1]
        ref_valid_fraction = len(ref_pointcloud) / pixels_count
        assert ref_valid_fraction <= 1.0 and ref_valid_fraction >= 0.0
        if ref_valid_fraction < self.minimum_valid_fraction:
            # This should produce a recursion error at some point when something is very wrong.
            return self[0]
        # Pick a reference observed point in the point cloud
        observed_point = np.mean(ref_pointcloud, axis=0)

        # Add the first image as reference
        viewpoints_observations = [ref_observations]
        viewpoints_covisibility = [ref_valid_fraction]
        viewpoints_positions = [ref_position]
        viewpoints_orientations = [quaternion.as_float_array(ref_orientation)]
        viewpoints_clouds = [ref_pointcloud]
        viewpoints_valid_fractions = [ref_valid_fraction]

        for _ in range(self.views_count - 1):
            # Generate another viewpoint using some dummy random walk
            successful_sampling = False
            for sampling_attempt in range(self.max_attempts_count):
                position, rotation, _ = self.sample_other_random_viewpoint(observed_point, nav_point)
                # Observation
                other_viewpoint_observations = self.render_viewpoint(position, rotation)
                other_pointcloud = compute_pointcloud(other_viewpoint_observations['depth'], self.hfov, position, rotation)

                is_valid, valid_fraction, covisibility = self.is_other_pointcloud_overlapping(ref_pointcloud, other_pointcloud)
                if is_valid:
                    successful_sampling = True
                    break
            if not successful_sampling:
                print("WARNING: Maximum number of attempts reached.")
                # Dirty hack, try using a novel original viewpoint
                return self[0]
            viewpoints_observations.append(other_viewpoint_observations)
            viewpoints_covisibility.append(covisibility)
            viewpoints_positions.append(position)
            viewpoints_orientations.append(quaternion.as_float_array(rotation))  # WXYZ convention for the quaternion encoding.
            viewpoints_clouds.append(other_pointcloud)
            viewpoints_valid_fractions.append(valid_fraction)

        # Estimate relations between all pairs of images
        pairwise_visibility_ratios = np.ones((len(viewpoints_observations), len(viewpoints_observations)))
        for i in range(len(viewpoints_observations)):
            pairwise_visibility_ratios[i, i] = viewpoints_valid_fractions[i]
            for j in range(i + 1, len(viewpoints_observations)):
                overlap = compute_pointcloud_overlaps_scikit(viewpoints_clouds[i], viewpoints_clouds[j], self.distance_threshold, compute_symmetric=True)
                pairwise_visibility_ratios[i, j] = overlap['intersection1'] / pixels_count
                pairwise_visibility_ratios[j, i] = overlap['intersection2'] / pixels_count

        # Covisibility is measured relative to image 0
        data = {"observations": viewpoints_observations,
                "positions": np.asarray(viewpoints_positions),
                "orientations": np.asarray(viewpoints_orientations),
                "covisibility_ratios": np.asarray(viewpoints_covisibility),
                "valid_fractions": np.asarray(viewpoints_valid_fractions, dtype=float),
                "pairwise_visibility_ratios": np.asarray(pairwise_visibility_ratios, dtype=float),
                }

        if self.transform is not None:
            data = self.transform(data)
        return data

    def generate_random_spiral_trajectory(self, images_count=100, max_radius=0.5, half_turns=5, use_constant_orientation=False):
        """
        Return a list of images corresponding to a spiral trajectory from a random starting point.
        Useful to generate nice visualisations.
        Use an even number of half turns to get a nice "C1-continuous" loop effect.
        """
        ref_position, ref_orientation, navpoint = self.sample_random_viewpoint()
        ref_observations = self.render_viewpoint(ref_position, ref_orientation)
        ref_pointcloud = compute_pointcloud(depthmap=ref_observations['depth'], hfov=self.hfov,
                                            camera_position=ref_position, camera_rotation=ref_orientation)
        pixels_count = self.resolution[0] * self.resolution[1]
        if len(ref_pointcloud) / pixels_count < self.minimum_valid_fraction:
            # Dirty hack: ensure that the valid part of the image is significant
            return self.generate_random_spiral_trajectory(images_count, max_radius, half_turns, use_constant_orientation)

        # Pick an observed point in the point cloud
        observed_point = np.mean(ref_pointcloud, axis=0)
        ref_R, ref_t = compute_camera_pose_opencv_convention(ref_position, ref_orientation)

        images = []
        is_valid = []
        # Spiral trajectory, use_constant orientation
        for i, alpha in enumerate(np.linspace(0, 1, images_count)):
            r = max_radius * np.abs(np.sin(alpha * np.pi))  # Increase then decrease the radius
            theta = alpha * half_turns * np.pi
            x = r * np.cos(theta)
            y = r * np.sin(theta)
            z = 0.0
            position = ref_position + (ref_R @ np.asarray([x, y, z]).reshape(3, 1)).flatten()
            if use_constant_orientation:
                orientation = ref_orientation
            else:
                # trajectory looking at a mean point in front of the ref observation
                orientation, position = look_at_for_habitat(eye=position, center=observed_point, up=habitat_sim.geo.UP)
            observations = self.render_viewpoint(position, orientation)
            images.append(observations['color'][..., :3])
            _is_valid, valid_fraction, iou = self.is_other_viewpoint_overlapping(ref_pointcloud, observations, position, orientation)
            is_valid.append(_is_valid)
        return images, np.all(is_valid)
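A minimal usage sketch of this generator, assuming Habitat-Sim is installed and a scene file is available locally (the scene path below is illustrative):

```python
from datasets.habitat_sim.multiview_habitat_sim_generator import MultiviewHabitatSimGenerator

generator = MultiviewHabitatSimGenerator(
    scene="./data/habitat-sim-data/scene_datasets/habitat-test-scenes/skokloster-castle.glb",  # illustrative
    navmesh="",                     # empty string: a navmesh is recomputed automatically
    scene_dataset_config_file="",
    resolution=(256, 256),
    hfov=60,
    views_count=2,
    size=10)

sample = generator[0]  # the index is unused: each access samples a fresh viewpoint tuple
print(sample["positions"].shape, sample["covisibility_ratios"])
generator.close()
```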
demo/dust3r/croco/datasets/habitat_sim/pack_metadata_files.py
ADDED
@@ -0,0 +1,69 @@
# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
"""
Utility script to pack metadata files of the dataset in order to be able to re-generate it elsewhere.
"""
import os
import glob
from tqdm import tqdm
import shutil
import json
from datasets.habitat_sim.paths import *
import argparse
import collections

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("input_dir")
    parser.add_argument("output_dir")
    args = parser.parse_args()

    input_dirname = args.input_dir
    output_dirname = args.output_dir

    input_metadata_filenames = glob.iglob(f"{input_dirname}/**/metadata.json", recursive=True)

    images_count = collections.defaultdict(lambda: 0)

    os.makedirs(output_dirname)
    for input_filename in tqdm(input_metadata_filenames):
        # Ignore empty files
        with open(input_filename, "r") as f:
            original_metadata = json.load(f)
            if "multiviews" not in original_metadata or len(original_metadata["multiviews"]) == 0:
                print("No views in", input_filename)
                continue

        relpath = os.path.relpath(input_filename, input_dirname)
        print(relpath)

        # Copy metadata, while replacing scene paths by generic keys depending on the dataset, for portability.
        # Data paths are sorted by decreasing length to avoid potential bugs due to paths starting with the same string pattern.
        scenes_dataset_paths = dict(sorted(SCENES_DATASET.items(), key=lambda x: len(x[1]), reverse=True))
        metadata = dict()
        for key, value in original_metadata.items():
            if key in ("scene_dataset_config_file", "scene", "navmesh") and value != "":
                known_path = False
                for dataset, dataset_path in scenes_dataset_paths.items():
                    if value.startswith(dataset_path):
                        value = os.path.join(dataset, os.path.relpath(value, dataset_path))
                        known_path = True
                        break
                if not known_path:
                    raise KeyError("Unknown path: " + value)
            metadata[key] = value

        # Compile some general statistics while packing data
        scene_split = metadata["scene"].split("/")
        upper_level = "/".join(scene_split[:2]) if scene_split[0] == "hm3d" else scene_split[0]
        images_count[upper_level] += len(metadata["multiviews"])

        output_filename = os.path.join(output_dirname, relpath)
        os.makedirs(os.path.dirname(output_filename), exist_ok=True)
        with open(output_filename, "w") as f:
            json.dump(metadata, f)

    # Print statistics
    print("Images count:")
    for upper_level, count in images_count.items():
        print(f"- {upper_level}: {count}")
demo/dust3r/croco/datasets/habitat_sim/paths.py
ADDED
@@ -0,0 +1,129 @@
# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).

"""
Paths to Habitat-Sim scenes
"""

import os
import json
import collections
from tqdm import tqdm


# Hardcoded paths to the different scene datasets
SCENES_DATASET = {
    "hm3d": "./data/habitat-sim-data/scene_datasets/hm3d/",
    "gibson": "./data/habitat-sim-data/scene_datasets/gibson/",
    "habitat-test-scenes": "./data/habitat-sim/scene_datasets/habitat-test-scenes/",
    "replica_cad_baked_lighting": "./data/habitat-sim/scene_datasets/replica_cad_baked_lighting/",
    "replica_cad": "./data/habitat-sim/scene_datasets/replica_cad/",
    "replica": "./data/habitat-sim/scene_datasets/ReplicaDataset/",
    "scannet": "./data/habitat-sim/scene_datasets/scannet/"
}

SceneData = collections.namedtuple("SceneData", ["scene_dataset_config_file", "scene", "navmesh", "output_dir"])


def list_replicacad_scenes(base_output_dir, base_path=SCENES_DATASET["replica_cad"]):
    scene_dataset_config_file = os.path.join(base_path, "replicaCAD.scene_dataset_config.json")
    scenes = [f"apt_{i}" for i in range(6)] + ["empty_stage"]
    navmeshes = [f"navmeshes/apt_{i}_static_furniture.navmesh" for i in range(6)] + ["empty_stage.navmesh"]
    scenes_data = []
    for idx in range(len(scenes)):
        output_dir = os.path.join(base_output_dir, "ReplicaCAD", scenes[idx])
        # Add scene
        data = SceneData(scene_dataset_config_file=scene_dataset_config_file,
                         scene=scenes[idx] + ".scene_instance.json",
                         navmesh=os.path.join(base_path, navmeshes[idx]),
                         output_dir=output_dir)
        scenes_data.append(data)
    return scenes_data


def list_replica_cad_baked_lighting_scenes(base_output_dir, base_path=SCENES_DATASET["replica_cad_baked_lighting"]):
    scene_dataset_config_file = os.path.join(base_path, "replicaCAD_baked.scene_dataset_config.json")
    scenes = sum([[f"Baked_sc{i}_staging_{j:02}" for i in range(5)] for j in range(21)], [])
    scenes_data = []
    for idx in range(len(scenes)):
        output_dir = os.path.join(base_output_dir, "replica_cad_baked_lighting", scenes[idx])
        data = SceneData(scene_dataset_config_file=scene_dataset_config_file,
                         scene=scenes[idx],
                         navmesh="",
                         output_dir=output_dir)
        scenes_data.append(data)
    return scenes_data


def list_replica_scenes(base_output_dir, base_path):
    scenes_data = []
    for scene_id in os.listdir(base_path):
        scene = os.path.join(base_path, scene_id, "mesh.ply")
        navmesh = os.path.join(base_path, scene_id, "habitat/mesh_preseg_semantic.navmesh")  # Not sure if I should use it
        scene_dataset_config_file = ""
        output_dir = os.path.join(base_output_dir, scene_id)
        # Add scene only if it does not exist already, or if exist_ok
        data = SceneData(scene_dataset_config_file=scene_dataset_config_file,
                         scene=scene,
                         navmesh=navmesh,
                         output_dir=output_dir)
        scenes_data.append(data)
    return scenes_data


def list_scenes(base_output_dir, base_path):
    """
    Generic method iterating through a base_path folder to find scenes.
    """
    scenes_data = []
    for root, dirs, files in os.walk(base_path, followlinks=True):
        folder_scenes_data = []
        for file in files:
            name, ext = os.path.splitext(file)
            if ext == ".glb":
                scene = os.path.join(root, name + ".glb")
                navmesh = os.path.join(root, name + ".navmesh")
                if not os.path.exists(navmesh):
                    navmesh = ""
                relpath = os.path.relpath(root, base_path)
                output_dir = os.path.abspath(os.path.join(base_output_dir, relpath, name))
                data = SceneData(scene_dataset_config_file="",
                                 scene=scene,
                                 navmesh=navmesh,
                                 output_dir=output_dir)
                folder_scenes_data.append(data)

        # Specific check for HM3D:
        # when two meshes xxxx.basis.glb and xxxx.glb are present, use the 'basis' version.
        basis_scenes = [data.scene[:-len(".basis.glb")] for data in folder_scenes_data if data.scene.endswith(".basis.glb")]
        if len(basis_scenes) != 0:
            folder_scenes_data = [data for data in folder_scenes_data if not (data.scene[:-len(".glb")] in basis_scenes)]

        scenes_data.extend(folder_scenes_data)
    return scenes_data


def list_scenes_available(base_output_dir, scenes_dataset_paths=SCENES_DATASET):
    scenes_data = []

    # HM3D
    for split in ("minival", "train", "val", "examples"):
        scenes_data += list_scenes(base_output_dir=os.path.join(base_output_dir, f"hm3d/{split}/"),
                                   base_path=f"{scenes_dataset_paths['hm3d']}/{split}")

    # Gibson
    scenes_data += list_scenes(base_output_dir=os.path.join(base_output_dir, "gibson"),
                               base_path=scenes_dataset_paths["gibson"])

    # Habitat test scenes (just a few)
    scenes_data += list_scenes(base_output_dir=os.path.join(base_output_dir, "habitat-test-scenes"),
                               base_path=scenes_dataset_paths["habitat-test-scenes"])

    # ReplicaCAD (baked lighting)
    scenes_data += list_replica_cad_baked_lighting_scenes(base_output_dir=base_output_dir)

    # ScanNet
    scenes_data += list_scenes(base_output_dir=os.path.join(base_output_dir, "scannet"),
                               base_path=scenes_dataset_paths["scannet"])

    # Replica
    scenes_data += list_replica_scenes(base_output_dir=os.path.join(base_output_dir, "replica"),
                                       base_path=scenes_dataset_paths["replica"])
    return scenes_data
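Instead of editing the hardcoded `SCENES_DATASET` dictionary, one can also pass a modified copy to `list_scenes_available`; a small sketch (the custom root is illustrative):

```python
from datasets.habitat_sim.paths import SCENES_DATASET, list_scenes_available

custom_paths = dict(SCENES_DATASET, hm3d="/mnt/datasets/hm3d/")  # illustrative override
scenes_data = list_scenes_available(base_output_dir="./data/habitat_release/",
                                    scenes_dataset_paths=custom_paths)
print(len(scenes_data), "scenes found")
```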
demo/dust3r/croco/datasets/pairs_dataset.py
ADDED
@@ -0,0 +1,109 @@
# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).

import os
from torch.utils.data import Dataset
from PIL import Image

from datasets.transforms import get_pair_transforms


def load_image(impath):
    return Image.open(impath)


def load_pairs_from_cache_file(fname, root=''):
    assert os.path.isfile(fname), "cannot parse pairs from {:s}, file does not exist".format(fname)
    with open(fname, 'r') as fid:
        lines = fid.read().strip().splitlines()
    pairs = [(os.path.join(root, l.split()[0]), os.path.join(root, l.split()[1])) for l in lines]
    return pairs


def load_pairs_from_list_file(fname, root=''):
    assert os.path.isfile(fname), "cannot parse pairs from {:s}, file does not exist".format(fname)
    with open(fname, 'r') as fid:
        lines = fid.read().strip().splitlines()
    pairs = [(os.path.join(root, l + '_1.jpg'), os.path.join(root, l + '_2.jpg')) for l in lines if not l.startswith('#')]
    return pairs


def write_cache_file(fname, pairs, root=''):
    if len(root) > 0:
        if not root.endswith('/'):
            root += '/'
        assert os.path.isdir(root)
    s = ''
    for im1, im2 in pairs:
        if len(root) > 0:
            assert im1.startswith(root), im1
            assert im2.startswith(root), im2
        s += '{:s} {:s}\n'.format(im1[len(root):], im2[len(root):])
    with open(fname, 'w') as fid:
        fid.write(s[:-1])


def parse_and_cache_all_pairs(dname, data_dir='./data/'):
    if dname == 'habitat_release':
        dirname = os.path.join(data_dir, 'habitat_release')
        assert os.path.isdir(dirname), "cannot find folder for habitat_release pairs: " + dirname
        cache_file = os.path.join(dirname, 'pairs.txt')
        assert not os.path.isfile(cache_file), "cache file already exists: " + cache_file

        print('Parsing pairs for dataset: ' + dname)
        pairs = []
        for root, dirs, files in os.walk(dirname):
            if 'val' in root:
                continue
            dirs.sort()
            pairs += [(os.path.join(root, f), os.path.join(root, f[:-len('_1.jpeg')] + '_2.jpeg')) for f in sorted(files) if f.endswith('_1.jpeg')]
        print('Found {:,} pairs'.format(len(pairs)))
        print('Writing cache to: ' + cache_file)
        write_cache_file(cache_file, pairs, root=dirname)

    else:
        raise NotImplementedError('Unknown dataset: ' + dname)


def dnames_to_image_pairs(dnames, data_dir='./data/'):
    """
    dnames: '+'-separated string of dataset names with image pairs.
    """
    all_pairs = []
    for dname in dnames.split('+'):
        if dname == 'habitat_release':
            dirname = os.path.join(data_dir, 'habitat_release')
            assert os.path.isdir(dirname), "cannot find folder for habitat_release pairs: " + dirname
            cache_file = os.path.join(dirname, 'pairs.txt')
            assert os.path.isfile(cache_file), "cannot find cache file for habitat_release pairs, please first create the cache file, see instructions. " + cache_file
            pairs = load_pairs_from_cache_file(cache_file, root=dirname)
        elif dname in ['ARKitScenes', 'MegaDepth', '3DStreetView', 'IndoorVL']:
            dirname = os.path.join(data_dir, dname + '_crops')
            assert os.path.isdir(dirname), "cannot find folder for {:s} pairs: {:s}".format(dname, dirname)
            list_file = os.path.join(dirname, 'listing.txt')
            assert os.path.isfile(list_file), "cannot find list file for {:s} pairs, see instructions. {:s}".format(dname, list_file)
            pairs = load_pairs_from_list_file(list_file, root=dirname)
        else:
            raise NotImplementedError('Unknown dataset: ' + dname)
        print('  {:s}: {:,} pairs'.format(dname, len(pairs)))
        all_pairs += pairs
    if '+' in dnames:
        print(' Total: {:,} pairs'.format(len(all_pairs)))
    return all_pairs


class PairsDataset(Dataset):

    def __init__(self, dnames, trfs='', totensor=True, normalize=True, data_dir='./data/'):
        super().__init__()
        self.image_pairs = dnames_to_image_pairs(dnames, data_dir=data_dir)
        self.transforms = get_pair_transforms(transform_str=trfs, totensor=totensor, normalize=normalize)

    def __len__(self):
        return len(self.image_pairs)

    def __getitem__(self, index):
        im1path, im2path = self.image_pairs[index]
        im1 = load_image(im1path)
        im2 = load_image(im2path)
        if self.transforms is not None:
            im1, im2 = self.transforms(im1, im2)
        return im1, im2


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(prog="Computing and caching list of pairs for a given dataset")
    parser.add_argument('--data_dir', default='./data/', type=str, help="path where data are stored")
    parser.add_argument('--dataset', default='habitat_release', type=str, help="name of the dataset")
    args = parser.parse_args()
    parse_and_cache_all_pairs(dname=args.dataset, data_dir=args.data_dir)
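As a quick illustration of how this module fits together, here is a hedged usage sketch (not part of the upload). It assumes the habitat_release cache has already been built, e.g. by running this file with --dataset habitat_release, so that ./data/habitat_release/pairs.txt exists and the referenced images are at least 224 pixels on a side.

# Hypothetical usage sketch: wrap the pair dataset in a standard DataLoader.
from torch.utils.data import DataLoader

dataset = PairsDataset('habitat_release', trfs='crop224+acolor', data_dir='./data/')
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)
im1, im2 = next(iter(loader))  # two (8, 3, 224, 224) batches; crops and jitter are drawn independently per image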
demo/dust3r/croco/datasets/transforms.py
ADDED
@@ -0,0 +1,95 @@
# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).

import torch
import torchvision.transforms
import torchvision.transforms.functional as F

# "Pair": apply a transform on a pair
# "Both": apply the exact same transform to both images


class ComposePair(torchvision.transforms.Compose):
    def __call__(self, img1, img2):
        for t in self.transforms:
            img1, img2 = t(img1, img2)
        return img1, img2


class NormalizeBoth(torchvision.transforms.Normalize):
    def forward(self, img1, img2):
        img1 = super().forward(img1)
        img2 = super().forward(img2)
        return img1, img2


class ToTensorBoth(torchvision.transforms.ToTensor):
    def __call__(self, img1, img2):
        img1 = super().__call__(img1)
        img2 = super().__call__(img2)
        return img1, img2


class RandomCropPair(torchvision.transforms.RandomCrop):
    # the crop will be intentionally different for the two images with this class
    def forward(self, img1, img2):
        img1 = super().forward(img1)
        img2 = super().forward(img2)
        return img1, img2


class ColorJitterPair(torchvision.transforms.ColorJitter):
    # can be symmetric (same jitter for both images) or asymmetric (independent
    # jitter parameters for each image) depending on assymetric_prob
    def __init__(self, assymetric_prob, **kwargs):
        super().__init__(**kwargs)
        self.assymetric_prob = assymetric_prob

    def jitter_one(self, img, fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor):
        for fn_id in fn_idx:
            if fn_id == 0 and brightness_factor is not None:
                img = F.adjust_brightness(img, brightness_factor)
            elif fn_id == 1 and contrast_factor is not None:
                img = F.adjust_contrast(img, contrast_factor)
            elif fn_id == 2 and saturation_factor is not None:
                img = F.adjust_saturation(img, saturation_factor)
            elif fn_id == 3 and hue_factor is not None:
                img = F.adjust_hue(img, hue_factor)
        return img

    def forward(self, img1, img2):
        fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = self.get_params(
            self.brightness, self.contrast, self.saturation, self.hue
        )
        img1 = self.jitter_one(img1, fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor)
        if torch.rand(1) < self.assymetric_prob:  # asymmetric: redraw the jitter parameters for the second image
            fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = self.get_params(
                self.brightness, self.contrast, self.saturation, self.hue
            )
        img2 = self.jitter_one(img2, fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor)
        return img1, img2


def get_pair_transforms(transform_str, totensor=True, normalize=True):
    # transform_str is e.g. crop224+acolor
    trfs = []
    for s in transform_str.split('+'):
        if s.startswith('crop'):
            size = int(s[len('crop'):])
            trfs.append(RandomCropPair(size))
        elif s == 'acolor':
            trfs.append(ColorJitterPair(assymetric_prob=1.0, brightness=(0.6, 1.4), contrast=(0.6, 1.4), saturation=(0.6, 1.4), hue=0.0))
        elif s == '':  # if transform_str was ""
            pass
        else:
            raise NotImplementedError('Unknown augmentation: ' + s)

    if totensor:
        trfs.append(ToTensorBoth())
    if normalize:
        trfs.append(NormalizeBoth(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))

    if len(trfs) == 0:
        return None
    elif len(trfs) == 1:
        return trfs[0]  # return the single pair transform itself so it stays callable
    else:
        return ComposePair(trfs)
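To make the composition above concrete, here is a small sketch (not part of the upload; the image paths are placeholders) of building and applying a pair transform.

# Hypothetical usage sketch: 'crop224+acolor' yields RandomCropPair ->
# ColorJitterPair -> ToTensorBoth -> NormalizeBoth, wrapped in a ComposePair.
from PIL import Image

trf = get_pair_transforms('crop224+acolor')
img1 = Image.open('pair_1.jpg').convert('RGB')   # placeholder path
img2 = Image.open('pair_2.jpg').convert('RGB')   # placeholder path
t1, t2 = trf(img1, img2)                         # each a normalized (3, 224, 224) tensor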
demo/dust3r/croco/demo.py
ADDED
@@ -0,0 +1,55 @@
# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).

import torch
from models.croco import CroCoNet
from PIL import Image
import torchvision.transforms
from torchvision.transforms import ToTensor, Normalize, Compose


def main():
    device = torch.device('cuda:0' if torch.cuda.is_available() and torch.cuda.device_count() > 0 else 'cpu')

    # load 224x224 images and transform them to tensor
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_mean_tensor = torch.tensor(imagenet_mean).view(1, 3, 1, 1).to(device, non_blocking=True)
    imagenet_std = [0.229, 0.224, 0.225]
    imagenet_std_tensor = torch.tensor(imagenet_std).view(1, 3, 1, 1).to(device, non_blocking=True)
    trfs = Compose([ToTensor(), Normalize(mean=imagenet_mean, std=imagenet_std)])
    image1 = trfs(Image.open('assets/Chateau1.png').convert('RGB')).to(device, non_blocking=True).unsqueeze(0)
    image2 = trfs(Image.open('assets/Chateau2.png').convert('RGB')).to(device, non_blocking=True).unsqueeze(0)

    # load model
    ckpt = torch.load('pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth', 'cpu')
    model = CroCoNet(**ckpt.get('croco_kwargs', {})).to(device)
    model.eval()
    msg = model.load_state_dict(ckpt['model'], strict=True)

    # forward
    with torch.inference_mode():
        out, mask, target = model(image1, image2)

    # the output is normalized, thus use the mean/std of the actual image to go back to RGB space
    patchified = model.patchify(image1)
    mean = patchified.mean(dim=-1, keepdim=True)
    var = patchified.var(dim=-1, keepdim=True)
    decoded_image = model.unpatchify(out * (var + 1.e-6) ** .5 + mean)
    # undo imagenet normalization, prepare masked image
    decoded_image = decoded_image * imagenet_std_tensor + imagenet_mean_tensor
    input_image = image1 * imagenet_std_tensor + imagenet_mean_tensor
    ref_image = image2 * imagenet_std_tensor + imagenet_mean_tensor
    image_masks = model.unpatchify(model.patchify(torch.ones_like(ref_image)) * mask[:, :, None])
    masked_input_image = ((1 - image_masks) * input_image)

    # make visualization
    visualization = torch.cat((ref_image, masked_input_image, decoded_image, input_image), dim=3)  # 4*(B, 3, H, W) -> B, 3, H, W*4
    B, C, H, W = visualization.shape
    visualization = visualization.permute(1, 0, 2, 3).reshape(C, B * H, W)
    visualization = torchvision.transforms.functional.to_pil_image(torch.clamp(visualization, 0, 1))
    fname = "demo_output.png"
    visualization.save(fname)
    print('Visualization saved in ' + fname)


if __name__ == "__main__":
    main()
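The only subtle step in main() is the double de-normalization of the prediction. A standalone sketch with fake tensors (shapes are assumptions for a 224x224 input with 16x16 patches, not taken from the model) spells it out.

# Hypothetical sketch: CroCo's reconstruction target is per-patch normalized,
# so the prediction is first rescaled with the input patches' own statistics,
# then the ImageNet input normalization is inverted.
import torch

out = torch.randn(1, 196, 768)        # fake decoder output: (B, num_patches, patch_dim)
patches = torch.randn(1, 196, 768)    # fake patchified input image
mean = patches.mean(dim=-1, keepdim=True)
var = patches.var(dim=-1, keepdim=True)
rescaled = out * (var + 1.e-6) ** .5 + mean   # step 1: undo per-patch normalization
# step 2 (after unpatchify): image * imagenet_std_tensor + imagenet_mean_tensor, as in main()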