lc700x committed
Commit 18937f8 · verified · 1 Parent(s): f1caae3

Upload 3 files

Files changed (3)
  1. config.json +98 -0
  2. model.safetensors +3 -0
  3. preprocessor_config.json +27 -0
config.json ADDED
@@ -0,0 +1,98 @@
+ {
+   "architectures": [
+     "DepthAnythingForDepthEstimation"
+   ],
+   "backbone": null,
+   "backbone_config": {
+     "apply_layernorm": true,
+     "architectures": [
+       "Dinov2Model"
+     ],
+     "attention_probs_dropout_prob": 0.0,
+     "drop_path_rate": 0.0,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.0,
+     "hidden_size": 1024,
+     "image_size": 518,
+     "initializer_range": 0.02,
+     "layer_norm_eps": 1e-06,
+     "layerscale_value": 1.0,
+     "mlp_ratio": 4,
+     "model_type": "dinov2",
+     "num_attention_heads": 16,
+     "num_channels": 3,
+     "num_hidden_layers": 24,
+     "out_features": [
+       "stage21",
+       "stage22",
+       "stage23",
+       "stage24"
+     ],
+     "out_indices": [
+       21,
+       22,
+       23,
+       24
+     ],
+     "patch_size": 14,
+     "qkv_bias": true,
+     "reshape_hidden_states": false,
+     "stage_names": [
+       "stem",
+       "stage1",
+       "stage2",
+       "stage3",
+       "stage4",
+       "stage5",
+       "stage6",
+       "stage7",
+       "stage8",
+       "stage9",
+       "stage10",
+       "stage11",
+       "stage12",
+       "stage13",
+       "stage14",
+       "stage15",
+       "stage16",
+       "stage17",
+       "stage18",
+       "stage19",
+       "stage20",
+       "stage21",
+       "stage22",
+       "stage23",
+       "stage24"
+     ],
+     "torch_dtype": "float32",
+     "use_mask_token": true,
+     "use_swiglu_ffn": false
+   },
+   "backbone_kwargs": null,
+   "depth_estimation_type": "relative",
+   "fusion_hidden_size": 256,
+   "head_hidden_size": 32,
+   "head_in_index": -1,
+   "initializer_range": 0.02,
+   "max_depth": 1,
+   "model_type": "depth_anything",
+   "neck_hidden_sizes": [
+     256,
+     512,
+     1024,
+     1024
+   ],
+   "output_attentions": false,
+   "patch_size": 14,
+   "reassemble_factors": [
+     4,
+     2,
+     1,
+     0.5
+   ],
+   "reassemble_hidden_size": 1024,
+   "torch_dtype": "float32",
+   "transformers_version": null,
+   "use_pretrained_backbone": false,
+   "use_timm_backbone": false
+ }
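
The config above wires a DINOv2-Large backbone (24 layers, hidden size 1024, patch size 14) into a Depth Anything head that predicts relative depth. A minimal sketch of reading it back with Hugging Face transformers follows; "path/to/this/repo" is a placeholder for a local clone or hub id, not a confirmed path.

# Minimal sketch, not part of the commit; the repo path is a placeholder.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("path/to/this/repo")
print(config.model_type)                   # "depth_anything"
print(config.depth_estimation_type)        # "relative"
print(config.backbone_config.hidden_size)  # 1024 (DINOv2-Large backbone)
print(config.backbone_config.out_indices)  # [21, 22, 23, 24]
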
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc27360a3e6906e5ddd8f618e2dcde11362327361918b8f76793e42e25de31b3
+ size 1341322868
preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "do_normalize": true,
+   "do_pad": false,
+   "do_reduce_labels": false,
+   "do_rescale": true,
+   "do_resize": true,
+   "ensure_multiple_of": 14,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "DPTImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "keep_aspect_ratio": true,
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 518,
+     "width": 518
+   },
+   "size_divisor": null
+ }
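
The image processor settings above (resize with aspect ratio kept to multiples of 14, ImageNet mean/std normalization, 518×518 target size) pair with the model config for standard transformers depth estimation. A minimal end-to-end sketch, again assuming a local clone of this repository; the repo path and "example.jpg" are placeholders.

# Minimal sketch, not part of the commit; repo path and image file are placeholders.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForDepthEstimation

processor = AutoImageProcessor.from_pretrained("path/to/this/repo")
model = AutoModelForDepthEstimation.from_pretrained("path/to/this/repo")

image = Image.open("example.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # (batch, H, W), relative depth

# Resize the prediction back to the original image resolution.
depth = torch.nn.functional.interpolate(
    predicted_depth.unsqueeze(1),
    size=image.size[::-1],  # PIL size is (width, height)
    mode="bicubic",
    align_corners=False,
).squeeze()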