Upload nanoVLM using push_to_hub
Files changed:
- config.json +53 -5
- model.safetensors +2 -2
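Per the commit title, this upload was produced programmatically via nanoVLM's push_to_hub helper rather than by hand-editing files. As a minimal sketch (assuming the huggingface/nanoVLM codebase, where VisionLanguageModel exposes from_pretrained and push_to_hub; the checkpoint path and repo id below are placeholders):

    # Minimal sketch of how a commit like this one is typically created.
    # Assumes nanoVLM's VisionLanguageModel class; names are placeholders.
    from models.vision_language_model import VisionLanguageModel

    model = VisionLanguageModel.from_pretrained("checkpoints/my-run")
    model.push_to_hub("your-username/nanoVLM")  # uploads config.json + model.safetensors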
config.json CHANGED
@@ -15,14 +15,14 @@
     "lm_re_base": 100000,
     "lm_max_position_embeddings": 8192,
     "lm_base_vocab_size": 49152,
-    "extra_token_amount":
-    "lm_vocab_size":
+    "extra_token_amount": 65,
+    "lm_vocab_size": 49217,
     "lm_n_heads": 15,
     "lm_n_kv_heads": 5,
     "lm_dropout": 0.0,
     "lm_n_blocks": 32,
     "lm_attn_scaling": 1.0,
-    "lm_max_length":
+    "lm_max_length": 4096,
     "lm_use_tokens": false,
     "lm_tie_weights": true,
     "lm_model_type": "HuggingFaceTB/SmolLM2-360M-Instruct",
@@ -30,25 +30,73 @@
     "lm_chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
     "mp_pixel_shuffle_factor": 4,
     "mp_image_token_length": 64,
-    "max_img_size":
+    "max_img_size": 2048,
     "vlm_extra_tokens": {
         "image_token": "<|image|>",
         "r1c1": "<row_1_col_1>",
         "r1c2": "<row_1_col_2>",
         "r1c3": "<row_1_col_3>",
         "r1c4": "<row_1_col_4>",
+        "r1c5": "<row_1_col_5>",
+        "r1c6": "<row_1_col_6>",
+        "r1c7": "<row_1_col_7>",
+        "r1c8": "<row_1_col_8>",
         "r2c1": "<row_2_col_1>",
         "r2c2": "<row_2_col_2>",
         "r2c3": "<row_2_col_3>",
         "r2c4": "<row_2_col_4>",
+        "r2c5": "<row_2_col_5>",
+        "r2c6": "<row_2_col_6>",
+        "r2c7": "<row_2_col_7>",
+        "r2c8": "<row_2_col_8>",
         "r3c1": "<row_3_col_1>",
         "r3c2": "<row_3_col_2>",
         "r3c3": "<row_3_col_3>",
         "r3c4": "<row_3_col_4>",
+        "r3c5": "<row_3_col_5>",
+        "r3c6": "<row_3_col_6>",
+        "r3c7": "<row_3_col_7>",
+        "r3c8": "<row_3_col_8>",
         "r4c1": "<row_4_col_1>",
         "r4c2": "<row_4_col_2>",
         "r4c3": "<row_4_col_3>",
-        "r4c4": "<row_4_col_4>"
+        "r4c4": "<row_4_col_4>",
+        "r4c5": "<row_4_col_5>",
+        "r4c6": "<row_4_col_6>",
+        "r4c7": "<row_4_col_7>",
+        "r4c8": "<row_4_col_8>",
+        "r5c1": "<row_5_col_1>",
+        "r5c2": "<row_5_col_2>",
+        "r5c3": "<row_5_col_3>",
+        "r5c4": "<row_5_col_4>",
+        "r5c5": "<row_5_col_5>",
+        "r5c6": "<row_5_col_6>",
+        "r5c7": "<row_5_col_7>",
+        "r5c8": "<row_5_col_8>",
+        "r6c1": "<row_6_col_1>",
+        "r6c2": "<row_6_col_2>",
+        "r6c3": "<row_6_col_3>",
+        "r6c4": "<row_6_col_4>",
+        "r6c5": "<row_6_col_5>",
+        "r6c6": "<row_6_col_6>",
+        "r6c7": "<row_6_col_7>",
+        "r6c8": "<row_6_col_8>",
+        "r7c1": "<row_7_col_1>",
+        "r7c2": "<row_7_col_2>",
+        "r7c3": "<row_7_col_3>",
+        "r7c4": "<row_7_col_4>",
+        "r7c5": "<row_7_col_5>",
+        "r7c6": "<row_7_col_6>",
+        "r7c7": "<row_7_col_7>",
+        "r7c8": "<row_7_col_8>",
+        "r8c1": "<row_8_col_1>",
+        "r8c2": "<row_8_col_2>",
+        "r8c3": "<row_8_col_3>",
+        "r8c4": "<row_8_col_4>",
+        "r8c5": "<row_8_col_5>",
+        "r8c6": "<row_8_col_6>",
+        "r8c7": "<row_8_col_7>",
+        "r8c8": "<row_8_col_8>"
     },
     "vlm_load_backbone_weights": true,
     "vlm_checkpoint_path": "checkpoints",
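The new values are internally consistent: the tiled-image position tokens grow from a 4x4 grid (r1c1 through r4c4) to an 8x8 grid (r1c1 through r8c8), i.e. 64 <row_R_col_C> tokens, which together with the <|image|> token gives "extra_token_amount": 65, and the vocabulary grows to match: lm_vocab_size = lm_base_vocab_size + extra_token_amount = 49152 + 65 = 49217. A quick check, with values taken from the new config:

    # Sanity-check the vocab arithmetic in the updated config.json.
    grid_tokens = 8 * 8            # <row_R_col_C> tokens for the 8x8 tile grid
    extra = grid_tokens + 1        # plus the <|image|> token
    assert extra == 65             # matches "extra_token_amount"
    assert 49152 + extra == 49217  # lm_base_vocab_size + extra == lm_vocab_size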
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:dae42036d7a15464f763a342c2df3b7727998de9555a7f1d579609450b68cd8c
+size 1840500664
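model.safetensors is a Git LFS pointer file, so the diff only updates the SHA-256 of the new weight blob and its byte size. The new size, 1840500664 bytes (~1.84 GB), is consistent with roughly 460M float32 parameters. To inspect the uploaded weights without loading the full model, a sketch assuming the standard huggingface_hub and safetensors APIs (the repo id is a placeholder):

    # Download the checkpoint and print embedding shapes, which should
    # reflect the enlarged vocabulary. Repo id below is a placeholder.
    from huggingface_hub import hf_hub_download
    from safetensors import safe_open

    path = hf_hub_download("your-username/nanoVLM", "model.safetensors")
    with safe_open(path, framework="pt") as f:
        for name in f.keys():
            if "embed" in name.lower():
                print(name, f.get_slice(name).get_shape())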