Xenova (HF Staff) committed · verified
Commit 7166ce2 · 1 Parent(s): ead262f

Upload Idefics3ForConditionalGeneration

Files changed (3)
  1. config.json +263 -0
  2. generation_config.json +12 -0
  3. model.safetensors +3 -0
config.json ADDED
@@ -0,0 +1,263 @@
+{
+  "_name_or_path": "ckg/tiny-idefics3-llama",
+  "architectures": [
+    "Idefics3ForConditionalGeneration"
+  ],
+  "image_token_id": 128257,
+  "model_type": "idefics3",
+  "scale_factor": 2,
+  "text_config": {
+    "_attn_implementation_autoset": false,
+    "_flash_attn_2_enabled": true,
+    "_name_or_path": "None",
+    "add_cross_attention": false,
+    "architectures": [
+      "Idefics3ForVisionText2Text"
+    ],
+    "attention_bias": false,
+    "attention_dropout": 0.0,
+    "auto_map": {
+      "AutoConfig": "configuration_idefics3.Idefics3Config",
+      "AutoModelForCausalLM": "modeling_idefics3.Idefics3ForVisionText2Text"
+    },
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": 128000,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": [
+      128001,
+      128008,
+      128009
+    ],
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "head_dim": 16,
+    "hidden_act": "silu",
+    "hidden_size": 64,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_range": 0.02,
+    "intermediate_size": 256,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 131072,
+    "min_length": 0,
+    "mlp_bias": false,
+    "model_type": "llama",
+    "neftune_noise_alpha": 0.0,
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 4,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 8,
+    "num_key_value_heads": 1,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 128002,
+    "perceiver_config": {
+      "_name_or_path": "",
+      "add_cross_attention": false,
+      "architectures": null,
+      "attention_dropout": 0.0,
+      "bad_words_ids": null,
+      "begin_suppress_tokens": null,
+      "bos_token_id": null,
+      "chunk_size_feed_forward": 0,
+      "cross_attention_hidden_size": null,
+      "decoder_start_token_id": null,
+      "diversity_penalty": 0.0,
+      "do_sample": false,
+      "early_stopping": false,
+      "encoder_no_repeat_ngram_size": 0,
+      "eos_token_id": null,
+      "exponential_decay_length_penalty": null,
+      "finetuning_task": null,
+      "forced_bos_token_id": null,
+      "forced_eos_token_id": null,
+      "hidden_act": "silu",
+      "id2label": {
+        "0": "LABEL_0",
+        "1": "LABEL_1"
+      },
+      "is_decoder": false,
+      "is_encoder_decoder": false,
+      "label2id": {
+        "LABEL_0": 0,
+        "LABEL_1": 1
+      },
+      "length_penalty": 1.0,
+      "max_length": 20,
+      "min_length": 0,
+      "model_type": "idefics3",
+      "no_repeat_ngram_size": 0,
+      "num_beam_groups": 1,
+      "num_beams": 1,
+      "num_key_value_heads": 1,
+      "num_return_sequences": 1,
+      "output_attentions": false,
+      "output_hidden_states": false,
+      "output_scores": false,
+      "pad_token_id": 128002,
+      "prefix": null,
+      "problem_type": null,
+      "pruned_heads": {},
+      "qk_layer_norms_perceiver": false,
+      "remove_invalid_values": false,
+      "repetition_penalty": 1.0,
+      "resampler_depth": 6,
+      "resampler_head_dim": 96,
+      "resampler_n_heads": 16,
+      "resampler_n_latents": 64,
+      "return_dict": true,
+      "return_dict_in_generate": false,
+      "sep_token_id": null,
+      "suppress_tokens": null,
+      "task_specific_params": null,
+      "temperature": 1.0,
+      "tf_legacy_loss": false,
+      "tie_encoder_decoder": false,
+      "tie_word_embeddings": true,
+      "tokenizer_class": null,
+      "top_k": 50,
+      "top_p": 1.0,
+      "torch_dtype": null,
+      "torchscript": false,
+      "transformers_version": "4.43.2",
+      "typical_p": 1.0,
+      "use_bfloat16": false
+    },
+    "prefix": null,
+    "pretraining_tp": 1,
+    "problem_type": null,
+    "pruned_heads": {},
+    "qk_layer_norms": false,
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "rms_norm_eps": 1e-05,
+    "rope_scaling": {
+      "factor": 8.0,
+      "high_freq_factor": 4.0,
+      "low_freq_factor": 1.0,
+      "original_max_position_embeddings": 8192,
+      "rope_type": "llama3"
+    },
+    "rope_theta": 500000.0,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": false,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": "bfloat16",
+    "torchscript": false,
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "use_cache": true,
+    "use_resampler": false,
+    "vocab_size": 128259
+  },
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.46.2",
+  "use_cache": true,
+  "vision_config": {
+    "_attn_implementation_autoset": false,
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 64,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "image_size": 364,
+    "initializer_range": 0.02,
+    "intermediate_size": 256,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-06,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "idefics3",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 4,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_channels": 3,
+    "num_hidden_layers": 4,
+    "num_return_sequences": 1,
+    "old_vision_model_name": "/fsx/hugo/siglip-so400m-14-364-flash-attn2-navit",
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 128002,
+    "patch_size": 14,
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": "bfloat16",
+    "torchscript": false,
+    "typical_p": 1.0,
+    "use_bfloat16": false
+  }
+}
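
Not part of the commit itself: a minimal sketch of how a checkpoint carrying this config.json can be loaded with 🤗 Transformers (the Idefics3 classes require transformers ≥ 4.46, matching the `transformers_version` above). The repo id below is a placeholder, not a path taken from the commit; point it at whichever Hub repository these files live in.

```python
# Minimal sketch, assuming transformers >= 4.46 and that the files from this
# commit are hosted in some Hub repo ("your-username/tiny-idefics3" is a placeholder).
import torch
from transformers import AutoConfig, Idefics3ForConditionalGeneration

repo_id = "your-username/tiny-idefics3"  # placeholder repo id

config = AutoConfig.from_pretrained(repo_id)
print(config.model_type)                       # "idefics3"
print(config.text_config.hidden_size)          # 64 (tiny Llama-style text backbone)
print(config.vision_config.num_hidden_layers)  # 4  (tiny SigLIP-style vision tower)

model = Idefics3ForConditionalGeneration.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
)
print(sum(p.numel() for p in model.parameters()))  # small model: the checkpoint is ~34 MB
```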
generation_config.json ADDED
@@ -0,0 +1,12 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 128000,
+  "eos_token_id": [
+    128001,
+    128008,
+    128009,
+    128258
+  ],
+  "pad_token_id": 128002,
+  "transformers_version": "4.46.2"
+}
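
For illustration only: generation_config.json holds the defaults that `model.generate()` picks up automatically, and it can be inspected on its own with `GenerationConfig.from_pretrained` (again with a placeholder repo id, since the commit does not name the repository).

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("your-username/tiny-idefics3")  # placeholder repo id
print(gen_config.bos_token_id)  # 128000
print(gen_config.eos_token_id)  # [128001, 128008, 128009, 128258]
print(gen_config.pad_token_id)  # 128002
```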
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75b281032c41261650e37318c338060987c1e8aa284b12fffec229ec828fdd3b
+size 34399128
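
The diff above shows only the Git LFS pointer for model.safetensors, not the weights themselves. As a quick sanity check (assuming the LFS object has actually been pulled locally, e.g. via `git lfs pull`), the downloaded file should match the pointer's size and SHA-256:

```python
import hashlib
import os

path = "model.safetensors"  # local copy after the LFS object has been fetched

print(os.path.getsize(path))  # expected: 34399128
with open(path, "rb") as f:
    print(hashlib.sha256(f.read()).hexdigest())
# expected: 75b281032c41261650e37318c338060987c1e8aa284b12fffec229ec828fdd3b
```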