Pipeline: Image-Text-to-Text · Library: Transformers · Weights: Safetensors · Language: English · Model type: internvl_chat

Tags: feature-extraction, mathematics, reasoning, multi-modal-qa, math-qa, figure-qa, geometry-qa, math-word-problem, textbook-qa, vqa, geometry-diagram, synthetic-scene, chart, plot, scientific-figure, table, function-plot, abstract-scene, puzzle-test, document-image, science, conversational, custom_code
{
  "_commit_hash": null,
  "_name_or_path": "./pretrained/Mini-InternVL-Chat-2B-V1-5",
  "architectures": [
    "InternVLChatModel"
  ],
  "auto_map": {
    "AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
    "AutoModel": "modeling_internvl_chat.InternVLChatModel",
    "AutoModelForCausalLM": "modeling_internvl_chat.InternVLChatModel"
  },
  "downsample_ratio": 0.5,
  "dynamic_image_size": true,
  "force_image_size": 448,
  "llm_config": {
    "_name_or_path": "pretrained/internlm2-chat-1_8b",
    "add_cross_attention": false,
    "architectures": [
      "InternLM2ForCausalLM"
    ],
    "attn_implementation": "flash_attention_2",
    "auto_map": {
      "AutoConfig": "configuration_internlm2.InternLM2Config",
      "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
      "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM"
    },
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bias": false,
    "bos_token_id": 1,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 2,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "silu",
    "hidden_size": 2048,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_range": 0.02,
    "intermediate_size": 8192,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 32768,
    "min_length": 0,
    "model_type": "internlm2",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 24,
    "num_key_value_heads": 8,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 2,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "rms_norm_eps": 1e-05,
    "rope_scaling": {
      "factor": 3.0,
      "type": "dynamic"
    },
    "rope_theta": 1000000,
    "sep_token_id": null,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": false,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "bfloat16",
    "torchscript": false,
    "transformers_version": "4.39.0",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "use_cache": false,
    "vocab_size": 92557
  },
  "max_dynamic_patch": 12,
  "min_dynamic_patch": 1,
  "model_type": "internvl_chat",
  "pad2square": false,
  "ps_version": "v2",
  "select_layer": -1,
  "template": "internlm2-chat",
  "torch_dtype": "bfloat16",
  "transformers_version": null,
  "use_backbone_lora": 0,
  "use_llm_lora": 0,
  "use_thumbnail": true,
  "vision_config": {
    "_name_or_path": "OpenGVLab/InternViT-300M-448px",
    "add_cross_attention": false,
    "architectures": [
      "InternVisionModel"
    ],
    "attention_dropout": 0.0,
    "auto_map": {
      "AutoConfig": "configuration_intern_vit.InternVisionConfig",
      "AutoModel": "modeling_intern_vit.InternVisionModel"
    },
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "drop_path_rate": 0.1,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "gelu",
    "hidden_size": 1024,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 448,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-06,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "intern_vit_6b",
    "no_repeat_ngram_size": 0,
    "norm_type": "layer_norm",
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_channels": 3,
    "num_hidden_layers": 24,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "qk_normalization": false,
    "qkv_bias": true,
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "bfloat16",
    "torchscript": false,
    "transformers_version": "4.39.0",
    "typical_p": 1.0,
    "use_bfloat16": true,
    "use_flash_attn": true
  }
}
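The auto_map entries point at custom modeling files shipped with the checkpoint (hence the custom_code tag), so the model has to be loaded with trust_remote_code=True. Below is a minimal loading-and-chat sketch, assuming the public OpenGVLab/Mini-InternVL-Chat-2B-V1-5 hub id, a CUDA device, the chat() helper defined by InternVLChatModel, and an example.jpg on disk; the single-tile preprocessing here deliberately skips the repo's dynamic tiling helper and feeds one 448x448 crop.

import torch
from PIL import Image
from torchvision import transforms as T
from transformers import AutoModel, AutoTokenizer

path = "OpenGVLab/Mini-InternVL-Chat-2B-V1-5"  # assumed hub id for this checkpoint

# trust_remote_code=True is required: auto_map resolves to the repo's own
# configuration_internvl_chat.py / modeling_internvl_chat.py files.
model = AutoModel.from_pretrained(
    path, torch_dtype=torch.bfloat16, trust_remote_code=True
).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)

# Simplified single-tile preprocessing: one 448x448 image (force_image_size),
# normalized with ImageNet statistics. The repo's own helper instead tiles
# the image dynamically (min_dynamic_patch..max_dynamic_patch tiles of 448px,
# plus a thumbnail tile).
transform = T.Compose([
    T.Resize((448, 448)),
    T.ToTensor(),
    T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
])
pixel_values = transform(Image.open("example.jpg").convert("RGB"))
pixel_values = pixel_values.unsqueeze(0).to(torch.bfloat16).cuda()

question = "<image>\nDescribe the figure."
response = model.chat(tokenizer, pixel_values, question,
                      generation_config=dict(max_new_tokens=512, do_sample=False))
print(response)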
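The tiling fields also fix the visual token budget. Below is a rough sketch, not the repo's exact helper, of how dynamic_image_size, min_dynamic_patch, max_dynamic_patch, and use_thumbnail interact: the preprocessor picks the cols x rows grid whose aspect ratio is closest to the image's, subject to the tile-count bounds, and appends a thumbnail tile when more than one tile is used. Each 448px tile yields (448 / 14)^2 = 1024 ViT patches, which the pixel-shuffle step (ps_version "v2") reduces by downsample_ratio^2 to 256 tokens per tile. The function name num_tiles is hypothetical.

import math

# Sketch of the grid selection implied by the config; the checkpoint's own
# dynamic_preprocess helper may break ties differently.
def num_tiles(width, height, min_num=1, max_num=12, use_thumbnail=True):
    best, best_diff = 1, math.inf
    for cols in range(1, max_num + 1):
        for rows in range(1, max_num + 1):
            n = cols * rows
            if min_num <= n <= max_num:
                diff = abs(width / height - cols / rows)
                if diff < best_diff:
                    best, best_diff = n, diff
    # use_thumbnail adds one extra downscaled tile of the whole image
    # whenever the image was split into more than one tile.
    return best + (1 if use_thumbnail and best > 1 else 0)

# Token budget per tile: (448 / 14)**2 = 1024 patches, pixel-shuffled by
# downsample_ratio**2 = 0.25, i.e. 256 visual tokens per tile.
tokens_per_tile = int((448 / 14) ** 2 * 0.5 ** 2)

print(num_tiles(1920, 1080), tokens_per_tile)  # tile count for a 16:9 image, 256 tokens/tile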