applied-ai-018 committed
Commit 7523c62 · verified · Parent: 324085e

Add files using upload-large-folder tool
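For reference, the "upload-large-folder" tool named in the commit message appears to correspond to the large-upload helper in huggingface_hub. The snippet below is a minimal, hypothetical sketch of how such a folder upload is typically driven from Python; the repo id and local path are placeholders, not values taken from this commit:

from huggingface_hub import HfApi

api = HfApi()  # assumes an auth token is already configured (e.g. via `huggingface-cli login`)
api.upload_large_folder(
    repo_id="applied-ai-018/example-repo",  # placeholder repo id
    repo_type="model",                      # placeholder repo type
    folder_path="./local_run_dir",          # placeholder folder containing ckpts/, venv/, ...
)

Large binary tensors such as the fp32.pt and exp_avg.pt files below are stored via Git LFS, which is why their diffs only show a version line, an oid, and a size.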

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step40/zero/18.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  2. ckpts/universal/global_step40/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  3. venv/lib/python3.10/site-packages/transformers/models/blip_2/__init__.py +71 -0
  4. venv/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/__init__.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/configuration_blip_2.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/convert_blip_2_original_to_pytorch.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/modeling_blip_2.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/processing_blip_2.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/transformers/models/blip_2/configuration_blip_2.py +355 -0
  10. venv/lib/python3.10/site-packages/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py +291 -0
  11. venv/lib/python3.10/site-packages/transformers/models/blip_2/modeling_blip_2.py +1853 -0
  12. venv/lib/python3.10/site-packages/transformers/models/blip_2/processing_blip_2.py +155 -0
  13. venv/lib/python3.10/site-packages/transformers/models/electra/modeling_electra.py +1679 -0
  14. venv/lib/python3.10/site-packages/transformers/models/idefics2/__init__.py +74 -0
  15. venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/__init__.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/configuration_idefics2.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/convert_idefics2_weights_to_hf.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/image_processing_idefics2.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/modeling_idefics2.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/processing_idefics2.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/transformers/models/idefics2/configuration_idefics2.py +262 -0
  22. venv/lib/python3.10/site-packages/transformers/models/idefics2/convert_idefics2_weights_to_hf.py +185 -0
  23. venv/lib/python3.10/site-packages/transformers/models/idefics2/image_processing_idefics2.py +596 -0
  24. venv/lib/python3.10/site-packages/transformers/models/idefics2/modeling_idefics2.py +1956 -0
  25. venv/lib/python3.10/site-packages/transformers/models/idefics2/processing_idefics2.py +348 -0
  26. venv/lib/python3.10/site-packages/transformers/models/llava_next/__init__.py +74 -0
  27. venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/configuration_llava_next.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/convert_llava_next_weights_to_hf.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/image_processing_llava_next.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/modeling_llava_next.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/processing_llava_next.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/llava_next/configuration_llava_next.py +141 -0
  34. venv/lib/python3.10/site-packages/transformers/models/llava_next/convert_llava_next_weights_to_hf.py +342 -0
  35. venv/lib/python3.10/site-packages/transformers/models/llava_next/image_processing_llava_next.py +608 -0
  36. venv/lib/python3.10/site-packages/transformers/models/llava_next/modeling_llava_next.py +698 -0
  37. venv/lib/python3.10/site-packages/transformers/models/llava_next/processing_llava_next.py +135 -0
  38. venv/lib/python3.10/site-packages/transformers/models/pix2struct/__init__.py +86 -0
  39. venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/__init__.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/configuration_pix2struct.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/convert_pix2struct_original_pytorch_to_hf.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/image_processing_pix2struct.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/modeling_pix2struct.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/processing_pix2struct.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/pix2struct/configuration_pix2struct.py +387 -0
  46. venv/lib/python3.10/site-packages/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py +155 -0
  47. venv/lib/python3.10/site-packages/transformers/models/pix2struct/image_processing_pix2struct.py +460 -0
  48. venv/lib/python3.10/site-packages/transformers/models/pix2struct/modeling_pix2struct.py +1786 -0
  49. venv/lib/python3.10/site-packages/transformers/models/pix2struct/processing_pix2struct.py +163 -0
  50. venv/lib/python3.10/site-packages/transformers/models/resnet/__init__.py +110 -0
ckpts/universal/global_step40/zero/18.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0769c5b5aaef7264ba0816d4b9a6e5f55ba8b53e523b4d30389cfcb9b0206b99
+ size 33555533
ckpts/universal/global_step40/zero/8.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ec5663693ba1b9b64b1244f7ba894f00c9bcb300dc96df4a899aaa8df75e7a2
+ size 33555612
venv/lib/python3.10/site-packages/transformers/models/blip_2/__init__.py ADDED
@@ -0,0 +1,71 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+ _import_structure = {
+     "configuration_blip_2": [
+         "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "Blip2Config",
+         "Blip2QFormerConfig",
+         "Blip2VisionConfig",
+     ],
+     "processing_blip_2": ["Blip2Processor"],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_blip_2"] = [
+         "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "Blip2Model",
+         "Blip2QFormerModel",
+         "Blip2PreTrainedModel",
+         "Blip2ForConditionalGeneration",
+         "Blip2VisionModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_blip_2 import (
+         BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         Blip2Config,
+         Blip2QFormerConfig,
+         Blip2VisionConfig,
+     )
+     from .processing_blip_2 import Blip2Processor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_blip_2 import (
+             BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
+             Blip2ForConditionalGeneration,
+             Blip2Model,
+             Blip2PreTrainedModel,
+             Blip2QFormerModel,
+             Blip2VisionModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
venv/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.13 kB).
 
venv/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/configuration_blip_2.cpython-310.pyc ADDED
Binary file (14.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/convert_blip_2_original_to_pytorch.cpython-310.pyc ADDED
Binary file (7.97 kB).
 
venv/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/modeling_blip_2.cpython-310.pyc ADDED
Binary file (55 kB).
 
venv/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/processing_blip_2.cpython-310.pyc ADDED
Binary file (4.34 kB).
 
venv/lib/python3.10/site-packages/transformers/models/blip_2/configuration_blip_2.py ADDED
@@ -0,0 +1,355 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BLIP-2 model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
22
+ from ...utils import logging
23
+ from ..auto import CONFIG_MAPPING
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class Blip2VisionConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`Blip2VisionModel`]. It is used to instantiate a
35
+ BLIP-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
36
+ configuration defaults will yield a similar configuration to that of the BLIP-2
37
+ [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+ Args:
43
+ hidden_size (`int`, *optional*, defaults to 1408):
44
+ Dimensionality of the encoder layers and the pooler layer.
45
+ intermediate_size (`int`, *optional*, defaults to 6144):
46
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
47
+ num_hidden_layers (`int`, *optional*, defaults to 39):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 16):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ image_size (`int`, *optional*, defaults to 224):
52
+ The size (resolution) of each image.
53
+ patch_size (`int`, *optional*, defaults to 14):
54
+ The size (resolution) of each patch.
55
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults
58
+ to 1e-5): The epsilon used by the layer normalization layers.
59
+ attention_dropout (`float`, *optional*, defaults to 0.0):
60
+ The dropout ratio for the attention probabilities.
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ qkv_bias (`bool`, *optional*, defaults to `True`):
64
+ Whether to add a bias to the queries and values in the self-attention layers.
65
+
66
+ Example:
67
+
68
+ ```python
69
+ >>> from transformers import Blip2VisionConfig, Blip2VisionModel
70
+
71
+ >>> # Initializing a Blip2VisionConfig with Salesforce/blip2-opt-2.7b style configuration
72
+ >>> configuration = Blip2VisionConfig()
73
+
74
+ >>> # Initializing a Blip2VisionModel (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
75
+ >>> model = Blip2VisionModel(configuration)
76
+
77
+ >>> # Accessing the model configuration
78
+ >>> configuration = model.config
79
+ ```"""
80
+
81
+ model_type = "blip_2_vision_model"
82
+
83
+ def __init__(
84
+ self,
85
+ hidden_size=1408,
86
+ intermediate_size=6144,
87
+ num_hidden_layers=39,
88
+ num_attention_heads=16,
89
+ image_size=224,
90
+ patch_size=14,
91
+ hidden_act="gelu",
92
+ layer_norm_eps=1e-6,
93
+ attention_dropout=0.0,
94
+ initializer_range=1e-10,
95
+ qkv_bias=True,
96
+ **kwargs,
97
+ ):
98
+ super().__init__(**kwargs)
99
+
100
+ self.hidden_size = hidden_size
101
+ self.intermediate_size = intermediate_size
102
+ self.num_hidden_layers = num_hidden_layers
103
+ self.num_attention_heads = num_attention_heads
104
+ self.patch_size = patch_size
105
+ self.image_size = image_size
106
+ self.initializer_range = initializer_range
107
+ self.attention_dropout = attention_dropout
108
+ self.layer_norm_eps = layer_norm_eps
109
+ self.hidden_act = hidden_act
110
+ self.qkv_bias = qkv_bias
111
+
112
+ @classmethod
113
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
114
+ cls._set_token_in_kwargs(kwargs)
115
+
116
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
117
+
118
+ # get the vision config dict if we are loading from Blip2Config
119
+ if config_dict.get("model_type") == "blip-2":
120
+ config_dict = config_dict["vision_config"]
121
+
122
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
123
+ logger.warning(
124
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
125
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
126
+ )
127
+
128
+ return cls.from_dict(config_dict, **kwargs)
129
+
130
+
131
+ class Blip2QFormerConfig(PretrainedConfig):
132
+ r"""
133
+ This is the configuration class to store the configuration of a [`Blip2QFormerModel`]. It is used to instantiate a
134
+ BLIP-2 Querying Transformer (Q-Former) model according to the specified arguments, defining the model architecture.
135
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the BLIP-2
136
+ [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture. Configuration objects
137
+ inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
138
+ [`PretrainedConfig`] for more information.
139
+
140
+ Note that [`Blip2QFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.
141
+
142
+ Args:
143
+ vocab_size (`int`, *optional*, defaults to 30522):
144
+ Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by
145
+ the `inputs_ids` passed when calling the model.
146
+ hidden_size (`int`, *optional*, defaults to 768):
147
+ Dimensionality of the encoder layers and the pooler layer.
148
+ num_hidden_layers (`int`, *optional*, defaults to 12):
149
+ Number of hidden layers in the Transformer encoder.
150
+ num_attention_heads (`int`, *optional*, defaults to 12):
151
+ Number of attention heads for each attention layer in the Transformer encoder.
152
+ intermediate_size (`int`, *optional*, defaults to 3072):
153
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
154
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
155
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
156
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
157
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
158
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
159
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
160
+ The dropout ratio for the attention probabilities.
161
+ max_position_embeddings (`int`, *optional*, defaults to 512):
162
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
163
+ just in case (e.g., 512 or 1024 or 2048).
164
+ initializer_range (`float`, *optional*, defaults to 0.02):
165
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
166
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
167
+ The epsilon used by the layer normalization layers.
168
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
169
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
170
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
171
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
172
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
173
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
174
+ cross_attention_frequency (`int`, *optional*, defaults to 2):
175
+ The frequency of adding cross-attention to the Transformer layers.
176
+ encoder_hidden_size (`int`, *optional*, defaults to 1408):
177
+ The hidden size of the hidden states for cross-attention.
178
+
179
+ Examples:
180
+
181
+ ```python
182
+ >>> from transformers import Blip2QFormerConfig, Blip2QFormerModel
183
+
184
+ >>> # Initializing a BLIP-2 Salesforce/blip2-opt-2.7b style configuration
185
+ >>> configuration = Blip2QFormerConfig()
186
+
187
+ >>> # Initializing a model (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
188
+ >>> model = Blip2QFormerModel(configuration)
189
+ >>> # Accessing the model configuration
190
+ >>> configuration = model.config
191
+ ```"""
192
+
193
+ model_type = "blip_2_qformer"
194
+
195
+ def __init__(
196
+ self,
197
+ vocab_size=30522,
198
+ hidden_size=768,
199
+ num_hidden_layers=12,
200
+ num_attention_heads=12,
201
+ intermediate_size=3072,
202
+ hidden_act="gelu",
203
+ hidden_dropout_prob=0.1,
204
+ attention_probs_dropout_prob=0.1,
205
+ max_position_embeddings=512,
206
+ initializer_range=0.02,
207
+ layer_norm_eps=1e-12,
208
+ pad_token_id=0,
209
+ position_embedding_type="absolute",
210
+ cross_attention_frequency=2,
211
+ encoder_hidden_size=1408,
212
+ **kwargs,
213
+ ):
214
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
215
+
216
+ self.vocab_size = vocab_size
217
+ self.hidden_size = hidden_size
218
+ self.num_hidden_layers = num_hidden_layers
219
+ self.num_attention_heads = num_attention_heads
220
+ self.hidden_act = hidden_act
221
+ self.intermediate_size = intermediate_size
222
+ self.hidden_dropout_prob = hidden_dropout_prob
223
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
224
+ self.max_position_embeddings = max_position_embeddings
225
+ self.initializer_range = initializer_range
226
+ self.layer_norm_eps = layer_norm_eps
227
+ self.position_embedding_type = position_embedding_type
228
+ self.cross_attention_frequency = cross_attention_frequency
229
+ self.encoder_hidden_size = encoder_hidden_size
230
+
231
+ @classmethod
232
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
233
+ cls._set_token_in_kwargs(kwargs)
234
+
235
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
236
+
237
+ # get the qformer config dict if we are loading from Blip2Config
238
+ if config_dict.get("model_type") == "blip-2":
239
+ config_dict = config_dict["qformer_config"]
240
+
241
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
242
+ logger.warning(
243
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
244
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
245
+ )
246
+
247
+ return cls.from_dict(config_dict, **kwargs)
248
+
249
+
250
+ class Blip2Config(PretrainedConfig):
251
+ r"""
252
+ [`Blip2Config`] is the configuration class to store the configuration of a [`Blip2ForConditionalGeneration`]. It is
253
+ used to instantiate a BLIP-2 model according to the specified arguments, defining the vision model, Q-Former model
254
+ and language model configs. Instantiating a configuration with the defaults will yield a similar configuration to
255
+ that of the BLIP-2 [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture.
256
+
257
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
258
+ documentation from [`PretrainedConfig`] for more information.
259
+
260
+ Args:
261
+ vision_config (`dict`, *optional*):
262
+ Dictionary of configuration options used to initialize [`Blip2VisionConfig`].
263
+ qformer_config (`dict`, *optional*):
264
+ Dictionary of configuration options used to initialize [`Blip2QFormerConfig`].
265
+ text_config (`dict`, *optional*):
266
+ Dictionary of configuration options used to initialize any [`PretrainedConfig`].
267
+ num_query_tokens (`int`, *optional*, defaults to 32):
268
+ The number of query tokens passed through the Transformer.
269
+
270
+ kwargs (*optional*):
271
+ Dictionary of keyword arguments.
272
+
273
+ Example:
274
+
275
+ ```python
276
+ >>> from transformers import (
277
+ ... Blip2VisionConfig,
278
+ ... Blip2QFormerConfig,
279
+ ... OPTConfig,
280
+ ... Blip2Config,
281
+ ... Blip2ForConditionalGeneration,
282
+ ... )
283
+
284
+ >>> # Initializing a Blip2Config with Salesforce/blip2-opt-2.7b style configuration
285
+ >>> configuration = Blip2Config()
286
+
287
+ >>> # Initializing a Blip2ForConditionalGeneration (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
288
+ >>> model = Blip2ForConditionalGeneration(configuration)
289
+
290
+ >>> # Accessing the model configuration
291
+ >>> configuration = model.config
292
+
293
+ >>> # We can also initialize a Blip2Config from a Blip2VisionConfig, Blip2QFormerConfig and any PretrainedConfig
294
+
295
+ >>> # Initializing BLIP-2 vision, BLIP-2 Q-Former and language model configurations
296
+ >>> vision_config = Blip2VisionConfig()
297
+ >>> qformer_config = Blip2QFormerConfig()
298
+ >>> text_config = OPTConfig()
299
+
300
+ >>> config = Blip2Config.from_text_vision_configs(vision_config, qformer_config, text_config)
301
+ ```"""
302
+
303
+ model_type = "blip-2"
304
+
305
+ def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
306
+ super().__init__(**kwargs)
307
+
308
+ if vision_config is None:
309
+ vision_config = {}
310
+ logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
311
+
312
+ if qformer_config is None:
313
+ qformer_config = {}
314
+ logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
315
+
316
+ if text_config is None:
317
+ text_config = {}
318
+ logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
319
+
320
+ self.vision_config = Blip2VisionConfig(**vision_config)
321
+ self.qformer_config = Blip2QFormerConfig(**qformer_config)
322
+ text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
323
+ self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
324
+
325
+ self.tie_word_embeddings = self.text_config.tie_word_embeddings
326
+ self.is_encoder_decoder = self.text_config.is_encoder_decoder
327
+
328
+ self.num_query_tokens = num_query_tokens
329
+ self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
330
+ self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
331
+ self.initializer_factor = 1.0
332
+ self.initializer_range = 0.02
333
+
334
+ @classmethod
335
+ def from_vision_qformer_text_configs(
336
+ cls,
337
+ vision_config: Blip2VisionConfig,
338
+ qformer_config: Blip2QFormerConfig,
339
+ text_config: PretrainedConfig,
340
+ **kwargs,
341
+ ):
342
+ r"""
343
+ Instantiate a [`Blip2Config`] (or a derived class) from a BLIP-2 vision model, Q-Former and language model
344
+ configurations.
345
+
346
+ Returns:
347
+ [`Blip2Config`]: An instance of a configuration object
348
+ """
349
+
350
+ return cls(
351
+ vision_config=vision_config.to_dict(),
352
+ qformer_config=qformer_config.to_dict(),
353
+ text_config=text_config.to_dict(),
354
+ **kwargs,
355
+ )
venv/lib/python3.10/site-packages/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py ADDED
@@ -0,0 +1,291 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Convert BLIP-2 checkpoints from the original repository.
17
+
18
+ URL: https://github.com/salesforce/LAVIS/tree/main/projects/blip2
19
+ """
20
+
21
+ import argparse
22
+
23
+ import requests
24
+ import torch
25
+
26
+ # pip3 install salesforce-lavis
27
+ # I'm actually installing a slightly modified version: pip3 install -U git+https://github.com/nielsrogge/LAVIS.git@blip2_float32
28
+ # to make sure we can compare both original and HF implementation in float32
29
+ from lavis.models import load_model_and_preprocess
30
+ from PIL import Image
31
+
32
+ from transformers import (
33
+ AutoTokenizer,
34
+ Blip2Config,
35
+ Blip2ForConditionalGeneration,
36
+ Blip2Processor,
37
+ Blip2VisionConfig,
38
+ BlipImageProcessor,
39
+ OPTConfig,
40
+ T5Config,
41
+ set_seed,
42
+ )
43
+ from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
44
+
45
+
46
+ def load_demo_image():
47
+ url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
48
+ image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
49
+
50
+ return image
51
+
52
+
53
+ # here we list all keys to be renamed (original name on the left, our name on the right)
54
+ def create_rename_keys(config):
55
+ rename_keys = []
56
+ # fmt: off
57
+
58
+ # vision encoder
59
+ rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
60
+ rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
61
+ rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
62
+ rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
63
+ rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
64
+ rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
65
+
66
+ for i in range(config.vision_config.num_hidden_layers):
67
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
68
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
69
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
70
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
71
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
72
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
73
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
74
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
75
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
76
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
77
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
78
+
79
+ # QFormer
80
+ rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
81
+ rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
82
+
83
+ # fmt: on
84
+ return rename_keys
85
+
86
+
87
+ def rename_key(dct, old, new):
88
+ val = dct.pop(old)
89
+ dct[new] = val
90
+
91
+
92
+ def read_in_q_v_bias(state_dict, config):
93
+ for i in range(config.vision_config.num_hidden_layers):
94
+ # read in original q and v biases
95
+ q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
96
+ v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
97
+
98
+ # next, set bias in the state dict
99
+ qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
100
+ state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
101
+
102
+
103
+ def get_blip2_config(model_name, eos_token_id):
104
+ image_size = 364 if "coco" in model_name else 224
105
+ vision_config = Blip2VisionConfig(image_size=image_size).to_dict()
106
+
107
+ # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
108
+ # seems like flan-T5 models don't have bos_token_id properly set?
109
+ if "opt-2.7b" in model_name:
110
+ text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
111
+ elif "opt-6.7b" in model_name:
112
+ text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
113
+ elif "t5-xl" in model_name:
114
+ text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
115
+ elif "t5-xxl" in model_name:
116
+ text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
117
+
118
+ config = Blip2Config(vision_config=vision_config, text_config=text_config)
119
+
120
+ return config, image_size
121
+
122
+
123
+ @torch.no_grad()
124
+ def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
125
+ """
126
+ Copy/paste/tweak model's weights to Transformers design.
127
+ """
128
+ tokenizer = (
129
+ AutoTokenizer.from_pretrained("facebook/opt-2.7b")
130
+ if "opt" in model_name
131
+ else AutoTokenizer.from_pretrained("google/flan-t5-xl")
132
+ )
133
+ eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
134
+ config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)
135
+
136
+ hf_model = Blip2ForConditionalGeneration(config).eval()
137
+
138
+ model_name_to_original = {
139
+ "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
140
+ "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
141
+ "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
142
+ "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
143
+ "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
144
+ "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
145
+ "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
146
+ }
147
+
148
+ name, type = model_name_to_original[model_name]
149
+
150
+ # note: this script is tested on 2 GPUs, as models are compared in float32,
151
+ # which requires quite some memory. Hence loading both on a
152
+ # separate device is the easiest to compare
153
+ hf_model_device = "cuda:0" if torch.cuda.is_available() else "cpu"
154
+ lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
155
+
156
+ # load original model
157
+ print("Loading original model...")
158
+ original_model, vis_processors, _ = load_model_and_preprocess(
159
+ name=name, model_type=type, is_eval=True, device=lavis_device
160
+ )
161
+ original_model.eval()
162
+ print("Done!")
163
+
164
+ # update state dict keys
165
+ state_dict = original_model.state_dict()
166
+ rename_keys = create_rename_keys(config)
167
+ for src, dest in rename_keys:
168
+ rename_key(state_dict, src, dest)
169
+
170
+ # some keys can be renamed efficiently
171
+ for key, val in state_dict.copy().items():
172
+ val = state_dict.pop(key)
173
+ if key.startswith("Qformer.bert"):
174
+ key = key.replace("Qformer.bert", "qformer")
175
+ if "attention.self" in key:
176
+ key = key.replace("self", "attention")
177
+ if "opt_proj" in key:
178
+ key = key.replace("opt_proj", "language_projection")
179
+ if "t5_proj" in key:
180
+ key = key.replace("t5_proj", "language_projection")
181
+ if key.startswith("opt"):
182
+ key = key.replace("opt", "language")
183
+ if key.startswith("t5"):
184
+ key = key.replace("t5", "language")
185
+ state_dict[key] = val
186
+
187
+ # read in qv biases
188
+ read_in_q_v_bias(state_dict, config)
189
+
190
+ missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
191
+ assert len(missing_keys) == 0
192
+ assert unexpected_keys == ["qformer.embeddings.position_ids"]
193
+
194
+ image = load_demo_image()
195
+ original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
196
+ input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(hf_model_device)
197
+
198
+ # create processor
199
+ image_processor = BlipImageProcessor(
200
+ size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
201
+ )
202
+ processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
203
+ pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(hf_model_device)
204
+
205
+ # make sure processor creates exact same pixel values
206
+ assert torch.allclose(pixel_values, original_pixel_values.to(pixel_values.device))
207
+
208
+ original_model.to(lavis_device)
209
+ hf_model.to(hf_model_device)
210
+ with torch.no_grad():
211
+ if "opt" in model_name:
212
+ original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
213
+ logits = hf_model(pixel_values, input_ids).logits
214
+ else:
215
+ original_logits = original_model(
216
+ {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
217
+ ).logits
218
+ labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
219
+ logits = hf_model(pixel_values, input_ids, labels=labels).logits
220
+
221
+ assert original_logits.shape == logits.shape
222
+ print("First values of original logits:", original_logits[0, :3, :3])
223
+ print("First values of HF logits:", logits[0, :3, :3])
224
+
225
+ # assert values
226
+ assert torch.allclose(original_logits.to(logits.device), logits, atol=1e-4)
227
+ print("Looks ok!")
228
+
229
+ print("Generating a caption...")
230
+ prompt = "Question: what object is in this image? Answer:"
231
+ input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(hf_model_device)
232
+
233
+ set_seed(42)
234
+
235
+ original_outputs = original_model.generate(
236
+ {"image": original_pixel_values, "prompt": prompt}, use_nucleus_sampling=True
237
+ )
238
+ outputs = hf_model.generate(
239
+ pixel_values,
240
+ input_ids,
241
+ do_sample=True,
242
+ num_beams=5,
243
+ max_length=30,
244
+ min_length=1,
245
+ top_p=0.9,
246
+ repetition_penalty=1.0,
247
+ length_penalty=1.0,
248
+ temperature=1,
249
+ )
250
+ output_text = processor.batch_decode(outputs, skip_special_tokens=True)
251
+ output_text = [text.strip() for text in output_text]
252
+ print("Original generation:", original_outputs)
253
+ print("HF generation:", output_text)
254
+
255
+ if pytorch_dump_folder_path is not None:
256
+ processor.save_pretrained(pytorch_dump_folder_path)
257
+ hf_model.save_pretrained(pytorch_dump_folder_path)
258
+
259
+ if push_to_hub:
260
+ processor.push_to_hub(f"nielsr/{model_name}")
261
+ hf_model.push_to_hub(f"nielsr/{model_name}")
262
+
263
+
264
+ if __name__ == "__main__":
265
+ parser = argparse.ArgumentParser()
266
+ choices = [
267
+ "blip2-opt-2.7b",
268
+ "blip2-opt-6.7b",
269
+ "blip2-opt-2.7b-coco",
270
+ "blip2-opt-6.7b-coco",
271
+ "blip2-flan-t5-xl",
272
+ "blip2-flan-t5-xl-coco",
273
+ "blip2-flan-t5-xxl",
274
+ ]
275
+ parser.add_argument(
276
+ "--model_name",
277
+ default="blip2-opt-2.7b",
278
+ choices=choices,
279
+ type=str,
280
+ help="Path to hf config.json of model to convert",
281
+ )
282
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
283
+ parser.add_argument(
284
+ "--push_to_hub",
285
+ action="store_true",
286
+ help="Whether to push the model and processor to the hub after converting",
287
+ )
288
+
289
+ args = parser.parse_args()
290
+
291
+ convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
venv/lib/python3.10/site-packages/transformers/models/blip_2/modeling_blip_2.py ADDED
@@ -0,0 +1,1853 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Salesforce Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BLIP-2 model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import Any, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import CrossEntropyLoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutput,
29
+ BaseModelOutputWithPastAndCrossAttentions,
30
+ BaseModelOutputWithPooling,
31
+ BaseModelOutputWithPoolingAndCrossAttentions,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
35
+ from ...utils import (
36
+ ModelOutput,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM
43
+ from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ _CHECKPOINT_FOR_DOC = "Salesforce/blip2-opt-2.7b"
49
+
50
+
51
+ from ..deprecated._archive_maps import BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
52
+
53
+
54
+ @dataclass
55
+ class Blip2ForConditionalGenerationModelOutput(ModelOutput):
56
+ """
57
+ Class defining the outputs of [`Blip2ForConditionalGeneration`].
58
+
59
+ Args:
60
+ loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
61
+ Language modeling loss from the language model.
62
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
63
+ Prediction scores of the language modeling head of the language model.
64
+ vision_outputs (`BaseModelOutputWithPooling`):
65
+ Outputs of the vision encoder.
66
+ qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
67
+ Outputs of the Q-Former (Querying Transformer).
68
+ language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
69
+ Outputs of the language model.
70
+ """
71
+
72
+ loss: Optional[Tuple[torch.FloatTensor]] = None
73
+ logits: Optional[Tuple[torch.FloatTensor]] = None
74
+ vision_outputs: Optional[torch.FloatTensor] = None
75
+ qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None
76
+ language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None
77
+
78
+ def to_tuple(self) -> Tuple[Any]:
79
+ return tuple(
80
+ self[k]
81
+ if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"]
82
+ else getattr(self, k).to_tuple()
83
+ for k in self.keys()
84
+ )
85
+
86
+
87
+ # Copied from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->Blip2
88
+ class Blip2VisionEmbeddings(nn.Module):
89
+ def __init__(self, config: Blip2VisionConfig):
90
+ super().__init__()
91
+ self.config = config
92
+ self.embed_dim = config.hidden_size
93
+ self.image_size = config.image_size
94
+ self.patch_size = config.patch_size
95
+
96
+ self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
97
+
98
+ self.patch_embedding = nn.Conv2d(
99
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
100
+ )
101
+
102
+ self.num_patches = (self.image_size // self.patch_size) ** 2
103
+ self.num_positions = self.num_patches + 1
104
+
105
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
106
+
107
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
108
+ batch_size = pixel_values.shape[0]
109
+ target_dtype = self.patch_embedding.weight.dtype
110
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
111
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
112
+
113
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
114
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
115
+ embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype)
116
+ return embeddings
117
+
118
+
119
+ class Blip2Attention(nn.Module):
120
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
121
+
122
+ def __init__(self, config):
123
+ super().__init__()
124
+ self.config = config
125
+ self.embed_dim = config.hidden_size
126
+ self.num_heads = config.num_attention_heads
127
+ self.head_dim = self.embed_dim // self.num_heads
128
+ if self.head_dim * self.num_heads != self.embed_dim:
129
+ raise ValueError(
130
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
131
+ f" {self.num_heads})."
132
+ )
133
+ self.scale = self.head_dim**-0.5
134
+ self.dropout = nn.Dropout(config.attention_dropout)
135
+
136
+ # small tweak here compared to CLIP, no bias here
137
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
138
+
139
+ if config.qkv_bias:
140
+ q_bias = nn.Parameter(torch.zeros(self.embed_dim))
141
+ v_bias = nn.Parameter(torch.zeros(self.embed_dim))
142
+ else:
143
+ q_bias = None
144
+ v_bias = None
145
+
146
+ if q_bias is not None:
147
+ qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
148
+ self.qkv.bias = nn.Parameter(qkv_bias)
149
+
150
+ self.projection = nn.Linear(self.embed_dim, self.embed_dim)
151
+
152
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
153
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
154
+
155
+ def forward(
156
+ self,
157
+ hidden_states: torch.Tensor,
158
+ head_mask: Optional[torch.Tensor] = None,
159
+ output_attentions: Optional[bool] = False,
160
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
161
+ """Input shape: Batch x Time x Channel"""
162
+
163
+ bsz, tgt_len, embed_dim = hidden_states.size()
164
+
165
+ mixed_qkv = self.qkv(hidden_states)
166
+
167
+ mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
168
+ 2, 0, 3, 1, 4
169
+ )
170
+ query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
171
+
172
+ # Take the dot product between "query" and "key" to get the raw attention scores.
173
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
174
+
175
+ attention_scores = attention_scores * self.scale
176
+
177
+ # Normalize the attention scores to probabilities.
178
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
179
+
180
+ # This is actually dropping out entire tokens to attend to, which might
181
+ # seem a bit unusual, but is taken from the original Transformer paper.
182
+ attention_probs = self.dropout(attention_probs)
183
+
184
+ # Mask heads if we want to
185
+ if head_mask is not None:
186
+ attention_probs = attention_probs * head_mask
187
+
188
+ context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
189
+
190
+ new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
191
+ context_layer = context_layer.reshape(new_context_layer_shape)
192
+
193
+ output = self.projection(context_layer)
194
+
195
+ outputs = (output, attention_probs) if output_attentions else (output, None)
196
+
197
+ return outputs
198
+
199
+
200
+ # Copied from transformers.models.blip.modeling_blip.BlipMLP
201
+ class Blip2MLP(nn.Module):
202
+ def __init__(self, config):
203
+ super().__init__()
204
+ self.config = config
205
+ self.activation_fn = ACT2FN[config.hidden_act]
206
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
207
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
208
+
209
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
210
+ hidden_states = self.fc1(hidden_states)
211
+ hidden_states = self.activation_fn(hidden_states)
212
+ hidden_states = self.fc2(hidden_states)
213
+ return hidden_states
214
+
215
+
216
+ # Copied from transformers.models.blip.modeling_blip.BlipEncoderLayer with Blip->Blip2
217
+ class Blip2EncoderLayer(nn.Module):
218
+ def __init__(self, config: Blip2Config):
219
+ super().__init__()
220
+ self.embed_dim = config.hidden_size
221
+ self.self_attn = Blip2Attention(config)
222
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
223
+ self.mlp = Blip2MLP(config)
224
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
225
+
226
+ def forward(
227
+ self,
228
+ hidden_states: torch.Tensor,
229
+ attention_mask: torch.Tensor,
230
+ output_attentions: Optional[bool] = False,
231
+ ) -> Tuple[torch.FloatTensor]:
232
+ """
233
+ Args:
234
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
235
+ attention_mask (`torch.FloatTensor`): attention mask of size
236
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
237
+ `(config.encoder_attention_heads,)`.
238
+ output_attentions (`bool`, *optional*):
239
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
240
+ returned tensors for more detail.
241
+ """
242
+ residual = hidden_states
243
+
244
+ hidden_states = self.layer_norm1(hidden_states)
245
+ hidden_states, attn_weights = self.self_attn(
246
+ hidden_states=hidden_states,
247
+ head_mask=attention_mask,
248
+ output_attentions=output_attentions,
249
+ )
250
+ hidden_states = hidden_states + residual
251
+ residual = hidden_states
252
+ hidden_states = self.layer_norm2(hidden_states)
253
+ hidden_states = self.mlp(hidden_states)
254
+
255
+ hidden_states = hidden_states + residual
256
+
257
+ outputs = (hidden_states,)
258
+
259
+ if output_attentions:
260
+ outputs += (attn_weights,)
261
+
262
+ return outputs
263
+
264
+
265
+ class Blip2PreTrainedModel(PreTrainedModel):
266
+ """
267
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
268
+ models.
269
+ """
270
+
271
+ config_class = Blip2Config
272
+ base_model_prefix = "blip"
273
+ supports_gradient_checkpointing = True
274
+ _no_split_modules = ["Blip2Attention", "T5Block", "OPTDecoderLayer"]
275
+ _skip_keys_device_placement = "past_key_values"
276
+ _keep_in_fp32_modules = ["wo"]
277
+
278
+ def _init_weights(self, module):
279
+ """Initialize the weights"""
280
+ factor = self.config.initializer_range
281
+ if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
282
+ module.weight.data.normal_(mean=0.0, std=factor)
283
+ if hasattr(module, "bias") and module.bias is not None:
284
+ module.bias.data.zero_()
285
+
286
+ if isinstance(module, Blip2VisionEmbeddings):
287
+ if hasattr(self.config, "vision_config"):
288
+ factor = self.config.vision_config.initializer_range
289
+ nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
290
+ nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
291
+
292
+ elif isinstance(module, nn.LayerNorm):
293
+ module.bias.data.zero_()
294
+ module.weight.data.fill_(1.0)
295
+ elif isinstance(module, nn.Linear) and module.bias is not None:
296
+ module.bias.data.zero_()
297
+
298
+
299
+ BLIP_2_START_DOCSTRING = r"""
300
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
301
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
302
+ etc.)
303
+
304
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
305
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
306
+ and behavior.
307
+
308
+ Parameters:
309
+ config ([`Blip2Config`]): Model configuration class with all the parameters of the model.
310
+ Initializing with a config file does not load the weights associated with the model, only the
311
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
312
+ """
313
+
314
+ BLIP_2_VISION_INPUTS_DOCSTRING = r"""
315
+ Args:
316
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
317
+ Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for
318
+ details.
319
+ output_attentions (`bool`, *optional*):
320
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
321
+ tensors for more detail.
322
+ output_hidden_states (`bool`, *optional*):
323
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
324
+ more detail.
325
+ return_dict (`bool`, *optional*):
326
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
327
+ """
328
+
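+ # A minimal sketch of producing `pixel_values` for the vision inputs documented above, assuming the
+ # `Salesforce/blip2-opt-2.7b` checkpoint and a PIL image bound to `image` (shapes are typical, not guaranteed):
+ #   from transformers import Blip2Processor
+ #   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
+ #   pixel_values = processor(images=image, return_tensors="pt").pixel_values  # (1, 3, 224, 224) for this checkpoint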
329
+ BLIP_2_TEXT_INPUTS_DOCSTRING = r"""
330
+ Args:
331
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
332
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
333
+ it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
334
+ [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
335
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
336
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
337
+ - 1 for tokens that are **not masked**,
338
+ - 0 for tokens that are **masked**.
339
+ [What are attention masks?](../glossary#attention-mask)
340
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
341
+ Indices of decoder input sequence tokens in the vocabulary.
342
+
343
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
344
+ [`PreTrainedTokenizer.__call__`] for details.
345
+
346
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
347
+
348
+ T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
349
+ is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
350
+
351
+ To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
352
+ Training](./t5#training).
353
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
354
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
355
+ be used by default.
356
+ output_attentions (`bool`, *optional*):
357
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
358
+ tensors for more detail.
359
+ output_hidden_states (`bool`, *optional*):
360
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
361
+ more detail.
362
+ return_dict (`bool`, *optional*):
363
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
364
+ """
365
+
366
+ BLIP_2_INPUTS_DOCSTRING = r"""
367
+ Args:
368
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
369
+ Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for
370
+ details.
371
+
372
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
373
+ Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
374
+ provided to serve as a text prompt, which the language model can continue.
375
+
376
+ Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details.
377
+
378
+ [What are input IDs?](../glossary#input-ids)
379
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
380
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
381
+
382
+ - 1 for tokens that are **not masked**,
383
+ - 0 for tokens that are **masked**.
384
+
385
+ [What are attention masks?](../glossary#attention-mask)
386
+
387
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
388
+ Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an
389
+ encoder-decoder language model (like T5) is used.
390
+
391
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
392
+ [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)
393
+
394
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
395
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
396
+ be used by default.
397
+
398
+ Only relevant in case an encoder-decoder language model (like T5) is used.
399
+
400
+ output_attentions (`bool`, *optional*):
401
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
402
+ tensors for more detail.
403
+ output_hidden_states (`bool`, *optional*):
404
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
405
+ more detail.
406
+ return_dict (`bool`, *optional*):
407
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
408
+ """
409
+
410
+
411
+ # Copied from transformers.models.blip.modeling_blip.BlipEncoder with Blip->Blip2
412
+ class Blip2Encoder(nn.Module):
413
+ """
414
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
415
+ [`Blip2EncoderLayer`].
416
+
417
+ Args:
418
+ config (`Blip2Config`):
419
+ The corresponding vision configuration for the `Blip2Encoder`.
420
+ """
421
+
422
+ def __init__(self, config: Blip2Config):
423
+ super().__init__()
424
+ self.config = config
425
+ self.layers = nn.ModuleList([Blip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
426
+ self.gradient_checkpointing = False
427
+
428
+ def forward(
429
+ self,
430
+ inputs_embeds,
431
+ attention_mask: Optional[torch.Tensor] = None,
432
+ output_attentions: Optional[bool] = None,
433
+ output_hidden_states: Optional[bool] = None,
434
+ return_dict: Optional[bool] = None,
435
+ ) -> Union[Tuple, BaseModelOutput]:
436
+ r"""
437
+ Args:
438
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
439
+ Embedded representation of the inputs. Should be float, not int tokens.
440
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
441
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
442
+
443
+ - 1 for tokens that are **not masked**,
444
+ - 0 for tokens that are **masked**.
445
+
446
+ [What are attention masks?](../glossary#attention-mask)
447
+ output_attentions (`bool`, *optional*):
448
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
449
+ returned tensors for more detail.
450
+ output_hidden_states (`bool`, *optional*):
451
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
452
+ for more detail.
453
+ return_dict (`bool`, *optional*):
454
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
455
+ """
456
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
457
+ output_hidden_states = (
458
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
459
+ )
460
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
461
+
462
+ encoder_states = () if output_hidden_states else None
463
+ all_attentions = () if output_attentions else None
464
+
465
+ hidden_states = inputs_embeds
466
+ for idx, encoder_layer in enumerate(self.layers):
467
+ if output_hidden_states:
468
+ encoder_states = encoder_states + (hidden_states,)
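+ # Gradient checkpointing trades compute for memory: layer activations are recomputed during the backward pass instead of being stored.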
469
+ if self.gradient_checkpointing and self.training:
470
+ layer_outputs = self._gradient_checkpointing_func(
471
+ encoder_layer.__call__,
472
+ hidden_states,
473
+ attention_mask,
474
+ output_attentions,
475
+ )
476
+ else:
477
+ layer_outputs = encoder_layer(
478
+ hidden_states,
479
+ attention_mask,
480
+ output_attentions=output_attentions,
481
+ )
482
+
483
+ hidden_states = layer_outputs[0]
484
+
485
+ if output_attentions:
486
+ all_attentions = all_attentions + (layer_outputs[1],)
487
+
488
+ if output_hidden_states:
489
+ encoder_states = encoder_states + (hidden_states,)
490
+
491
+ if not return_dict:
492
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
493
+ return BaseModelOutput(
494
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
495
+ )
496
+
497
+
498
+ # Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->Blip2, BLIP->BLIP_2
499
+ class Blip2VisionModel(Blip2PreTrainedModel):
500
+ main_input_name = "pixel_values"
501
+ config_class = Blip2VisionConfig
502
+
503
+ def __init__(self, config: Blip2VisionConfig):
504
+ super().__init__(config)
505
+ self.config = config
506
+ embed_dim = config.hidden_size
507
+
508
+ self.embeddings = Blip2VisionEmbeddings(config)
509
+ self.encoder = Blip2Encoder(config)
510
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
511
+
512
+ self.post_init()
513
+
514
+ @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING)
515
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Blip2VisionConfig)
516
+ def forward(
517
+ self,
518
+ pixel_values: Optional[torch.FloatTensor] = None,
519
+ output_attentions: Optional[bool] = None,
520
+ output_hidden_states: Optional[bool] = None,
521
+ return_dict: Optional[bool] = None,
522
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
523
+ r"""
524
+ Returns:
525
+
526
+ """
527
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
528
+ output_hidden_states = (
529
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
530
+ )
531
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
532
+
533
+ if pixel_values is None:
534
+ raise ValueError("You have to specify pixel_values")
535
+
536
+ hidden_states = self.embeddings(pixel_values)
537
+
538
+ encoder_outputs = self.encoder(
539
+ inputs_embeds=hidden_states,
540
+ output_attentions=output_attentions,
541
+ output_hidden_states=output_hidden_states,
542
+ return_dict=return_dict,
543
+ )
544
+
545
+ last_hidden_state = encoder_outputs[0]
546
+ last_hidden_state = self.post_layernorm(last_hidden_state)
547
+
548
+ pooled_output = last_hidden_state[:, 0, :]
549
+ pooled_output = self.post_layernorm(pooled_output)
550
+
551
+ if not return_dict:
552
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
553
+
554
+ return BaseModelOutputWithPooling(
555
+ last_hidden_state=last_hidden_state,
556
+ pooler_output=pooled_output,
557
+ hidden_states=encoder_outputs.hidden_states,
558
+ attentions=encoder_outputs.attentions,
559
+ )
560
+
561
+ def get_input_embeddings(self):
562
+ return self.embeddings
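+ # A minimal usage sketch for the vision tower, assuming the default `Blip2VisionConfig`
+ # (224x224 images, 14x14 patches, hidden size 1408), so the output has 256 patch tokens plus one class token:
+ #   vision_model = Blip2VisionModel(Blip2VisionConfig())
+ #   pixel_values = torch.randn(1, 3, 224, 224)
+ #   outputs = vision_model(pixel_values=pixel_values)
+ #   outputs.last_hidden_state.shape  # torch.Size([1, 257, 1408])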
563
+
564
+
565
+ class Blip2QFormerMultiHeadAttention(nn.Module):
566
+ def __init__(self, config, is_cross_attention=False):
567
+ super().__init__()
568
+ self.config = config
569
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
570
+ raise ValueError(
571
+ "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
572
+ % (config.hidden_size, config.num_attention_heads)
573
+ )
574
+
575
+ self.num_attention_heads = config.num_attention_heads
576
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
577
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
578
+
579
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
580
+ if is_cross_attention:
581
+ self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
582
+ self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
583
+ else:
584
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
585
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
586
+
587
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
588
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
589
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
590
+ self.max_position_embeddings = config.max_position_embeddings
591
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
592
+ self.save_attention = False
593
+
594
+ def save_attn_gradients(self, attn_gradients):
595
+ self.attn_gradients = attn_gradients
596
+
597
+ def get_attn_gradients(self):
598
+ return self.attn_gradients
599
+
600
+ def save_attention_map(self, attention_map):
601
+ self.attention_map = attention_map
602
+
603
+ def get_attention_map(self):
604
+ return self.attention_map
605
+
606
+ def transpose_for_scores(self, x):
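+ # Reshape (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size) so attention is computed per head.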
607
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
608
+ x = x.view(*new_x_shape)
609
+ return x.permute(0, 2, 1, 3)
610
+
611
+ def forward(
612
+ self,
613
+ hidden_states,
614
+ attention_mask=None,
615
+ head_mask=None,
616
+ encoder_hidden_states=None,
617
+ encoder_attention_mask=None,
618
+ past_key_value=None,
619
+ output_attentions=False,
620
+ ):
621
+ # If this is instantiated as a cross-attention module, the keys
622
+ # and values come from an encoder; the attention mask needs to be
623
+ # such that the encoder's padding tokens are not attended to.
624
+ is_cross_attention = encoder_hidden_states is not None
625
+
626
+ if is_cross_attention:
627
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
628
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
629
+ attention_mask = encoder_attention_mask
630
+ elif past_key_value is not None:
631
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
632
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
633
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
634
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
635
+ else:
636
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
637
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
638
+
639
+ mixed_query_layer = self.query(hidden_states)
640
+
641
+ query_layer = self.transpose_for_scores(mixed_query_layer)
642
+
643
+ past_key_value = (key_layer, value_layer)
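+ # Return the current key/value tensors (after any concatenation with the cache) so callers can reuse them for incremental decoding.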
644
+
645
+ # Take the dot product between "query" and "key" to get the raw attention scores.
646
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
647
+
648
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
649
+ seq_length = hidden_states.size()[1]
650
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
651
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
652
+ distance = position_ids_l - position_ids_r
653
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
654
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
655
+
656
+ if self.position_embedding_type == "relative_key":
657
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
658
+ attention_scores = attention_scores + relative_position_scores
659
+ elif self.position_embedding_type == "relative_key_query":
660
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
661
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
662
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
663
+
664
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
665
+
666
+ if attention_mask is not None:
667
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
668
+ attention_scores = attention_scores + attention_mask
669
+
670
+ # Normalize the attention scores to probabilities.
671
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
672
+
673
+ if is_cross_attention and self.save_attention:
674
+ self.save_attention_map(attention_probs)
675
+ attention_probs.register_hook(self.save_attn_gradients)
676
+
677
+ # This is actually dropping out entire tokens to attend to, which might
678
+ # seem a bit unusual, but is taken from the original Transformer paper.
679
+ attention_probs_dropped = self.dropout(attention_probs)
680
+
681
+ # Mask heads if we want to
682
+ if head_mask is not None:
683
+ attention_probs_dropped = attention_probs_dropped * head_mask
684
+
685
+ context_layer = torch.matmul(attention_probs_dropped, value_layer)
686
+
687
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
688
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
689
+ context_layer = context_layer.view(*new_context_layer_shape)
690
+
691
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
692
+
693
+ outputs = outputs + (past_key_value,)
694
+ return outputs
695
+
696
+
697
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Blip2QFormer
698
+ class Blip2QFormerSelfOutput(nn.Module):
699
+ def __init__(self, config):
700
+ super().__init__()
701
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
702
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
703
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
704
+
705
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
706
+ hidden_states = self.dense(hidden_states)
707
+ hidden_states = self.dropout(hidden_states)
708
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
709
+ return hidden_states
710
+
711
+
712
+ class Blip2QFormerAttention(nn.Module):
713
+ def __init__(self, config, is_cross_attention=False):
714
+ super().__init__()
715
+ self.attention = Blip2QFormerMultiHeadAttention(config, is_cross_attention)
716
+ self.output = Blip2QFormerSelfOutput(config)
717
+ self.pruned_heads = set()
718
+
719
+ def prune_heads(self, heads):
720
+ if len(heads) == 0:
721
+ return
722
+ heads, index = find_pruneable_heads_and_indices(
723
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
724
+ )
725
+
726
+ # Prune linear layers
727
+ self.attention.query = prune_linear_layer(self.attention.query, index)
728
+ self.attention.key = prune_linear_layer(self.attention.key, index)
729
+ self.attention.value = prune_linear_layer(self.attention.value, index)
730
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
731
+
732
+ # Update hyper params and store pruned heads
733
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
734
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
735
+ self.pruned_heads = self.pruned_heads.union(heads)
736
+
737
+ def forward(
738
+ self,
739
+ hidden_states: torch.Tensor,
740
+ attention_mask: Optional[torch.FloatTensor] = None,
741
+ head_mask: Optional[torch.FloatTensor] = None,
742
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
743
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
744
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
745
+ output_attentions: Optional[bool] = False,
746
+ ) -> Tuple[torch.Tensor]:
747
+ self_outputs = self.attention(
748
+ hidden_states,
749
+ attention_mask,
750
+ head_mask,
751
+ encoder_hidden_states,
752
+ encoder_attention_mask,
753
+ past_key_value,
754
+ output_attentions,
755
+ )
756
+ attention_output = self.output(self_outputs[0], hidden_states)
757
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
758
+ return outputs
759
+
760
+
761
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Blip2QFormer
762
+ class Blip2QFormerIntermediate(nn.Module):
763
+ def __init__(self, config):
764
+ super().__init__()
765
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
766
+ if isinstance(config.hidden_act, str):
767
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
768
+ else:
769
+ self.intermediate_act_fn = config.hidden_act
770
+
771
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
772
+ hidden_states = self.dense(hidden_states)
773
+ hidden_states = self.intermediate_act_fn(hidden_states)
774
+ return hidden_states
775
+
776
+
777
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Blip2QFormer
778
+ class Blip2QFormerOutput(nn.Module):
779
+ def __init__(self, config):
780
+ super().__init__()
781
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
782
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
783
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
784
+
785
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
786
+ hidden_states = self.dense(hidden_states)
787
+ hidden_states = self.dropout(hidden_states)
788
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
789
+ return hidden_states
790
+
791
+
792
+ class Blip2QFormerLayer(nn.Module):
793
+ def __init__(self, config, layer_idx):
794
+ super().__init__()
795
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
796
+ self.seq_len_dim = 1
797
+ self.attention = Blip2QFormerAttention(config)
798
+
799
+ self.layer_idx = layer_idx
800
+
801
+ if layer_idx % config.cross_attention_frequency == 0:
802
+ self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True)
803
+ self.has_cross_attention = True
804
+ else:
805
+ self.has_cross_attention = False
806
+
807
+ self.intermediate_query = Blip2QFormerIntermediate(config)
808
+ self.output_query = Blip2QFormerOutput(config)
809
+
810
+ def forward(
811
+ self,
812
+ hidden_states,
813
+ attention_mask=None,
814
+ head_mask=None,
815
+ encoder_hidden_states=None,
816
+ encoder_attention_mask=None,
817
+ past_key_value=None,
818
+ output_attentions=False,
819
+ query_length=0,
820
+ ):
821
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
822
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
823
+ self_attention_outputs = self.attention(
824
+ hidden_states,
825
+ attention_mask,
826
+ head_mask,
827
+ output_attentions=output_attentions,
828
+ past_key_value=self_attn_past_key_value,
829
+ )
830
+ attention_output = self_attention_outputs[0]
831
+ outputs = self_attention_outputs[1:-1]
832
+
833
+ present_key_value = self_attention_outputs[-1]
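+ # The first `query_length` positions are the learned query tokens: only they go through cross-attention to the image
+ # features (when this layer has it) and the query-specific feed-forward; any trailing text tokens use the standard feed-forward branch.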
834
+
835
+ if query_length > 0:
836
+ query_attention_output = attention_output[:, :query_length, :]
837
+
838
+ if self.has_cross_attention:
839
+ if encoder_hidden_states is None:
840
+ raise ValueError("encoder_hidden_states must be given for cross-attention layers")
841
+ cross_attention_outputs = self.crossattention(
842
+ query_attention_output,
843
+ attention_mask,
844
+ head_mask,
845
+ encoder_hidden_states,
846
+ encoder_attention_mask,
847
+ output_attentions=output_attentions,
848
+ )
849
+ query_attention_output = cross_attention_outputs[0]
850
+ # add cross attentions if we output attention weights
851
+ outputs = outputs + cross_attention_outputs[1:-1]
852
+
853
+ layer_output = apply_chunking_to_forward(
854
+ self.feed_forward_chunk_query,
855
+ self.chunk_size_feed_forward,
856
+ self.seq_len_dim,
857
+ query_attention_output,
858
+ )
859
+
860
+ if attention_output.shape[1] > query_length:
861
+ layer_output_text = apply_chunking_to_forward(
862
+ self.feed_forward_chunk,
863
+ self.chunk_size_feed_forward,
864
+ self.seq_len_dim,
865
+ attention_output[:, query_length:, :],
866
+ )
867
+ layer_output = torch.cat([layer_output, layer_output_text], dim=1)
868
+ else:
869
+ layer_output = apply_chunking_to_forward(
870
+ self.feed_forward_chunk,
871
+ self.chunk_size_feed_forward,
872
+ self.seq_len_dim,
873
+ attention_output,
874
+ )
875
+ outputs = (layer_output,) + outputs
876
+
877
+ outputs = outputs + (present_key_value,)
878
+
879
+ return outputs
880
+
881
+ def feed_forward_chunk(self, attention_output):
882
+ intermediate_output = self.intermediate(attention_output)
883
+ layer_output = self.output(intermediate_output, attention_output)
884
+ return layer_output
885
+
886
+ def feed_forward_chunk_query(self, attention_output):
887
+ intermediate_output = self.intermediate_query(attention_output)
888
+ layer_output = self.output_query(intermediate_output, attention_output)
889
+ return layer_output
890
+
891
+
892
+ class Blip2QFormerEncoder(nn.Module):
893
+ def __init__(self, config):
894
+ super().__init__()
895
+ self.config = config
896
+ self.layer = nn.ModuleList(
897
+ [Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
898
+ )
899
+ self.gradient_checkpointing = False
900
+
901
+ def forward(
902
+ self,
903
+ hidden_states,
904
+ attention_mask=None,
905
+ head_mask=None,
906
+ encoder_hidden_states=None,
907
+ encoder_attention_mask=None,
908
+ past_key_values=None,
909
+ use_cache=None,
910
+ output_attentions=False,
911
+ output_hidden_states=False,
912
+ return_dict=True,
913
+ query_length=0,
914
+ ):
915
+ all_hidden_states = () if output_hidden_states else None
916
+ all_self_attentions = () if output_attentions else None
917
+ all_cross_attentions = () if output_attentions else None
918
+
919
+ next_decoder_cache = () if use_cache else None
920
+
921
+ for i in range(self.config.num_hidden_layers):
922
+ layer_module = self.layer[i]
923
+ if output_hidden_states:
924
+ all_hidden_states = all_hidden_states + (hidden_states,)
925
+
926
+ layer_head_mask = head_mask[i] if head_mask is not None else None
927
+ past_key_value = past_key_values[i] if past_key_values is not None else None
928
+
929
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
930
+ if use_cache:
931
+ logger.warning(
932
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
933
+ )
934
+ use_cache = False
935
+ layer_outputs = self._gradient_checkpointing_func(
936
+ layer_module.__call__,
937
+ hidden_states,
938
+ attention_mask,
939
+ layer_head_mask,
940
+ encoder_hidden_states,
941
+ encoder_attention_mask,
942
+ )
943
+ else:
944
+ layer_outputs = layer_module(
945
+ hidden_states,
946
+ attention_mask,
947
+ layer_head_mask,
948
+ encoder_hidden_states,
949
+ encoder_attention_mask,
950
+ past_key_value,
951
+ output_attentions,
952
+ query_length,
953
+ )
954
+
955
+ hidden_states = layer_outputs[0]
956
+ if use_cache:
957
+ next_decoder_cache += (layer_outputs[-1],)
958
+ if output_attentions:
959
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
960
+ if layer_module.has_cross_attention:
961
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
962
+
963
+ if output_hidden_states:
964
+ all_hidden_states = all_hidden_states + (hidden_states,)
965
+
966
+ if not return_dict:
967
+ return tuple(
968
+ v
969
+ for v in [
970
+ hidden_states,
971
+ next_decoder_cache,
972
+ all_hidden_states,
973
+ all_self_attentions,
974
+ all_cross_attentions,
975
+ ]
976
+ if v is not None
977
+ )
978
+ return BaseModelOutputWithPastAndCrossAttentions(
979
+ last_hidden_state=hidden_states,
980
+ past_key_values=next_decoder_cache,
981
+ hidden_states=all_hidden_states,
982
+ attentions=all_self_attentions,
983
+ cross_attentions=all_cross_attentions,
984
+ )
985
+
986
+
987
+ class Blip2QFormerModel(Blip2PreTrainedModel):
988
+ """
989
+ Querying Transformer (Q-Former), used in BLIP-2.
990
+ """
991
+
992
+ def __init__(self, config: Blip2QFormerConfig):
993
+ super().__init__(config)
994
+ self.config = config
995
+
996
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
997
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
998
+
999
+ self.encoder = Blip2QFormerEncoder(config)
1000
+
1001
+ self.post_init()
1002
+
1003
+ def get_input_embeddings(self):
1004
+ return self.embeddings.word_embeddings
1005
+
1006
+ def set_input_embeddings(self, value):
1007
+ self.embeddings.word_embeddings = value
1008
+
1009
+ def _prune_heads(self, heads_to_prune):
1010
+ """
1011
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
1012
+ class PreTrainedModel
1013
+ """
1014
+ for layer, heads in heads_to_prune.items():
1015
+ self.encoder.layer[layer].attention.prune_heads(heads)
1016
+
1017
+ def get_extended_attention_mask(
1018
+ self,
1019
+ attention_mask: torch.Tensor,
1020
+ input_shape: Tuple[int],
1021
+ device: torch.device,
1022
+ has_query: bool = False,
1023
+ ) -> torch.Tensor:
1024
+ """
1025
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
1026
+
1027
+ Arguments:
1028
+ attention_mask (`torch.Tensor`):
1029
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
1030
+ input_shape (`Tuple[int]`):
1031
+ The shape of the input to the model.
1032
+ device (`torch.device`):
1033
+ The device of the input to the model.
1034
+
1035
+ Returns:
1036
+ `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
1037
+ """
1038
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1039
+ # ourselves in which case we just need to make it broadcastable to all heads.
1040
+ if attention_mask.dim() == 3:
1041
+ extended_attention_mask = attention_mask[:, None, :, :]
1042
+ elif attention_mask.dim() == 2:
1043
+ # Provided a padding mask of dimensions [batch_size, seq_length]
1044
+ # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
1045
+ extended_attention_mask = attention_mask[:, None, None, :]
1046
+ else:
1047
+ raise ValueError(
1048
+ "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
1049
+ input_shape, attention_mask.shape
1050
+ )
1051
+ )
1052
+
1053
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
1054
+ # masked positions, this operation will create a tensor which is 0.0 for
1055
+ # positions we want to attend and -10000.0 for masked positions.
1056
+ # Since we are adding it to the raw scores before the softmax, this is
1057
+ # effectively the same as removing these entirely.
1058
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
1059
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
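+ # e.g. a padding mask of [1, 1, 0] becomes [0.0, 0.0, -10000.0]; added to the raw attention scores, the masked
+ # position ends up with near-zero probability after the softmax.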
1060
+ return extended_attention_mask
1061
+
1062
+ def forward(
1063
+ self,
1064
+ query_embeds: torch.FloatTensor,
1065
+ attention_mask: Optional[torch.FloatTensor] = None,
1066
+ head_mask: Optional[torch.FloatTensor] = None,
1067
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1068
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1069
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1070
+ use_cache: Optional[bool] = None,
1071
+ output_attentions: Optional[bool] = None,
1072
+ output_hidden_states: Optional[bool] = None,
1073
+ return_dict: Optional[bool] = None,
1074
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
1075
+ r"""
1076
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1077
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1078
+ the model is configured as a decoder.
1079
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1080
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1081
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1082
+ - 1 for tokens that are **not masked**,
1083
+ - 0 for tokens that are **masked**.
1084
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of
1085
+ shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and
1086
+ value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are
1087
+ used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key
1088
+ value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
1089
+ `(batch_size, sequence_length)`.
1090
+ use_cache (`bool`, *optional*):
1091
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1092
+ `past_key_values`).
1093
+ """
1094
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1095
+ output_hidden_states = (
1096
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1097
+ )
1098
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1099
+
1100
+ # past_key_values_length
1101
+ past_key_values_length = (
1102
+ past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0
1103
+ )
1104
+
1105
+ query_length = query_embeds.shape[1] if query_embeds is not None else 0
1106
+
1107
+ embedding_output = self.layernorm(query_embeds)
1108
+ embedding_output = self.dropout(embedding_output)
1109
+
1110
+ input_shape = embedding_output.size()[:-1]
1111
+ batch_size, seq_length = input_shape
1112
+ device = embedding_output.device
1113
+
1114
+ if attention_mask is None:
1115
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
1116
+
1117
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1118
+ # ourselves in which case we just need to make it broadcastable to all heads.
1119
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
1120
+
1121
+ # If a 2D or 3D attention mask is provided for the cross-attention
1122
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
1123
+ if encoder_hidden_states is not None:
1124
+ if isinstance(encoder_hidden_states, list):
1125
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
1126
+ else:
1127
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1128
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1129
+
1130
+ if isinstance(encoder_attention_mask, list):
1131
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
1132
+ elif encoder_attention_mask is None:
1133
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
1134
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1135
+ else:
1136
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1137
+ else:
1138
+ encoder_extended_attention_mask = None
1139
+
1140
+ # Prepare head mask if needed
1141
+ # 1.0 in head_mask indicate we keep the head
1142
+ # attention_probs has shape bsz x n_heads x N x N
1143
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1144
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1145
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1146
+
1147
+ encoder_outputs = self.encoder(
1148
+ embedding_output,
1149
+ attention_mask=extended_attention_mask,
1150
+ head_mask=head_mask,
1151
+ encoder_hidden_states=encoder_hidden_states,
1152
+ encoder_attention_mask=encoder_extended_attention_mask,
1153
+ past_key_values=past_key_values,
1154
+ use_cache=use_cache,
1155
+ output_attentions=output_attentions,
1156
+ output_hidden_states=output_hidden_states,
1157
+ return_dict=return_dict,
1158
+ query_length=query_length,
1159
+ )
1160
+ sequence_output = encoder_outputs[0]
1161
+ pooled_output = sequence_output[:, 0, :]
1162
+
1163
+ if not return_dict:
1164
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1165
+
1166
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1167
+ last_hidden_state=sequence_output,
1168
+ pooler_output=pooled_output,
1169
+ past_key_values=encoder_outputs.past_key_values,
1170
+ hidden_states=encoder_outputs.hidden_states,
1171
+ attentions=encoder_outputs.attentions,
1172
+ cross_attentions=encoder_outputs.cross_attentions,
1173
+ )
1174
+
1175
+
1176
+ @add_start_docstrings(
1177
+ """
1178
+ BLIP-2 Model for generating text and image features. The model consists of a vision encoder, Querying Transformer
1179
+ (Q-Former) and a language model.
1180
+ """,
1181
+ BLIP_2_START_DOCSTRING,
1182
+ )
1183
+ class Blip2Model(Blip2PreTrainedModel):
1184
+ config_class = Blip2Config
1185
+ main_input_name = "pixel_values"
1186
+
1187
+ def __init__(self, config: Blip2Config):
1188
+ super().__init__(config)
1189
+
1190
+ self.vision_model = Blip2VisionModel(config.vision_config)
1191
+
1192
+ self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
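+ # Learnable query tokens: the Q-Former uses these `num_query_tokens` vectors to extract a fixed-size set of visual features from the image encoder output.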
1193
+ self.qformer = Blip2QFormerModel(config.qformer_config)
1194
+
1195
+ self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
1196
+ if config.use_decoder_only_language_model:
1197
+ language_model = AutoModelForCausalLM.from_config(config.text_config)
1198
+ else:
1199
+ language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
1200
+
1201
+ # Update _tied_weights_keys using the base model used.
1202
+ if language_model._tied_weights_keys is not None:
1203
+ self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
1204
+
1205
+ self.language_model = language_model
1206
+
1207
+ # Initialize weights and apply final processing
1208
+ self.post_init()
1209
+
1210
+ def get_input_embeddings(self):
1211
+ return self.language_model.get_input_embeddings()
1212
+
1213
+ def set_input_embeddings(self, value):
1214
+ self.language_model.set_input_embeddings(value)
1215
+
1216
+ def set_output_embeddings(self, new_embeddings):
1217
+ self.language_model.set_output_embeddings(new_embeddings)
1218
+
1219
+ def get_output_embeddings(self) -> nn.Module:
1220
+ return self.language_model.get_output_embeddings()
1221
+
1222
+ def get_encoder(self):
1223
+ return self.language_model.get_encoder()
1224
+
1225
+ def get_decoder(self):
1226
+ return self.language_model.get_decoder()
1227
+
1228
+ def _tie_weights(self):
1229
+ if not self.config.use_decoder_only_language_model:
1230
+ self.language_model.encoder.embed_tokens = self.language_model.shared
1231
+ self.language_model.decoder.embed_tokens = self.language_model.shared
1232
+
1233
+ @add_start_docstrings_to_model_forward(BLIP_2_TEXT_INPUTS_DOCSTRING)
1234
+ def get_text_features(
1235
+ self,
1236
+ input_ids: Optional[torch.Tensor] = None,
1237
+ attention_mask: Optional[torch.Tensor] = None,
1238
+ decoder_input_ids: Optional[torch.Tensor] = None,
1239
+ decoder_attention_mask: Optional[torch.Tensor] = None,
1240
+ labels: Optional[torch.Tensor] = None,
1241
+ output_attentions: Optional[bool] = None,
1242
+ output_hidden_states: Optional[bool] = None,
1243
+ return_dict: Optional[bool] = None,
1244
+ ):
1245
+ r"""
1246
+ Returns:
1247
+ text_outputs (`CausalLMOutputWithPast`, or `tuple(torch.FloatTensor)` if `return_dict=False`):
1248
+ The language model outputs. If `return_dict=True`, the output is a [`CausalLMOutputWithPast`] that
1249
+ contains the language model logits, the past key values and the hidden states if
1250
+ `output_hidden_states=True`.
1251
+ Examples:
1252
+ ```python
1253
+ >>> import torch
1254
+ >>> from transformers import AutoTokenizer, Blip2Model
1255
+
1256
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
1257
+
1258
+ >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/blip2-opt-2.7b")
1259
+ >>> inputs = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt")
1260
+ >>> text_features = model.get_text_features(**inputs)
1261
+ ```"""
1262
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1263
+ output_hidden_states = (
1264
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1265
+ )
1266
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1267
+
1268
+ if self.config.use_decoder_only_language_model:
1269
+ text_outputs = self.language_model(
1270
+ input_ids=input_ids,
1271
+ attention_mask=attention_mask,
1272
+ output_attentions=output_attentions,
1273
+ output_hidden_states=output_hidden_states,
1274
+ return_dict=return_dict,
1275
+ )
1276
+ else:
1277
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
1278
+
1279
+ text_outputs = self.language_model(
1280
+ inputs_embeds=inputs_embeds,
1281
+ attention_mask=attention_mask,
1282
+ decoder_input_ids=decoder_input_ids,
1283
+ decoder_attention_mask=decoder_attention_mask,
1284
+ output_attentions=output_attentions,
1285
+ output_hidden_states=output_hidden_states,
1286
+ return_dict=return_dict,
1287
+ labels=labels,
1288
+ )
1289
+
1290
+ return text_outputs
1291
+
1292
+ @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING)
1293
+ def get_image_features(
1294
+ self,
1295
+ pixel_values: Optional[torch.FloatTensor] = None,
1296
+ output_attentions: Optional[bool] = None,
1297
+ output_hidden_states: Optional[bool] = None,
1298
+ return_dict: Optional[bool] = None,
1299
+ ):
1300
+ r"""
1301
+ Returns:
1302
+ vision_outputs (`BaseModelOutputWithPooling` or tuple of `torch.FloatTensor`):
1303
+ The vision model outputs. If `return_dict=True`, the output is a [`BaseModelOutputWithPooling`] that
1304
+ contains the image features, the pooled image features and the hidden states if
1305
+ `output_hidden_states=True`.
1306
+ Examples:
1307
+ ```python
1308
+ >>> import torch
1309
+ >>> from PIL import Image
1310
+ >>> import requests
1311
+ >>> from transformers import AutoProcessor, Blip2Model
1312
+
1313
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
1314
+
1315
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
1316
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1317
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1318
+ >>> inputs = processor(images=image, return_tensors="pt")
1319
+ >>> image_outputs = model.get_image_features(**inputs)
1320
+ ```"""
1321
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1322
+ output_hidden_states = (
1323
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1324
+ )
1325
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1326
+
1327
+ vision_outputs = self.vision_model(
1328
+ pixel_values=pixel_values,
1329
+ output_attentions=output_attentions,
1330
+ output_hidden_states=output_hidden_states,
1331
+ return_dict=return_dict,
1332
+ )
1333
+
1334
+ return vision_outputs
1335
+
1336
+ @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING)
1337
+ def get_qformer_features(
1338
+ self,
1339
+ pixel_values: Optional[torch.FloatTensor] = None,
1340
+ output_attentions: Optional[bool] = None,
1341
+ output_hidden_states: Optional[bool] = None,
1342
+ return_dict: Optional[bool] = None,
1343
+ ):
1344
+ r"""
1345
+ Returns:
1346
+ vision_outputs (`BaseModelOutputWithPooling` or tuple of `torch.FloatTensor`):
1347
+ The vision model outputs. If `return_dict=True`, the output is a [`BaseModelOutputWithPooling`] that
1348
+ contains the image features, the pooled image features and the hidden states if
1349
+ `output_hidden_states=True`.
1350
+ Examples:
1351
+ ```python
1352
+ >>> import torch
1353
+ >>> from PIL import Image
1354
+ >>> import requests
1355
+ >>> from transformers import Blip2Processor, Blip2Model
1356
+
1357
+ >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
1358
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
1359
+
1360
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1361
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1362
+ >>> inputs = processor(images=image, return_tensors="pt")
1363
+ >>> qformer_outputs = model.get_qformer_features(**inputs)
1364
+ ```"""
1365
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1366
+ output_hidden_states = (
1367
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1368
+ )
1369
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1370
+
1371
+ vision_outputs = self.vision_model(
1372
+ pixel_values=pixel_values,
1373
+ output_attentions=output_attentions,
1374
+ output_hidden_states=output_hidden_states,
1375
+ return_dict=return_dict,
1376
+ )
1377
+
1378
+ image_embeds = vision_outputs[0]
1379
+
1380
+ # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
1381
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
1382
+
1383
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
1384
+ query_outputs = self.qformer(
1385
+ query_embeds=query_tokens,
1386
+ encoder_hidden_states=image_embeds,
1387
+ encoder_attention_mask=image_attention_mask,
1388
+ output_attentions=output_attentions,
1389
+ output_hidden_states=output_hidden_states,
1390
+ return_dict=return_dict,
1391
+ )
1392
+
1393
+ return query_outputs
1394
+
1395
+ @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING)
1396
+ @replace_return_docstrings(output_type=Blip2ForConditionalGenerationModelOutput, config_class=Blip2VisionConfig)
1397
+ def forward(
1398
+ self,
1399
+ pixel_values: torch.FloatTensor,
1400
+ input_ids: torch.FloatTensor,
1401
+ attention_mask: Optional[torch.LongTensor] = None,
1402
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1403
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1404
+ output_attentions: Optional[bool] = None,
1405
+ output_hidden_states: Optional[bool] = None,
1406
+ labels: Optional[torch.LongTensor] = None,
1407
+ return_dict: Optional[bool] = None,
1408
+ ) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]:
1409
+ r"""
1410
+ Returns:
1411
+
1412
+ Examples:
1413
+
1414
+ ```python
1415
+ >>> from PIL import Image
1416
+ >>> import requests
1417
+ >>> from transformers import Blip2Processor, Blip2Model
1418
+ >>> import torch
1419
+
1420
+ >>> device = "cuda" if torch.cuda.is_available() else "cpu"
1421
+
1422
+ >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
1423
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
1424
+ >>> model.to(device) # doctest: +IGNORE_RESULT
1425
+
1426
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1427
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1428
+
1429
+ >>> prompt = "Question: how many cats are there? Answer:"
1430
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device, torch.float16)
1431
+
1432
+ >>> outputs = model(**inputs)
1433
+ ```"""
1434
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1435
+
1436
+ # step 1: forward the images through the vision encoder,
1437
+ # to get image embeddings of shape (batch_size, seq_len, hidden_size)
1438
+ vision_outputs = self.vision_model(
1439
+ pixel_values=pixel_values,
1440
+ output_attentions=output_attentions,
1441
+ output_hidden_states=output_hidden_states,
1442
+ return_dict=return_dict,
1443
+ )
1444
+ image_embeds = vision_outputs[0]
1445
+
1446
+ # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
1447
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
1448
+
1449
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
1450
+ query_outputs = self.qformer(
1451
+ query_embeds=query_tokens,
1452
+ encoder_hidden_states=image_embeds,
1453
+ encoder_attention_mask=image_attention_mask,
1454
+ output_attentions=output_attentions,
1455
+ output_hidden_states=output_hidden_states,
1456
+ return_dict=return_dict,
1457
+ )
1458
+ query_output = query_outputs[0]
1459
+
1460
+ # step 3: use the language model, conditioned on the query outputs and the prompt
1461
+ language_model_inputs = self.language_projection(query_output)
1462
+ language_model_attention_mask = torch.ones(
1463
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
1464
+ )
1465
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
1466
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds], dim=1)
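+ # The projected Q-Former outputs act as a soft visual prompt: they are prepended to the text embeddings, and the
+ # attention mask below is extended to cover these extra positions.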
1467
+
1468
+ if attention_mask is None:
1469
+ attention_mask = torch.ones_like(input_ids)
1470
+ expected_device = language_model_attention_mask.device
1471
+ attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(expected_device)], dim=1)
1472
+
1473
+ if self.config.use_decoder_only_language_model:
1474
+ outputs = self.language_model(
1475
+ inputs_embeds=inputs_embeds,
1476
+ attention_mask=attention_mask,
1477
+ output_attentions=output_attentions,
1478
+ output_hidden_states=output_hidden_states,
1479
+ return_dict=return_dict,
1480
+ )
1481
+ logits = outputs.logits if return_dict else outputs[0]
1482
+ loss = None
1483
+ # we compute the loss here since we need to take into account the sequence length of the query embeds
1484
+ if labels is not None:
1485
+ labels = labels.to(logits.device)
1486
+ logits = logits[:, -labels.size(1) :, :]
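+ # Keep only the logits aligned with the label positions (the text tokens at the end of the sequence); the prepended
+ # query-token positions do not contribute to the loss.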
1487
+ # Shift so that tokens < n predict n
1488
+ shift_logits = logits[..., :-1, :].contiguous()
1489
+ shift_labels = labels[..., 1:].contiguous().to(logits.device)
1490
+
1491
+ # Flatten the tokens
1492
+ loss_fct = CrossEntropyLoss(reduction="mean")
1493
+
1494
+ loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
1495
+ else:
1496
+ outputs = self.language_model(
1497
+ inputs_embeds=inputs_embeds,
1498
+ attention_mask=attention_mask,
1499
+ decoder_input_ids=decoder_input_ids,
1500
+ decoder_attention_mask=decoder_attention_mask,
1501
+ output_attentions=output_attentions,
1502
+ output_hidden_states=output_hidden_states,
1503
+ return_dict=return_dict,
1504
+ labels=labels,
1505
+ )
1506
+ loss = outputs.loss if return_dict else outputs[0]
1507
+ logits = outputs.logits if return_dict else outputs[1]
1508
+
1509
+ if not return_dict:
1510
+ output = (logits, vision_outputs, query_outputs, outputs)
1511
+ return ((loss,) + output) if loss is not None else output
1512
+
1513
+ return Blip2ForConditionalGenerationModelOutput(
1514
+ loss=loss,
1515
+ logits=logits,
1516
+ vision_outputs=vision_outputs,
1517
+ qformer_outputs=query_outputs,
1518
+ language_model_outputs=outputs,
1519
+ )
1520
+
1521
+
1522
+ @add_start_docstrings(
1523
+ """
1524
+ BLIP-2 Model for generating text given an image and an optional text prompt. The model consists of a vision
1525
+ encoder, Querying Transformer (Q-Former) and a language model.
1526
+
1527
+ One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
1528
+ the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.
1529
+
1530
+ <Tip>
1531
+
1532
+ Note that Flan-T5 checkpoints cannot be cast to float16. They are pre-trained using bfloat16.
1533
+
1534
+ </Tip>
1535
+ """,
1536
+ BLIP_2_START_DOCSTRING,
1537
+ )
1538
+ class Blip2ForConditionalGeneration(Blip2PreTrainedModel):
1539
+ config_class = Blip2Config
1540
+ main_input_name = "pixel_values"
1541
+
1542
+ def __init__(self, config: Blip2Config):
1543
+ super().__init__(config)
1544
+
1545
+ self.vision_model = Blip2VisionModel(config.vision_config)
1546
+
1547
+ self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
1548
+ self.qformer = Blip2QFormerModel(config.qformer_config)
1549
+
1550
+ self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
1551
+ if config.use_decoder_only_language_model:
1552
+ language_model = AutoModelForCausalLM.from_config(config.text_config)
1553
+ else:
1554
+ language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
1555
+
1556
+ # Update _tied_weights_keys using the base model used.
1557
+ if language_model._tied_weights_keys is not None:
1558
+ self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
1559
+
1560
+ self.language_model = language_model
1561
+
1562
+ # Initialize weights and apply final processing
1563
+ self.post_init()
1564
+
1565
+ def get_input_embeddings(self):
1566
+ return self.language_model.get_input_embeddings()
1567
+
1568
+ def set_input_embeddings(self, value):
1569
+ self.language_model.set_input_embeddings(value)
1570
+
1571
+ def set_output_embeddings(self, new_embeddings):
1572
+ self.language_model.set_output_embeddings(new_embeddings)
1573
+
1574
+ def get_output_embeddings(self) -> nn.Module:
1575
+ return self.language_model.get_output_embeddings()
1576
+
1577
+ def get_encoder(self):
1578
+ return self.language_model.get_encoder()
1579
+
1580
+ def get_decoder(self):
1581
+ return self.language_model.get_decoder()
1582
+
1583
+ def _tie_weights(self):
1584
+ if not self.config.use_decoder_only_language_model:
1585
+ self.language_model.encoder.embed_tokens = self.language_model.shared
1586
+ self.language_model.decoder.embed_tokens = self.language_model.shared
1587
+
1588
+ def _preprocess_accelerate(self):
1589
+ r"""
1590
+ Some pre-processing hacks to make the model `accelerate` compatible. Check
1591
+ https://github.com/huggingface/transformers/pull/21707 for more details.
1592
+ """
1593
+ hf_device_map = self.hf_device_map
1594
+
1595
+ if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1:
1596
+ # warn users about unexpected behavior when using multi-GPU + BLIP-2 + `accelerate`.
1597
+ logger.warning(
1598
+ "The `language_model` is not in the `hf_device_map` dictionary and you are running your script"
1599
+ " in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`."
1600
+ " Please pass a `device_map` that contains `language_model` to remove this warning."
1601
+ " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for"
1602
+ " more details on creating a `device_map` for large models.",
1603
+ )
1604
+
1605
+ if hasattr(self.language_model, "_hf_hook"):
1606
+ self.language_model._hf_hook.io_same_device = True # For `generate` compatibility
1607
+
1608
+ @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING)
1609
+ @replace_return_docstrings(output_type=Blip2ForConditionalGenerationModelOutput, config_class=Blip2VisionConfig)
1610
+ def forward(
1611
+ self,
1612
+ pixel_values: torch.FloatTensor,
1613
+ input_ids: torch.FloatTensor,
1614
+ attention_mask: Optional[torch.LongTensor] = None,
1615
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1616
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1617
+ output_attentions: Optional[bool] = None,
1618
+ output_hidden_states: Optional[bool] = None,
1619
+ labels: Optional[torch.LongTensor] = None,
1620
+ return_dict: Optional[bool] = None,
1621
+ ) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]:
1622
+ r"""
1623
+ Returns:
1624
+
1625
+ Examples:
1626
+
1627
+ Prepare processor, model and image input
1628
+
1629
+ ```python
1630
+ >>> from PIL import Image
1631
+ >>> import requests
1632
+ >>> from transformers import Blip2Processor, Blip2ForConditionalGeneration
1633
+ >>> import torch
1634
+
1635
+ >>> device = "cuda" if torch.cuda.is_available() else "cpu"
1636
+
1637
+ >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
1638
+ >>> model = Blip2ForConditionalGeneration.from_pretrained(
1639
+ ... "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.float16
1640
+ ... ) # doctest: +IGNORE_RESULT
1641
+
1642
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1643
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1644
+ ```
1645
+
1646
+ Image captioning (without providing a text prompt):
1647
+
1648
+ ```python
1649
+ >>> inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
1650
+
1651
+ >>> generated_ids = model.generate(**inputs)
1652
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
1653
+ >>> print(generated_text)
1654
+ two cats laying on a couch
1655
+ ```
1656
+
1657
+ Visual question answering (prompt = question):
1658
+
1659
+ ```python
1660
+ >>> prompt = "Question: how many cats are there? Answer:"
1661
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device="cuda", dtype=torch.float16)
1662
+
1663
+ >>> generated_ids = model.generate(**inputs)
1664
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
1665
+ >>> print(generated_text)
1666
+ two
1667
+ ```
1668
+
1669
+ Note that int8 inference is also supported through [bitsandbytes](https://github.com/TimDettmers/bitsandbytes).
1670
+ This greatly reduces the amount of memory used by the model while largely preserving generation quality.
1671
+
1672
+ ```python
1673
+ >>> model = Blip2ForConditionalGeneration.from_pretrained(
1674
+ ... "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.bfloat16
1675
+ ... ) # doctest: +IGNORE_RESULT
1676
+
1677
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device="cuda", dtype=torch.bfloat16)
1678
+
1679
+ >>> generated_ids = model.generate(**inputs)
1680
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
1681
+ >>> print(generated_text)
1682
+ two
1683
+ ```"""
1684
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1685
+
1686
+ # step 1: forward the images through the vision encoder,
1687
+ # to get image embeddings of shape (batch_size, seq_len, hidden_size)
1688
+ vision_outputs = self.vision_model(
1689
+ pixel_values=pixel_values,
1690
+ output_attentions=output_attentions,
1691
+ output_hidden_states=output_hidden_states,
1692
+ return_dict=return_dict,
1693
+ )
1694
+ image_embeds = vision_outputs[0]
1695
+
1696
+ # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
1697
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
1698
+
1699
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
1700
+ query_outputs = self.qformer(
1701
+ query_embeds=query_tokens,
1702
+ encoder_hidden_states=image_embeds,
1703
+ encoder_attention_mask=image_attention_mask,
1704
+ output_attentions=output_attentions,
1705
+ output_hidden_states=output_hidden_states,
1706
+ return_dict=return_dict,
1707
+ )
1708
+ query_output = query_outputs[0]
1709
+
1710
+ # step 3: use the language model, conditioned on the query outputs and the prompt
1711
+ language_model_inputs = self.language_projection(query_output)
1712
+ language_model_attention_mask = torch.ones(
1713
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
1714
+ )
1715
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
1716
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
1717
+
1718
+ if attention_mask is None:
1719
+ attention_mask = torch.ones_like(input_ids)
1720
+ expected_device = language_model_attention_mask.device
1721
+ attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(expected_device)], dim=1)
1722
+
1723
+ if self.config.use_decoder_only_language_model:
1724
+ outputs = self.language_model(
1725
+ inputs_embeds=inputs_embeds,
1726
+ attention_mask=attention_mask,
1727
+ output_attentions=output_attentions,
1728
+ output_hidden_states=output_hidden_states,
1729
+ return_dict=return_dict,
1730
+ )
1731
+ logits = outputs.logits if return_dict else outputs[0]
1732
+ loss = None
1733
+ # we compute the loss here since we need to take into account the sequence length of the query embeds
1734
+ if labels is not None:
1735
+ labels = labels.to(logits.device)
1736
+ logits = logits[:, -labels.size(1) :, :]
1737
+ # Shift so that tokens < n predict n
1738
+ shift_logits = logits[..., :-1, :].contiguous()
1739
+ shift_labels = labels[..., 1:].contiguous().to(logits.device)
1740
+
1741
+ # Flatten the tokens
1742
+ loss_fct = CrossEntropyLoss(reduction="mean")
1743
+
1744
+ loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
1745
+ else:
1746
+ outputs = self.language_model(
1747
+ inputs_embeds=inputs_embeds,
1748
+ attention_mask=attention_mask,
1749
+ decoder_input_ids=decoder_input_ids,
1750
+ decoder_attention_mask=decoder_attention_mask,
1751
+ output_attentions=output_attentions,
1752
+ output_hidden_states=output_hidden_states,
1753
+ return_dict=return_dict,
1754
+ labels=labels,
1755
+ )
1756
+ loss = outputs.loss if return_dict else outputs[0]
1757
+ logits = outputs.logits if return_dict else outputs[1]
1758
+
1759
+ if not return_dict:
1760
+ output = (logits, vision_outputs, query_outputs, outputs)
1761
+ return ((loss,) + output) if loss is not None else output
1762
+
1763
+ return Blip2ForConditionalGenerationModelOutput(
1764
+ loss=loss,
1765
+ logits=logits,
1766
+ vision_outputs=vision_outputs,
1767
+ qformer_outputs=query_outputs,
1768
+ language_model_outputs=outputs,
1769
+ )
1770
+
1771
+ @torch.no_grad()
1772
+ def generate(
1773
+ self,
1774
+ pixel_values: torch.FloatTensor,
1775
+ input_ids: Optional[torch.LongTensor] = None,
1776
+ attention_mask: Optional[torch.LongTensor] = None,
1777
+ **generate_kwargs,
1778
+ ) -> torch.LongTensor:
1779
+ """
1780
+ Overrides `generate` function to be able to use the model as a conditional generator.
1781
+
1782
+ Args:
1783
+ pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
1784
+ Input images to be processed.
1785
+ input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
1786
+ The sequence used as a prompt for the generation.
1787
+ attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
1788
+ Mask to avoid performing attention on padding token indices.
1789
+
1790
+ Returns:
1791
+ captions (list): A list of strings of length batch_size * num_captions.
1792
+ """
1793
+ if hasattr(self, "hf_device_map"):
1794
+ # preprocess for `accelerate`
1795
+ self._preprocess_accelerate()
1796
+
1797
+ batch_size = pixel_values.shape[0]
1798
+ image_embeds = self.vision_model(pixel_values, return_dict=True).last_hidden_state
1799
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
1800
+
1801
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
1802
+ query_outputs = self.qformer(
1803
+ query_embeds=query_tokens,
1804
+ encoder_hidden_states=image_embeds,
1805
+ encoder_attention_mask=image_attention_mask,
1806
+ return_dict=True,
1807
+ )
1808
+ query_output = query_outputs.last_hidden_state
1809
+
1810
+ language_model_inputs = self.language_projection(query_output)
1811
+ language_attention_mask = torch.ones(
1812
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
1813
+ )
1814
+ if input_ids is None:
1815
+ input_ids = (
1816
+ torch.LongTensor([[self.config.text_config.bos_token_id]])
1817
+ .repeat(batch_size, 1)
1818
+ .to(image_embeds.device)
1819
+ )
1820
+ if attention_mask is None:
1821
+ attention_mask = torch.ones_like(input_ids)
1822
+ attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1)
1823
+
1824
+ # concatenate query embeddings with prompt embeddings
1825
+ inputs_embeds = self.get_input_embeddings()(input_ids)
1826
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
1827
+
1828
+ # add image_embeds length to max_length, so that the final max_length is counted only on token embeds
1829
+ # -1 is to account for the prepended BOS after `generate`.
1830
+ # TODO (joao, raushan): refactor `generate` to avoid these operations with VLMs
1831
+ if not self.language_model.config.is_encoder_decoder:
1832
+ generate_kwargs["max_length"] = generate_kwargs.get("max_length", 20) + language_model_inputs.shape[1] - 1
1833
+ generate_kwargs["min_length"] = generate_kwargs.get("min_length", 0) + language_model_inputs.shape[1]
1834
+
1835
+ outputs = self.language_model.generate(
1836
+ inputs_embeds=inputs_embeds,
1837
+ attention_mask=attention_mask,
1838
+ **generate_kwargs,
1839
+ )
1840
+
1841
+ # this is a temporary workaround to be consistent with other generation models and
1842
+ # have BOS as the first token, even though under the hood we are calling LM with embeds
1843
+ if not self.language_model.config.is_encoder_decoder:
1844
+ bos_tokens = (
1845
+ torch.LongTensor([[self.config.text_config.bos_token_id]])
1846
+ .repeat(batch_size, 1)
1847
+ .to(image_embeds.device)
1848
+ )
1849
+ if not isinstance(outputs, torch.Tensor):
1850
+ outputs.sequences = torch.cat([bos_tokens, outputs.sequences], dim=-1)
1851
+ else:
1852
+ outputs = torch.cat([bos_tokens, outputs], dim=-1)
1853
+ return outputs
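
Illustrative note (an assumed usage sketch, not content from the uploaded file): the docstring above only demonstrates `generate`, so the snippet below sketches how `forward` can be called with `labels` to obtain the captioning loss for the decoder-only (OPT) variant, assuming the same `Salesforce/blip2-opt-2.7b` checkpoint referenced in the docstring.

```python
# Minimal sketch: supervised captioning loss with Blip2ForConditionalGeneration.
import requests
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# For the decoder-only language model, the caption serves as both input_ids and labels;
# padding positions are set to -100 so the cross-entropy loss ignores them.
inputs = processor(images=image, text="two cats laying on a couch", return_tensors="pt")
labels = inputs["input_ids"].masked_fill(inputs["input_ids"] == processor.tokenizer.pad_token_id, -100)

outputs = model(
    pixel_values=inputs["pixel_values"],
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    labels=labels,
)
print(outputs.loss)
```
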
venv/lib/python3.10/site-packages/transformers/models/blip_2/processing_blip_2.py ADDED
@@ -0,0 +1,155 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for BLIP-2.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ from ...image_utils import ImageInput
22
+ from ...processing_utils import ProcessorMixin
23
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
24
+ from ...utils import TensorType
25
+
26
+
27
+ class Blip2Processor(ProcessorMixin):
28
+ r"""
29
+ Constructs a BLIP-2 processor which wraps a BLIP image processor and an OPT/T5 tokenizer into a single processor.
30
+
31
+ [`Blip2Processor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the docstring
32
+ of [`~Blip2Processor.__call__`] and [`~Blip2Processor.decode`] for more information.
33
+
34
+ Args:
35
+ image_processor (`BlipImageProcessor`):
36
+ An instance of [`BlipImageProcessor`]. The image processor is a required input.
37
+ tokenizer (`AutoTokenizer`):
38
+ An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
39
+ """
40
+
41
+ attributes = ["image_processor", "tokenizer"]
42
+ image_processor_class = "BlipImageProcessor"
43
+ tokenizer_class = "AutoTokenizer"
44
+
45
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.__init__
46
+ def __init__(self, image_processor, tokenizer):
47
+ tokenizer.return_token_type_ids = False
48
+ super().__init__(image_processor, tokenizer)
49
+ self.current_processor = self.image_processor
50
+
51
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.__call__
52
+ def __call__(
53
+ self,
54
+ images: ImageInput = None,
55
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
56
+ add_special_tokens: bool = True,
57
+ padding: Union[bool, str, PaddingStrategy] = False,
58
+ truncation: Union[bool, str, TruncationStrategy] = None,
59
+ max_length: Optional[int] = None,
60
+ stride: int = 0,
61
+ pad_to_multiple_of: Optional[int] = None,
62
+ return_attention_mask: Optional[bool] = None,
63
+ return_overflowing_tokens: bool = False,
64
+ return_special_tokens_mask: bool = False,
65
+ return_offsets_mapping: bool = False,
66
+ return_token_type_ids: bool = False,
67
+ return_length: bool = False,
68
+ verbose: bool = True,
69
+ return_tensors: Optional[Union[str, TensorType]] = None,
70
+ **kwargs,
71
+ ) -> BatchEncoding:
72
+ """
73
+ This method uses [`BlipImageProcessor.__call__`] to prepare image(s) for the model, and
74
+ [`BertTokenizerFast.__call__`] to prepare text for the model.
75
+
76
+ Please refer to the docstring of the above two methods for more information.
77
+ """
78
+ if images is None and text is None:
79
+ raise ValueError("You have to specify either images or text.")
80
+
81
+ # Get only text
82
+ if images is None:
83
+ self.current_processor = self.tokenizer
84
+ text_encoding = self.tokenizer(
85
+ text=text,
86
+ add_special_tokens=add_special_tokens,
87
+ padding=padding,
88
+ truncation=truncation,
89
+ max_length=max_length,
90
+ stride=stride,
91
+ pad_to_multiple_of=pad_to_multiple_of,
92
+ return_attention_mask=return_attention_mask,
93
+ return_overflowing_tokens=return_overflowing_tokens,
94
+ return_special_tokens_mask=return_special_tokens_mask,
95
+ return_offsets_mapping=return_offsets_mapping,
96
+ return_token_type_ids=return_token_type_ids,
97
+ return_length=return_length,
98
+ verbose=verbose,
99
+ return_tensors=return_tensors,
100
+ **kwargs,
101
+ )
102
+ return text_encoding
103
+
104
+ # add pixel_values
105
+ encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
106
+
107
+ if text is not None:
108
+ text_encoding = self.tokenizer(
109
+ text=text,
110
+ add_special_tokens=add_special_tokens,
111
+ padding=padding,
112
+ truncation=truncation,
113
+ max_length=max_length,
114
+ stride=stride,
115
+ pad_to_multiple_of=pad_to_multiple_of,
116
+ return_attention_mask=return_attention_mask,
117
+ return_overflowing_tokens=return_overflowing_tokens,
118
+ return_special_tokens_mask=return_special_tokens_mask,
119
+ return_offsets_mapping=return_offsets_mapping,
120
+ return_token_type_ids=return_token_type_ids,
121
+ return_length=return_length,
122
+ verbose=verbose,
123
+ return_tensors=return_tensors,
124
+ **kwargs,
125
+ )
126
+ else:
127
+ text_encoding = None
128
+
129
+ if text_encoding is not None:
130
+ encoding_image_processor.update(text_encoding)
131
+
132
+ return encoding_image_processor
133
+
134
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer
135
+ def batch_decode(self, *args, **kwargs):
136
+ """
137
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
138
+ refer to the docstring of this method for more information.
139
+ """
140
+ return self.tokenizer.batch_decode(*args, **kwargs)
141
+
142
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer
143
+ def decode(self, *args, **kwargs):
144
+ """
145
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
146
+ the docstring of this method for more information.
147
+ """
148
+ return self.tokenizer.decode(*args, **kwargs)
149
+
150
+ @property
151
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
152
+ def model_input_names(self):
153
+ tokenizer_input_names = self.tokenizer.model_input_names
154
+ image_processor_input_names = self.image_processor.model_input_names
155
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
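
Illustrative usage sketch for the processor above (an assumption about typical use, not content from the uploaded file), relying on the `Salesforce/blip2-opt-2.7b` checkpoint referenced earlier: it shows the two branches of `__call__` (text-only vs. image plus text) and how `decode` forwards to the tokenizer.

```python
# Minimal sketch: Blip2Processor returns tokenizer outputs alone for text-only calls
# and merges them with pixel_values when an image is passed.
import requests
from PIL import Image
from transformers import Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

text_only = processor(text="a photo of", return_tensors="pt")
print(sorted(text_only.keys()))  # ['attention_mask', 'input_ids']

image_and_text = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(image_and_text.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']

# decode/batch_decode simply forward to the underlying tokenizer.
print(processor.decode(image_and_text["input_ids"][0], skip_special_tokens=True))
```
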
venv/lib/python3.10/site-packages/transformers/models/electra/modeling_electra.py ADDED
@@ -0,0 +1,1679 @@
1
+ # coding=utf-8
2
+ # Copyright 2019 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch ELECTRA model."""
16
+
17
+ import math
18
+ import os
19
+ from dataclasses import dataclass
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN, get_activation
28
+ from ...modeling_outputs import (
29
+ BaseModelOutputWithCrossAttentions,
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ CausalLMOutputWithCrossAttentions,
32
+ MaskedLMOutput,
33
+ MultipleChoiceModelOutput,
34
+ QuestionAnsweringModelOutput,
35
+ SequenceClassifierOutput,
36
+ TokenClassifierOutput,
37
+ )
38
+ from ...modeling_utils import PreTrainedModel, SequenceSummary
39
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
40
+ from ...utils import (
41
+ ModelOutput,
42
+ add_code_sample_docstrings,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ logging,
46
+ replace_return_docstrings,
47
+ )
48
+ from .configuration_electra import ElectraConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "google/electra-small-discriminator"
54
+ _CONFIG_FOR_DOC = "ElectraConfig"
55
+
56
+
57
+ from ..deprecated._archive_maps import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
58
+
59
+
60
+ def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator="discriminator"):
61
+ """Load tf checkpoints in a pytorch model."""
62
+ try:
63
+ import re
64
+
65
+ import numpy as np
66
+ import tensorflow as tf
67
+ except ImportError:
68
+ logger.error(
69
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
70
+ "https://www.tensorflow.org/install/ for installation instructions."
71
+ )
72
+ raise
73
+ tf_path = os.path.abspath(tf_checkpoint_path)
74
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
75
+ # Load weights from TF model
76
+ init_vars = tf.train.list_variables(tf_path)
77
+ names = []
78
+ arrays = []
79
+ for name, shape in init_vars:
80
+ logger.info(f"Loading TF weight {name} with shape {shape}")
81
+ array = tf.train.load_variable(tf_path, name)
82
+ names.append(name)
83
+ arrays.append(array)
84
+ for name, array in zip(names, arrays):
85
+ original_name: str = name
86
+
87
+ try:
88
+ if isinstance(model, ElectraForMaskedLM):
89
+ name = name.replace("electra/embeddings/", "generator/embeddings/")
90
+
91
+ if discriminator_or_generator == "generator":
92
+ name = name.replace("electra/", "discriminator/")
93
+ name = name.replace("generator/", "electra/")
94
+
95
+ name = name.replace("dense_1", "dense_prediction")
96
+ name = name.replace("generator_predictions/output_bias", "generator_lm_head/bias")
97
+
98
+ name = name.split("/")
99
+ # print(original_name, name)
100
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
101
+ # which are not required for using pretrained model
102
+ if any(n in ["global_step", "temperature"] for n in name):
103
+ logger.info(f"Skipping {original_name}")
104
+ continue
105
+ pointer = model
106
+ for m_name in name:
107
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
108
+ scope_names = re.split(r"_(\d+)", m_name)
109
+ else:
110
+ scope_names = [m_name]
111
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
112
+ pointer = getattr(pointer, "weight")
113
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
114
+ pointer = getattr(pointer, "bias")
115
+ elif scope_names[0] == "output_weights":
116
+ pointer = getattr(pointer, "weight")
117
+ elif scope_names[0] == "squad":
118
+ pointer = getattr(pointer, "classifier")
119
+ else:
120
+ pointer = getattr(pointer, scope_names[0])
121
+ if len(scope_names) >= 2:
122
+ num = int(scope_names[1])
123
+ pointer = pointer[num]
124
+ if m_name.endswith("_embeddings"):
125
+ pointer = getattr(pointer, "weight")
126
+ elif m_name == "kernel":
127
+ array = np.transpose(array)
128
+ try:
129
+ if pointer.shape != array.shape:
130
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
131
+ except ValueError as e:
132
+ e.args += (pointer.shape, array.shape)
133
+ raise
134
+ print(f"Initialize PyTorch weight {name}", original_name)
135
+ pointer.data = torch.from_numpy(array)
136
+ except AttributeError as e:
137
+ print(f"Skipping {original_name}", name, e)
138
+ continue
139
+ return model
140
+
141
+
142
+ class ElectraEmbeddings(nn.Module):
143
+ """Construct the embeddings from word, position and token_type embeddings."""
144
+
145
+ def __init__(self, config):
146
+ super().__init__()
147
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
148
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
149
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
150
+
151
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
152
+ # any TensorFlow checkpoint file
153
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
154
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
155
+
156
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
157
+ self.register_buffer(
158
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
159
+ )
160
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
161
+ self.register_buffer(
162
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
163
+ )
164
+
165
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
166
+ def forward(
167
+ self,
168
+ input_ids: Optional[torch.LongTensor] = None,
169
+ token_type_ids: Optional[torch.LongTensor] = None,
170
+ position_ids: Optional[torch.LongTensor] = None,
171
+ inputs_embeds: Optional[torch.FloatTensor] = None,
172
+ past_key_values_length: int = 0,
173
+ ) -> torch.Tensor:
174
+ if input_ids is not None:
175
+ input_shape = input_ids.size()
176
+ else:
177
+ input_shape = inputs_embeds.size()[:-1]
178
+
179
+ seq_length = input_shape[1]
180
+
181
+ if position_ids is None:
182
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
183
+
184
+ # Set token_type_ids to the registered buffer (all zeros) defined in the constructor, which usually applies
185
+ # when it is auto-generated; the registered buffer helps users trace the model without passing token_type_ids and
186
+ # solves issue #5664.
187
+ if token_type_ids is None:
188
+ if hasattr(self, "token_type_ids"):
189
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
190
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
191
+ token_type_ids = buffered_token_type_ids_expanded
192
+ else:
193
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
194
+
195
+ if inputs_embeds is None:
196
+ inputs_embeds = self.word_embeddings(input_ids)
197
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
198
+
199
+ embeddings = inputs_embeds + token_type_embeddings
200
+ if self.position_embedding_type == "absolute":
201
+ position_embeddings = self.position_embeddings(position_ids)
202
+ embeddings += position_embeddings
203
+ embeddings = self.LayerNorm(embeddings)
204
+ embeddings = self.dropout(embeddings)
205
+ return embeddings
206
+
207
+
208
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Electra
209
+ class ElectraSelfAttention(nn.Module):
210
+ def __init__(self, config, position_embedding_type=None):
211
+ super().__init__()
212
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
213
+ raise ValueError(
214
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
215
+ f"heads ({config.num_attention_heads})"
216
+ )
217
+
218
+ self.num_attention_heads = config.num_attention_heads
219
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
220
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
221
+
222
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
223
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
224
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
225
+
226
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
227
+ self.position_embedding_type = position_embedding_type or getattr(
228
+ config, "position_embedding_type", "absolute"
229
+ )
230
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
231
+ self.max_position_embeddings = config.max_position_embeddings
232
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
233
+
234
+ self.is_decoder = config.is_decoder
235
+
236
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
237
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
238
+ x = x.view(new_x_shape)
239
+ return x.permute(0, 2, 1, 3)
240
+
241
+ def forward(
242
+ self,
243
+ hidden_states: torch.Tensor,
244
+ attention_mask: Optional[torch.FloatTensor] = None,
245
+ head_mask: Optional[torch.FloatTensor] = None,
246
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
247
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
248
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
249
+ output_attentions: Optional[bool] = False,
250
+ ) -> Tuple[torch.Tensor]:
251
+ mixed_query_layer = self.query(hidden_states)
252
+
253
+ # If this is instantiated as a cross-attention module, the keys
254
+ # and values come from an encoder; the attention mask needs to be
255
+ # such that the encoder's padding tokens are not attended to.
256
+ is_cross_attention = encoder_hidden_states is not None
257
+
258
+ if is_cross_attention and past_key_value is not None:
259
+ # reuse k,v, cross_attentions
260
+ key_layer = past_key_value[0]
261
+ value_layer = past_key_value[1]
262
+ attention_mask = encoder_attention_mask
263
+ elif is_cross_attention:
264
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
265
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
266
+ attention_mask = encoder_attention_mask
267
+ elif past_key_value is not None:
268
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
269
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
270
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
271
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
272
+ else:
273
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
274
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
275
+
276
+ query_layer = self.transpose_for_scores(mixed_query_layer)
277
+
278
+ use_cache = past_key_value is not None
279
+ if self.is_decoder:
280
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
281
+ # Further calls to cross_attention layer can then reuse all cross-attention
282
+ # key/value_states (first "if" case)
283
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
284
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
285
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
286
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
287
+ past_key_value = (key_layer, value_layer)
288
+
289
+ # Take the dot product between "query" and "key" to get the raw attention scores.
290
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
291
+
292
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
293
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
294
+ if use_cache:
295
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
296
+ -1, 1
297
+ )
298
+ else:
299
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
300
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
301
+ distance = position_ids_l - position_ids_r
302
+
303
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
304
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
305
+
306
+ if self.position_embedding_type == "relative_key":
307
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
308
+ attention_scores = attention_scores + relative_position_scores
309
+ elif self.position_embedding_type == "relative_key_query":
310
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
311
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
312
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
313
+
314
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
315
+ if attention_mask is not None:
316
+ # Apply the attention mask (precomputed for all layers in ElectraModel forward() function)
317
+ attention_scores = attention_scores + attention_mask
318
+
319
+ # Normalize the attention scores to probabilities.
320
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
321
+
322
+ # This is actually dropping out entire tokens to attend to, which might
323
+ # seem a bit unusual, but is taken from the original Transformer paper.
324
+ attention_probs = self.dropout(attention_probs)
325
+
326
+ # Mask heads if we want to
327
+ if head_mask is not None:
328
+ attention_probs = attention_probs * head_mask
329
+
330
+ context_layer = torch.matmul(attention_probs, value_layer)
331
+
332
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
333
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
334
+ context_layer = context_layer.view(new_context_layer_shape)
335
+
336
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
337
+
338
+ if self.is_decoder:
339
+ outputs = outputs + (past_key_value,)
340
+ return outputs
341
+
342
+
343
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
344
+ class ElectraSelfOutput(nn.Module):
345
+ def __init__(self, config):
346
+ super().__init__()
347
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
348
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
349
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
350
+
351
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
352
+ hidden_states = self.dense(hidden_states)
353
+ hidden_states = self.dropout(hidden_states)
354
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
355
+ return hidden_states
356
+
357
+
358
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Electra
359
+ class ElectraAttention(nn.Module):
360
+ def __init__(self, config, position_embedding_type=None):
361
+ super().__init__()
362
+ self.self = ElectraSelfAttention(config, position_embedding_type=position_embedding_type)
363
+ self.output = ElectraSelfOutput(config)
364
+ self.pruned_heads = set()
365
+
366
+ def prune_heads(self, heads):
367
+ if len(heads) == 0:
368
+ return
369
+ heads, index = find_pruneable_heads_and_indices(
370
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
371
+ )
372
+
373
+ # Prune linear layers
374
+ self.self.query = prune_linear_layer(self.self.query, index)
375
+ self.self.key = prune_linear_layer(self.self.key, index)
376
+ self.self.value = prune_linear_layer(self.self.value, index)
377
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
378
+
379
+ # Update hyper params and store pruned heads
380
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
381
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
382
+ self.pruned_heads = self.pruned_heads.union(heads)
383
+
384
+ def forward(
385
+ self,
386
+ hidden_states: torch.Tensor,
387
+ attention_mask: Optional[torch.FloatTensor] = None,
388
+ head_mask: Optional[torch.FloatTensor] = None,
389
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
390
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
391
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
392
+ output_attentions: Optional[bool] = False,
393
+ ) -> Tuple[torch.Tensor]:
394
+ self_outputs = self.self(
395
+ hidden_states,
396
+ attention_mask,
397
+ head_mask,
398
+ encoder_hidden_states,
399
+ encoder_attention_mask,
400
+ past_key_value,
401
+ output_attentions,
402
+ )
403
+ attention_output = self.output(self_outputs[0], hidden_states)
404
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
405
+ return outputs
406
+
407
+
408
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
409
+ class ElectraIntermediate(nn.Module):
410
+ def __init__(self, config):
411
+ super().__init__()
412
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
413
+ if isinstance(config.hidden_act, str):
414
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
415
+ else:
416
+ self.intermediate_act_fn = config.hidden_act
417
+
418
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
419
+ hidden_states = self.dense(hidden_states)
420
+ hidden_states = self.intermediate_act_fn(hidden_states)
421
+ return hidden_states
422
+
423
+
424
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
425
+ class ElectraOutput(nn.Module):
426
+ def __init__(self, config):
427
+ super().__init__()
428
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
429
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
430
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
431
+
432
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
433
+ hidden_states = self.dense(hidden_states)
434
+ hidden_states = self.dropout(hidden_states)
435
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
436
+ return hidden_states
437
+
438
+
439
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Electra
440
+ class ElectraLayer(nn.Module):
441
+ def __init__(self, config):
442
+ super().__init__()
443
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
444
+ self.seq_len_dim = 1
445
+ self.attention = ElectraAttention(config)
446
+ self.is_decoder = config.is_decoder
447
+ self.add_cross_attention = config.add_cross_attention
448
+ if self.add_cross_attention:
449
+ if not self.is_decoder:
450
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
451
+ self.crossattention = ElectraAttention(config, position_embedding_type="absolute")
452
+ self.intermediate = ElectraIntermediate(config)
453
+ self.output = ElectraOutput(config)
454
+
455
+ def forward(
456
+ self,
457
+ hidden_states: torch.Tensor,
458
+ attention_mask: Optional[torch.FloatTensor] = None,
459
+ head_mask: Optional[torch.FloatTensor] = None,
460
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
461
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
462
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
463
+ output_attentions: Optional[bool] = False,
464
+ ) -> Tuple[torch.Tensor]:
465
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
466
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
467
+ self_attention_outputs = self.attention(
468
+ hidden_states,
469
+ attention_mask,
470
+ head_mask,
471
+ output_attentions=output_attentions,
472
+ past_key_value=self_attn_past_key_value,
473
+ )
474
+ attention_output = self_attention_outputs[0]
475
+
476
+ # if decoder, the last output is tuple of self-attn cache
477
+ if self.is_decoder:
478
+ outputs = self_attention_outputs[1:-1]
479
+ present_key_value = self_attention_outputs[-1]
480
+ else:
481
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
482
+
483
+ cross_attn_present_key_value = None
484
+ if self.is_decoder and encoder_hidden_states is not None:
485
+ if not hasattr(self, "crossattention"):
486
+ raise ValueError(
487
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
488
+ " by setting `config.add_cross_attention=True`"
489
+ )
490
+
491
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
492
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
493
+ cross_attention_outputs = self.crossattention(
494
+ attention_output,
495
+ attention_mask,
496
+ head_mask,
497
+ encoder_hidden_states,
498
+ encoder_attention_mask,
499
+ cross_attn_past_key_value,
500
+ output_attentions,
501
+ )
502
+ attention_output = cross_attention_outputs[0]
503
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
504
+
505
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
506
+ cross_attn_present_key_value = cross_attention_outputs[-1]
507
+ present_key_value = present_key_value + cross_attn_present_key_value
508
+
509
+ layer_output = apply_chunking_to_forward(
510
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
511
+ )
512
+ outputs = (layer_output,) + outputs
513
+
514
+ # if decoder, return the attn key/values as the last output
515
+ if self.is_decoder:
516
+ outputs = outputs + (present_key_value,)
517
+
518
+ return outputs
519
+
520
+ def feed_forward_chunk(self, attention_output):
521
+ intermediate_output = self.intermediate(attention_output)
522
+ layer_output = self.output(intermediate_output, attention_output)
523
+ return layer_output
524
+
525
+
526
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Electra
527
+ class ElectraEncoder(nn.Module):
528
+ def __init__(self, config):
529
+ super().__init__()
530
+ self.config = config
531
+ self.layer = nn.ModuleList([ElectraLayer(config) for _ in range(config.num_hidden_layers)])
532
+ self.gradient_checkpointing = False
533
+
534
+ def forward(
535
+ self,
536
+ hidden_states: torch.Tensor,
537
+ attention_mask: Optional[torch.FloatTensor] = None,
538
+ head_mask: Optional[torch.FloatTensor] = None,
539
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
540
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
541
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
542
+ use_cache: Optional[bool] = None,
543
+ output_attentions: Optional[bool] = False,
544
+ output_hidden_states: Optional[bool] = False,
545
+ return_dict: Optional[bool] = True,
546
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
547
+ all_hidden_states = () if output_hidden_states else None
548
+ all_self_attentions = () if output_attentions else None
549
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
550
+
551
+ if self.gradient_checkpointing and self.training:
552
+ if use_cache:
553
+ logger.warning_once(
554
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
555
+ )
556
+ use_cache = False
557
+
558
+ next_decoder_cache = () if use_cache else None
559
+ for i, layer_module in enumerate(self.layer):
560
+ if output_hidden_states:
561
+ all_hidden_states = all_hidden_states + (hidden_states,)
562
+
563
+ layer_head_mask = head_mask[i] if head_mask is not None else None
564
+ past_key_value = past_key_values[i] if past_key_values is not None else None
565
+
566
+ if self.gradient_checkpointing and self.training:
567
+ layer_outputs = self._gradient_checkpointing_func(
568
+ layer_module.__call__,
569
+ hidden_states,
570
+ attention_mask,
571
+ layer_head_mask,
572
+ encoder_hidden_states,
573
+ encoder_attention_mask,
574
+ past_key_value,
575
+ output_attentions,
576
+ )
577
+ else:
578
+ layer_outputs = layer_module(
579
+ hidden_states,
580
+ attention_mask,
581
+ layer_head_mask,
582
+ encoder_hidden_states,
583
+ encoder_attention_mask,
584
+ past_key_value,
585
+ output_attentions,
586
+ )
587
+
588
+ hidden_states = layer_outputs[0]
589
+ if use_cache:
590
+ next_decoder_cache += (layer_outputs[-1],)
591
+ if output_attentions:
592
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
593
+ if self.config.add_cross_attention:
594
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
595
+
596
+ if output_hidden_states:
597
+ all_hidden_states = all_hidden_states + (hidden_states,)
598
+
599
+ if not return_dict:
600
+ return tuple(
601
+ v
602
+ for v in [
603
+ hidden_states,
604
+ next_decoder_cache,
605
+ all_hidden_states,
606
+ all_self_attentions,
607
+ all_cross_attentions,
608
+ ]
609
+ if v is not None
610
+ )
611
+ return BaseModelOutputWithPastAndCrossAttentions(
612
+ last_hidden_state=hidden_states,
613
+ past_key_values=next_decoder_cache,
614
+ hidden_states=all_hidden_states,
615
+ attentions=all_self_attentions,
616
+ cross_attentions=all_cross_attentions,
617
+ )
618
+
619
+
620
+ class ElectraDiscriminatorPredictions(nn.Module):
621
+ """Prediction module for the discriminator, made up of two dense layers."""
622
+
623
+ def __init__(self, config):
624
+ super().__init__()
625
+
626
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
627
+ self.activation = get_activation(config.hidden_act)
628
+ self.dense_prediction = nn.Linear(config.hidden_size, 1)
629
+ self.config = config
630
+
631
+ def forward(self, discriminator_hidden_states):
632
+ hidden_states = self.dense(discriminator_hidden_states)
633
+ hidden_states = self.activation(hidden_states)
634
+ logits = self.dense_prediction(hidden_states).squeeze(-1)
635
+
636
+ return logits
637
+
638
+
639
+ class ElectraGeneratorPredictions(nn.Module):
640
+ """Prediction module for the generator, made up of two dense layers."""
641
+
642
+ def __init__(self, config):
643
+ super().__init__()
644
+
645
+ self.activation = get_activation("gelu")
646
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
647
+ self.dense = nn.Linear(config.hidden_size, config.embedding_size)
648
+
649
+ def forward(self, generator_hidden_states):
650
+ hidden_states = self.dense(generator_hidden_states)
651
+ hidden_states = self.activation(hidden_states)
652
+ hidden_states = self.LayerNorm(hidden_states)
653
+
654
+ return hidden_states
655
+
656
+
657
+ class ElectraPreTrainedModel(PreTrainedModel):
658
+ """
659
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
660
+ models.
661
+ """
662
+
663
+ config_class = ElectraConfig
664
+ load_tf_weights = load_tf_weights_in_electra
665
+ base_model_prefix = "electra"
666
+ supports_gradient_checkpointing = True
667
+
668
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
669
+ def _init_weights(self, module):
670
+ """Initialize the weights"""
671
+ if isinstance(module, nn.Linear):
672
+ # Slightly different from the TF version which uses truncated_normal for initialization
673
+ # cf https://github.com/pytorch/pytorch/pull/5617
674
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
675
+ if module.bias is not None:
676
+ module.bias.data.zero_()
677
+ elif isinstance(module, nn.Embedding):
678
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
679
+ if module.padding_idx is not None:
680
+ module.weight.data[module.padding_idx].zero_()
681
+ elif isinstance(module, nn.LayerNorm):
682
+ module.bias.data.zero_()
683
+ module.weight.data.fill_(1.0)
684
+
685
+
686
+ @dataclass
687
+ class ElectraForPreTrainingOutput(ModelOutput):
688
+ """
689
+ Output type of [`ElectraForPreTraining`].
690
+
691
+ Args:
692
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
693
+ Total loss of the ELECTRA objective.
694
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
695
+ Prediction scores of the head (scores for each token before SoftMax).
696
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
697
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
698
+ shape `(batch_size, sequence_length, hidden_size)`.
699
+
700
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
701
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
702
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
703
+ sequence_length)`.
704
+
705
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
706
+ heads.
707
+ """
708
+
709
+ loss: Optional[torch.FloatTensor] = None
710
+ logits: torch.FloatTensor = None
711
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
712
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
713
+
714
+
715
+ ELECTRA_START_DOCSTRING = r"""
716
+
717
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
718
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
719
+ etc.)
720
+
721
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
722
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
723
+ and behavior.
724
+
725
+ Parameters:
726
+ config ([`ElectraConfig`]): Model configuration class with all the parameters of the model.
727
+ Initializing with a config file does not load the weights associated with the model, only the
728
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
729
+ """
730
+
731
+ ELECTRA_INPUTS_DOCSTRING = r"""
732
+ Args:
733
+ input_ids (`torch.LongTensor` of shape `({0})`):
734
+ Indices of input sequence tokens in the vocabulary.
735
+
736
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
737
+ [`PreTrainedTokenizer.__call__`] for details.
738
+
739
+ [What are input IDs?](../glossary#input-ids)
740
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
741
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
742
+
743
+ - 1 for tokens that are **not masked**,
744
+ - 0 for tokens that are **masked**.
745
+
746
+ [What are attention masks?](../glossary#attention-mask)
747
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
748
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
749
+ 1]`:
750
+
751
+ - 0 corresponds to a *sentence A* token,
752
+ - 1 corresponds to a *sentence B* token.
753
+
754
+ [What are token type IDs?](../glossary#token-type-ids)
755
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
756
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
757
+ config.max_position_embeddings - 1]`.
758
+
759
+ [What are position IDs?](../glossary#position-ids)
760
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
761
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
762
+
763
+ - 1 indicates the head is **not masked**,
764
+ - 0 indicates the head is **masked**.
765
+
766
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
767
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
768
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
769
+ model's internal embedding lookup matrix.
770
+ encoder_hidden_states (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
771
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
772
+ the model is configured as a decoder.
773
+ encoder_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
774
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
775
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
776
+
777
+ - 1 indicates the head is **not masked**,
778
+ - 0 indicates the head is **masked**.
779
+
780
+ output_attentions (`bool`, *optional*):
781
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
782
+ tensors for more detail.
783
+ output_hidden_states (`bool`, *optional*):
784
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
785
+ more detail.
786
+ return_dict (`bool`, *optional*):
787
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
788
+ """
789
+
790
+
791
+ @add_start_docstrings(
792
+ "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
793
+ "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
794
+ "hidden size and embedding size are different. "
795
+ ""
796
+ "Both the generator and discriminator checkpoints may be loaded into this model.",
797
+ ELECTRA_START_DOCSTRING,
798
+ )
799
+ class ElectraModel(ElectraPreTrainedModel):
800
+ def __init__(self, config):
801
+ super().__init__(config)
802
+ self.embeddings = ElectraEmbeddings(config)
803
+
804
+ if config.embedding_size != config.hidden_size:
805
+ self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)
806
+
807
+ self.encoder = ElectraEncoder(config)
808
+ self.config = config
809
+ # Initialize weights and apply final processing
810
+ self.post_init()
811
+
812
+ def get_input_embeddings(self):
813
+ return self.embeddings.word_embeddings
814
+
815
+ def set_input_embeddings(self, value):
816
+ self.embeddings.word_embeddings = value
817
+
818
+ def _prune_heads(self, heads_to_prune):
819
+ """
820
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
821
+ class PreTrainedModel
822
+ """
823
+ for layer, heads in heads_to_prune.items():
824
+ self.encoder.layer[layer].attention.prune_heads(heads)
825
+
826
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
827
+ @add_code_sample_docstrings(
828
+ checkpoint=_CHECKPOINT_FOR_DOC,
829
+ output_type=BaseModelOutputWithCrossAttentions,
830
+ config_class=_CONFIG_FOR_DOC,
831
+ )
832
+ def forward(
833
+ self,
834
+ input_ids: Optional[torch.Tensor] = None,
835
+ attention_mask: Optional[torch.Tensor] = None,
836
+ token_type_ids: Optional[torch.Tensor] = None,
837
+ position_ids: Optional[torch.Tensor] = None,
838
+ head_mask: Optional[torch.Tensor] = None,
839
+ inputs_embeds: Optional[torch.Tensor] = None,
840
+ encoder_hidden_states: Optional[torch.Tensor] = None,
841
+ encoder_attention_mask: Optional[torch.Tensor] = None,
842
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
843
+ use_cache: Optional[bool] = None,
844
+ output_attentions: Optional[bool] = None,
845
+ output_hidden_states: Optional[bool] = None,
846
+ return_dict: Optional[bool] = None,
847
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithCrossAttentions]:
848
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
849
+ output_hidden_states = (
850
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
851
+ )
852
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
853
+
854
+ if input_ids is not None and inputs_embeds is not None:
855
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
856
+ elif input_ids is not None:
857
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
858
+ input_shape = input_ids.size()
859
+ elif inputs_embeds is not None:
860
+ input_shape = inputs_embeds.size()[:-1]
861
+ else:
862
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
863
+
864
+ batch_size, seq_length = input_shape
865
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
866
+
867
+ # past_key_values_length
868
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
869
+
870
+ if attention_mask is None:
871
+ attention_mask = torch.ones(input_shape, device=device)
872
+ if token_type_ids is None:
873
+ if hasattr(self.embeddings, "token_type_ids"):
874
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
875
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
876
+ token_type_ids = buffered_token_type_ids_expanded
877
+ else:
878
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
879
+
880
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
881
+
882
+ # If a 2D or 3D attention mask is provided for the cross-attention
883
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
884
+ if self.config.is_decoder and encoder_hidden_states is not None:
885
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
886
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
887
+ if encoder_attention_mask is None:
888
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
889
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
890
+ else:
891
+ encoder_extended_attention_mask = None
892
+
893
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
894
+
895
+ hidden_states = self.embeddings(
896
+ input_ids=input_ids,
897
+ position_ids=position_ids,
898
+ token_type_ids=token_type_ids,
899
+ inputs_embeds=inputs_embeds,
900
+ past_key_values_length=past_key_values_length,
901
+ )
902
+
903
+ if hasattr(self, "embeddings_project"):
904
+ hidden_states = self.embeddings_project(hidden_states)
905
+
906
+ hidden_states = self.encoder(
907
+ hidden_states,
908
+ attention_mask=extended_attention_mask,
909
+ head_mask=head_mask,
910
+ encoder_hidden_states=encoder_hidden_states,
911
+ encoder_attention_mask=encoder_extended_attention_mask,
912
+ past_key_values=past_key_values,
913
+ use_cache=use_cache,
914
+ output_attentions=output_attentions,
915
+ output_hidden_states=output_hidden_states,
916
+ return_dict=return_dict,
917
+ )
918
+
919
+ return hidden_states
920
+
921
+
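A minimal feature-extraction sketch for the backbone above, not taken from the library docs, using the public `google/electra-small-discriminator` checkpoint (for checkpoints whose embedding size differs from the hidden size, this also exercises the `embeddings_project` layer defined in `__init__`):

```python
import torch
from transformers import AutoTokenizer, ElectraModel

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = ElectraModel.from_pretrained("google/electra-small-discriminator")

inputs = tokenizer("ELECTRA learns by detecting replaced tokens.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Raw hidden states of shape (batch_size, sequence_length, hidden_size), no task head
print(outputs.last_hidden_state.shape)
```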
922
+ class ElectraClassificationHead(nn.Module):
923
+ """Head for sentence-level classification tasks."""
924
+
925
+ def __init__(self, config):
926
+ super().__init__()
927
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
928
+ classifier_dropout = (
929
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
930
+ )
931
+ self.activation = get_activation("gelu")
932
+ self.dropout = nn.Dropout(classifier_dropout)
933
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
934
+
935
+ def forward(self, features, **kwargs):
936
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
937
+ x = self.dropout(x)
938
+ x = self.dense(x)
939
+ x = self.activation(x)  # BERT uses tanh here, but the ELECTRA authors used gelu
940
+ x = self.dropout(x)
941
+ x = self.out_proj(x)
942
+ return x
943
+
944
+
945
+ @add_start_docstrings(
946
+ """
947
+ ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the
948
+ pooled output) e.g. for GLUE tasks.
949
+ """,
950
+ ELECTRA_START_DOCSTRING,
951
+ )
952
+ class ElectraForSequenceClassification(ElectraPreTrainedModel):
953
+ def __init__(self, config):
954
+ super().__init__(config)
955
+ self.num_labels = config.num_labels
956
+ self.config = config
957
+ self.electra = ElectraModel(config)
958
+ self.classifier = ElectraClassificationHead(config)
959
+
960
+ # Initialize weights and apply final processing
961
+ self.post_init()
962
+
963
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
964
+ @add_code_sample_docstrings(
965
+ checkpoint="bhadresh-savani/electra-base-emotion",
966
+ output_type=SequenceClassifierOutput,
967
+ config_class=_CONFIG_FOR_DOC,
968
+ expected_output="'joy'",
969
+ expected_loss=0.06,
970
+ )
971
+ def forward(
972
+ self,
973
+ input_ids: Optional[torch.Tensor] = None,
974
+ attention_mask: Optional[torch.Tensor] = None,
975
+ token_type_ids: Optional[torch.Tensor] = None,
976
+ position_ids: Optional[torch.Tensor] = None,
977
+ head_mask: Optional[torch.Tensor] = None,
978
+ inputs_embeds: Optional[torch.Tensor] = None,
979
+ labels: Optional[torch.Tensor] = None,
980
+ output_attentions: Optional[bool] = None,
981
+ output_hidden_states: Optional[bool] = None,
982
+ return_dict: Optional[bool] = None,
983
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
984
+ r"""
985
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
986
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
987
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
988
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
989
+ """
990
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
991
+
992
+ discriminator_hidden_states = self.electra(
993
+ input_ids,
994
+ attention_mask=attention_mask,
995
+ token_type_ids=token_type_ids,
996
+ position_ids=position_ids,
997
+ head_mask=head_mask,
998
+ inputs_embeds=inputs_embeds,
999
+ output_attentions=output_attentions,
1000
+ output_hidden_states=output_hidden_states,
1001
+ return_dict=return_dict,
1002
+ )
1003
+
1004
+ sequence_output = discriminator_hidden_states[0]
1005
+ logits = self.classifier(sequence_output)
1006
+
1007
+ loss = None
1008
+ if labels is not None:
1009
+ if self.config.problem_type is None:
1010
+ if self.num_labels == 1:
1011
+ self.config.problem_type = "regression"
1012
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1013
+ self.config.problem_type = "single_label_classification"
1014
+ else:
1015
+ self.config.problem_type = "multi_label_classification"
1016
+
1017
+ if self.config.problem_type == "regression":
1018
+ loss_fct = MSELoss()
1019
+ if self.num_labels == 1:
1020
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1021
+ else:
1022
+ loss = loss_fct(logits, labels)
1023
+ elif self.config.problem_type == "single_label_classification":
1024
+ loss_fct = CrossEntropyLoss()
1025
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1026
+ elif self.config.problem_type == "multi_label_classification":
1027
+ loss_fct = BCEWithLogitsLoss()
1028
+ loss = loss_fct(logits, labels)
1029
+
1030
+ if not return_dict:
1031
+ output = (logits,) + discriminator_hidden_states[1:]
1032
+ return ((loss,) + output) if loss is not None else output
1033
+
1034
+ return SequenceClassifierOutput(
1035
+ loss=loss,
1036
+ logits=logits,
1037
+ hidden_states=discriminator_hidden_states.hidden_states,
1038
+ attentions=discriminator_hidden_states.attentions,
1039
+ )
1040
+
1041
+
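The `problem_type` branch in the forward pass above selects `MSELoss`, `CrossEntropyLoss`, or `BCEWithLogitsLoss` from `num_labels` and the label dtype. A hedged sketch with a randomly initialized model (toy inputs, so the loss value itself is meaningless):

```python
import torch
from transformers import ElectraConfig, ElectraForSequenceClassification

# Randomly initialized model, just to exercise the single-label branch
config = ElectraConfig(num_labels=3)
model = ElectraForSequenceClassification(config)

input_ids = torch.randint(0, config.vocab_size, (2, 8))
labels = torch.tensor([0, 2])  # integer labels with num_labels > 1 -> CrossEntropyLoss

outputs = model(input_ids=input_ids, labels=labels)
print(outputs.logits.shape, outputs.loss)  # (2, 3) logits and a scalar loss
```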
1042
+ @add_start_docstrings(
1043
+ """
1044
+ Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.
1045
+
1046
+ It is recommended to load the discriminator checkpoint into this model.
1047
+ """,
1048
+ ELECTRA_START_DOCSTRING,
1049
+ )
1050
+ class ElectraForPreTraining(ElectraPreTrainedModel):
1051
+ def __init__(self, config):
1052
+ super().__init__(config)
1053
+
1054
+ self.electra = ElectraModel(config)
1055
+ self.discriminator_predictions = ElectraDiscriminatorPredictions(config)
1056
+ # Initialize weights and apply final processing
1057
+ self.post_init()
1058
+
1059
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1060
+ @replace_return_docstrings(output_type=ElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1061
+ def forward(
1062
+ self,
1063
+ input_ids: Optional[torch.Tensor] = None,
1064
+ attention_mask: Optional[torch.Tensor] = None,
1065
+ token_type_ids: Optional[torch.Tensor] = None,
1066
+ position_ids: Optional[torch.Tensor] = None,
1067
+ head_mask: Optional[torch.Tensor] = None,
1068
+ inputs_embeds: Optional[torch.Tensor] = None,
1069
+ labels: Optional[torch.Tensor] = None,
1070
+ output_attentions: Optional[bool] = None,
1071
+ output_hidden_states: Optional[bool] = None,
1072
+ return_dict: Optional[bool] = None,
1073
+ ) -> Union[Tuple[torch.Tensor], ElectraForPreTrainingOutput]:
1074
+ r"""
1075
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1076
+ Labels for computing the ELECTRA loss. Input should be a sequence of tokens (see `input_ids` docstring).
1077
+ Indices should be in `[0, 1]`:
1078
+
1079
+ - 0 indicates the token is an original token,
1080
+ - 1 indicates the token was replaced.
1081
+
1082
+ Returns:
1083
+
1084
+ Examples:
1085
+
1086
+ ```python
1087
+ >>> from transformers import ElectraForPreTraining, AutoTokenizer
1088
+ >>> import torch
1089
+
1090
+ >>> discriminator = ElectraForPreTraining.from_pretrained("google/electra-base-discriminator")
1091
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-base-discriminator")
1092
+
1093
+ >>> sentence = "The quick brown fox jumps over the lazy dog"
1094
+ >>> fake_sentence = "The quick brown fox fake over the lazy dog"
1095
+
1096
+ >>> fake_tokens = tokenizer.tokenize(fake_sentence, add_special_tokens=True)
1097
+ >>> fake_inputs = tokenizer.encode(fake_sentence, return_tensors="pt")
1098
+ >>> discriminator_outputs = discriminator(fake_inputs)
1099
+ >>> predictions = torch.round((torch.sign(discriminator_outputs[0]) + 1) / 2)
1100
+
1101
+ >>> fake_tokens
1102
+ ['[CLS]', 'the', 'quick', 'brown', 'fox', 'fake', 'over', 'the', 'lazy', 'dog', '[SEP]']
1103
+
1104
+ >>> predictions.squeeze().tolist()
1105
+ [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
1106
+ ```"""
1107
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1108
+
1109
+ discriminator_hidden_states = self.electra(
1110
+ input_ids,
1111
+ attention_mask=attention_mask,
1112
+ token_type_ids=token_type_ids,
1113
+ position_ids=position_ids,
1114
+ head_mask=head_mask,
1115
+ inputs_embeds=inputs_embeds,
1116
+ output_attentions=output_attentions,
1117
+ output_hidden_states=output_hidden_states,
1118
+ return_dict=return_dict,
1119
+ )
1120
+ discriminator_sequence_output = discriminator_hidden_states[0]
1121
+
1122
+ logits = self.discriminator_predictions(discriminator_sequence_output)
1123
+
1124
+ loss = None
1125
+ if labels is not None:
1126
+ loss_fct = nn.BCEWithLogitsLoss()
1127
+ if attention_mask is not None:
1128
+ active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1
1129
+ active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]
1130
+ active_labels = labels[active_loss]
1131
+ loss = loss_fct(active_logits, active_labels.float())
1132
+ else:
1133
+ loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())
1134
+
1135
+ if not return_dict:
1136
+ output = (logits,) + discriminator_hidden_states[1:]
1137
+ return ((loss,) + output) if loss is not None else output
1138
+
1139
+ return ElectraForPreTrainingOutput(
1140
+ loss=loss,
1141
+ logits=logits,
1142
+ hidden_states=discriminator_hidden_states.hidden_states,
1143
+ attentions=discriminator_hidden_states.attentions,
1144
+ )
1145
+
1146
+
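To complement the inference example in the docstring above, a hedged sketch of the replaced-token-detection loss path, with hand-made 0/1 labels (the label positions are illustrative, not produced by a real generator):

```python
import torch
from transformers import AutoTokenizer, ElectraForPreTraining

tokenizer = AutoTokenizer.from_pretrained("google/electra-base-discriminator")
model = ElectraForPreTraining.from_pretrained("google/electra-base-discriminator")

inputs = tokenizer("The quick brown fox fake over the lazy dog", return_tensors="pt")

# One 0/1 label per token: 1 marks a replaced token ("fake" at position 5), 0 an original one
labels = torch.zeros_like(inputs.input_ids)
labels[0, 5] = 1

outputs = model(**inputs, labels=labels)
print(outputs.loss)  # BCE-with-logits loss over the attended positions
```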
1147
+ @add_start_docstrings(
1148
+ """
1149
+ Electra model with a language modeling head on top.
1150
+
1151
+ Even though both the discriminator and generator may be loaded into this model, the generator is the only model of
1152
+ the two to have been trained for the masked language modeling task.
1153
+ """,
1154
+ ELECTRA_START_DOCSTRING,
1155
+ )
1156
+ class ElectraForMaskedLM(ElectraPreTrainedModel):
1157
+ _tied_weights_keys = ["generator_lm_head.weight"]
1158
+
1159
+ def __init__(self, config):
1160
+ super().__init__(config)
1161
+
1162
+ self.electra = ElectraModel(config)
1163
+ self.generator_predictions = ElectraGeneratorPredictions(config)
1164
+
1165
+ self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
1166
+ # Initialize weights and apply final processing
1167
+ self.post_init()
1168
+
1169
+ def get_output_embeddings(self):
1170
+ return self.generator_lm_head
1171
+
1172
+ def set_output_embeddings(self, word_embeddings):
1173
+ self.generator_lm_head = word_embeddings
1174
+
1175
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1176
+ @add_code_sample_docstrings(
1177
+ checkpoint="google/electra-small-generator",
1178
+ output_type=MaskedLMOutput,
1179
+ config_class=_CONFIG_FOR_DOC,
1180
+ mask="[MASK]",
1181
+ expected_output="'paris'",
1182
+ expected_loss=1.22,
1183
+ )
1184
+ def forward(
1185
+ self,
1186
+ input_ids: Optional[torch.Tensor] = None,
1187
+ attention_mask: Optional[torch.Tensor] = None,
1188
+ token_type_ids: Optional[torch.Tensor] = None,
1189
+ position_ids: Optional[torch.Tensor] = None,
1190
+ head_mask: Optional[torch.Tensor] = None,
1191
+ inputs_embeds: Optional[torch.Tensor] = None,
1192
+ labels: Optional[torch.Tensor] = None,
1193
+ output_attentions: Optional[bool] = None,
1194
+ output_hidden_states: Optional[bool] = None,
1195
+ return_dict: Optional[bool] = None,
1196
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
1197
+ r"""
1198
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1199
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1200
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1201
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1202
+ """
1203
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1204
+
1205
+ generator_hidden_states = self.electra(
1206
+ input_ids,
1207
+ attention_mask=attention_mask,
1208
+ token_type_ids=token_type_ids,
1209
+ position_ids=position_ids,
1210
+ head_mask=head_mask,
1211
+ inputs_embeds=inputs_embeds,
1212
+ output_attentions=output_attentions,
1213
+ output_hidden_states=output_hidden_states,
1214
+ return_dict=return_dict,
1215
+ )
1216
+ generator_sequence_output = generator_hidden_states[0]
1217
+
1218
+ prediction_scores = self.generator_predictions(generator_sequence_output)
1219
+ prediction_scores = self.generator_lm_head(prediction_scores)
1220
+
1221
+ loss = None
1222
+ # Masked language modeling softmax layer
1223
+ if labels is not None:
1224
+ loss_fct = nn.CrossEntropyLoss() # -100 index = padding token
1225
+ loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1226
+
1227
+ if not return_dict:
1228
+ output = (prediction_scores,) + generator_hidden_states[1:]
1229
+ return ((loss,) + output) if loss is not None else output
1230
+
1231
+ return MaskedLMOutput(
1232
+ loss=loss,
1233
+ logits=prediction_scores,
1234
+ hidden_states=generator_hidden_states.hidden_states,
1235
+ attentions=generator_hidden_states.attentions,
1236
+ )
1237
+
1238
+
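A short fill-mask sketch for the generator head above, using the `google/electra-small-generator` checkpoint referenced in the code-sample decorator:

```python
import torch
from transformers import AutoTokenizer, ElectraForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-generator")
model = ElectraForMaskedLM.from_pretrained("google/electra-small-generator")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Pick the highest-scoring token at the [MASK] position
mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
print(tokenizer.decode(logits[0, mask_pos].argmax(dim=-1)))  # expected: "paris"
```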
1239
+ @add_start_docstrings(
1240
+ """
1241
+ Electra model with a token classification head on top.
1242
+
1243
+ Both the discriminator and generator may be loaded into this model.
1244
+ """,
1245
+ ELECTRA_START_DOCSTRING,
1246
+ )
1247
+ class ElectraForTokenClassification(ElectraPreTrainedModel):
1248
+ def __init__(self, config):
1249
+ super().__init__(config)
1250
+ self.num_labels = config.num_labels
1251
+
1252
+ self.electra = ElectraModel(config)
1253
+ classifier_dropout = (
1254
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1255
+ )
1256
+ self.dropout = nn.Dropout(classifier_dropout)
1257
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1258
+ # Initialize weights and apply final processing
1259
+ self.post_init()
1260
+
1261
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1262
+ @add_code_sample_docstrings(
1263
+ checkpoint="bhadresh-savani/electra-base-discriminator-finetuned-conll03-english",
1264
+ output_type=TokenClassifierOutput,
1265
+ config_class=_CONFIG_FOR_DOC,
1266
+ expected_output="['B-LOC', 'B-ORG', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'O', 'B-LOC', 'I-LOC']",
1267
+ expected_loss=0.11,
1268
+ )
1269
+ def forward(
1270
+ self,
1271
+ input_ids: Optional[torch.Tensor] = None,
1272
+ attention_mask: Optional[torch.Tensor] = None,
1273
+ token_type_ids: Optional[torch.Tensor] = None,
1274
+ position_ids: Optional[torch.Tensor] = None,
1275
+ head_mask: Optional[torch.Tensor] = None,
1276
+ inputs_embeds: Optional[torch.Tensor] = None,
1277
+ labels: Optional[torch.Tensor] = None,
1278
+ output_attentions: Optional[bool] = None,
1279
+ output_hidden_states: Optional[bool] = None,
1280
+ return_dict: Optional[bool] = None,
1281
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1282
+ r"""
1283
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1284
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1285
+ """
1286
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1287
+
1288
+ discriminator_hidden_states = self.electra(
1289
+ input_ids,
1290
+ attention_mask=attention_mask,
1291
+ token_type_ids=token_type_ids,
1292
+ position_ids=position_ids,
1293
+ head_mask=head_mask,
1294
+ inputs_embeds=inputs_embeds,
1295
+ output_attentions=output_attentions,
1296
+ output_hidden_states=output_hidden_states,
1297
+ return_dict=return_dict,
1298
+ )
1299
+ discriminator_sequence_output = discriminator_hidden_states[0]
1300
+
1301
+ discriminator_sequence_output = self.dropout(discriminator_sequence_output)
1302
+ logits = self.classifier(discriminator_sequence_output)
1303
+
1304
+ loss = None
1305
+ if labels is not None:
1306
+ loss_fct = CrossEntropyLoss()
1307
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1308
+
1309
+ if not return_dict:
1310
+ output = (logits,) + discriminator_hidden_states[1:]
1311
+ return ((loss,) + output) if loss is not None else output
1312
+
1313
+ return TokenClassifierOutput(
1314
+ loss=loss,
1315
+ logits=logits,
1316
+ hidden_states=discriminator_hidden_states.hidden_states,
1317
+ attentions=discriminator_hidden_states.attentions,
1318
+ )
1319
+
1320
+
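A token-classification sketch using the fine-tuned CoNLL-03 checkpoint named in the decorator above (the printed tags depend on that checkpoint's `id2label` mapping):

```python
import torch
from transformers import AutoTokenizer, ElectraForTokenClassification

name = "bhadresh-savani/electra-base-discriminator-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(name)
model = ElectraForTokenClassification.from_pretrained(name)

inputs = tokenizer("HuggingFace is based in Paris and New York", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# One predicted label id per token, mapped back to tag names
predicted_ids = logits.argmax(dim=-1)[0]
print([model.config.id2label[i.item()] for i in predicted_ids])
```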
1321
+ @add_start_docstrings(
1322
+ """
1323
+ ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1324
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1325
+ """,
1326
+ ELECTRA_START_DOCSTRING,
1327
+ )
1328
+ class ElectraForQuestionAnswering(ElectraPreTrainedModel):
1329
+ config_class = ElectraConfig
1330
+ base_model_prefix = "electra"
1331
+
1332
+ def __init__(self, config):
1333
+ super().__init__(config)
1334
+ self.num_labels = config.num_labels
1335
+
1336
+ self.electra = ElectraModel(config)
1337
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1338
+
1339
+ # Initialize weights and apply final processing
1340
+ self.post_init()
1341
+
1342
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1343
+ @add_code_sample_docstrings(
1344
+ checkpoint="bhadresh-savani/electra-base-squad2",
1345
+ output_type=QuestionAnsweringModelOutput,
1346
+ config_class=_CONFIG_FOR_DOC,
1347
+ qa_target_start_index=11,
1348
+ qa_target_end_index=12,
1349
+ expected_output="'a nice puppet'",
1350
+ expected_loss=2.64,
1351
+ )
1352
+ def forward(
1353
+ self,
1354
+ input_ids: Optional[torch.Tensor] = None,
1355
+ attention_mask: Optional[torch.Tensor] = None,
1356
+ token_type_ids: Optional[torch.Tensor] = None,
1357
+ position_ids: Optional[torch.Tensor] = None,
1358
+ head_mask: Optional[torch.Tensor] = None,
1359
+ inputs_embeds: Optional[torch.Tensor] = None,
1360
+ start_positions: Optional[torch.Tensor] = None,
1361
+ end_positions: Optional[torch.Tensor] = None,
1362
+ output_attentions: Optional[bool] = None,
1363
+ output_hidden_states: Optional[bool] = None,
1364
+ return_dict: Optional[bool] = None,
1365
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1366
+ r"""
1367
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1368
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1369
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1370
+ are not taken into account for computing the loss.
1371
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1372
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1373
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1374
+ are not taken into account for computing the loss.
1375
+ """
1376
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1377
+
1378
+ discriminator_hidden_states = self.electra(
1379
+ input_ids,
1380
+ attention_mask=attention_mask,
1381
+ token_type_ids=token_type_ids,
1382
+ position_ids=position_ids,
1383
+ head_mask=head_mask,
1384
+ inputs_embeds=inputs_embeds,
1385
+ output_attentions=output_attentions,
1386
+ output_hidden_states=output_hidden_states,
1387
+ )
1388
+
1389
+ sequence_output = discriminator_hidden_states[0]
1390
+
1391
+ logits = self.qa_outputs(sequence_output)
1392
+ start_logits, end_logits = logits.split(1, dim=-1)
1393
+ start_logits = start_logits.squeeze(-1).contiguous()
1394
+ end_logits = end_logits.squeeze(-1).contiguous()
1395
+
1396
+ total_loss = None
1397
+ if start_positions is not None and end_positions is not None:
1398
+ # If we are on multi-GPU, the split adds an extra dimension; squeeze it away
1399
+ if len(start_positions.size()) > 1:
1400
+ start_positions = start_positions.squeeze(-1)
1401
+ if len(end_positions.size()) > 1:
1402
+ end_positions = end_positions.squeeze(-1)
1403
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1404
+ ignored_index = start_logits.size(1)
1405
+ start_positions = start_positions.clamp(0, ignored_index)
1406
+ end_positions = end_positions.clamp(0, ignored_index)
1407
+
1408
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1409
+ start_loss = loss_fct(start_logits, start_positions)
1410
+ end_loss = loss_fct(end_logits, end_positions)
1411
+ total_loss = (start_loss + end_loss) / 2
1412
+
1413
+ if not return_dict:
1414
+ output = (
1415
+ start_logits,
1416
+ end_logits,
1417
+ ) + discriminator_hidden_states[1:]
1418
+ return ((total_loss,) + output) if total_loss is not None else output
1419
+
1420
+ return QuestionAnsweringModelOutput(
1421
+ loss=total_loss,
1422
+ start_logits=start_logits,
1423
+ end_logits=end_logits,
1424
+ hidden_states=discriminator_hidden_states.hidden_states,
1425
+ attentions=discriminator_hidden_states.attentions,
1426
+ )
1427
+
1428
+
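A span-extraction sketch for the question-answering head above, using the SQuAD2 checkpoint named in the decorator (the question and context are made up for illustration):

```python
import torch
from transformers import AutoTokenizer, ElectraForQuestionAnswering

name = "bhadresh-savani/electra-base-squad2"
tokenizer = AutoTokenizer.from_pretrained(name)
model = ElectraForQuestionAnswering.from_pretrained(name)

question = "Who wrote the play?"
context = "The play was written by William Shakespeare around 1603."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Decode the most likely start/end span
start = outputs.start_logits.argmax()
end = outputs.end_logits.argmax()
print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))
```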
1429
+ @add_start_docstrings(
1430
+ """
1431
+ ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1432
+ softmax) e.g. for RocStories/SWAG tasks.
1433
+ """,
1434
+ ELECTRA_START_DOCSTRING,
1435
+ )
1436
+ class ElectraForMultipleChoice(ElectraPreTrainedModel):
1437
+ def __init__(self, config):
1438
+ super().__init__(config)
1439
+
1440
+ self.electra = ElectraModel(config)
1441
+ self.sequence_summary = SequenceSummary(config)
1442
+ self.classifier = nn.Linear(config.hidden_size, 1)
1443
+
1444
+ # Initialize weights and apply final processing
1445
+ self.post_init()
1446
+
1447
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1448
+ @add_code_sample_docstrings(
1449
+ checkpoint=_CHECKPOINT_FOR_DOC,
1450
+ output_type=MultipleChoiceModelOutput,
1451
+ config_class=_CONFIG_FOR_DOC,
1452
+ )
1453
+ def forward(
1454
+ self,
1455
+ input_ids: Optional[torch.Tensor] = None,
1456
+ attention_mask: Optional[torch.Tensor] = None,
1457
+ token_type_ids: Optional[torch.Tensor] = None,
1458
+ position_ids: Optional[torch.Tensor] = None,
1459
+ head_mask: Optional[torch.Tensor] = None,
1460
+ inputs_embeds: Optional[torch.Tensor] = None,
1461
+ labels: Optional[torch.Tensor] = None,
1462
+ output_attentions: Optional[bool] = None,
1463
+ output_hidden_states: Optional[bool] = None,
1464
+ return_dict: Optional[bool] = None,
1465
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1466
+ r"""
1467
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1468
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1469
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1470
+ `input_ids` above)
1471
+ """
1472
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1473
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1474
+
1475
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1476
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1477
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1478
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1479
+ inputs_embeds = (
1480
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1481
+ if inputs_embeds is not None
1482
+ else None
1483
+ )
1484
+
1485
+ discriminator_hidden_states = self.electra(
1486
+ input_ids,
1487
+ attention_mask=attention_mask,
1488
+ token_type_ids=token_type_ids,
1489
+ position_ids=position_ids,
1490
+ head_mask=head_mask,
1491
+ inputs_embeds=inputs_embeds,
1492
+ output_attentions=output_attentions,
1493
+ output_hidden_states=output_hidden_states,
1494
+ return_dict=return_dict,
1495
+ )
1496
+
1497
+ sequence_output = discriminator_hidden_states[0]
1498
+
1499
+ pooled_output = self.sequence_summary(sequence_output)
1500
+ logits = self.classifier(pooled_output)
1501
+ reshaped_logits = logits.view(-1, num_choices)
1502
+
1503
+ loss = None
1504
+ if labels is not None:
1505
+ loss_fct = CrossEntropyLoss()
1506
+ loss = loss_fct(reshaped_logits, labels)
1507
+
1508
+ if not return_dict:
1509
+ output = (reshaped_logits,) + discriminator_hidden_states[1:]
1510
+ return ((loss,) + output) if loss is not None else output
1511
+
1512
+ return MultipleChoiceModelOutput(
1513
+ loss=loss,
1514
+ logits=reshaped_logits,
1515
+ hidden_states=discriminator_hidden_states.hidden_states,
1516
+ attentions=discriminator_hidden_states.attentions,
1517
+ )
1518
+
1519
+
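A multiple-choice sketch showing the expected `(batch_size, num_choices, sequence_length)` input layout; the choice head here is randomly initialized on top of the base discriminator, so the scores are illustrative only:

```python
import torch
from transformers import AutoTokenizer, ElectraForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = ElectraForMultipleChoice.from_pretrained("google/electra-small-discriminator")

prompt = "The glass fell off the table,"
choices = ["so it shattered on the floor.", "so it started to sing."]

# Encode each (prompt, choice) pair, then add the num_choices dimension: (1, 2, seq_len)
encoding = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, num_choices)
print(logits.argmax(dim=-1))
```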
1520
+ @add_start_docstrings(
1521
+ """ELECTRA Model with a `language modeling` head on top for CLM fine-tuning.""", ELECTRA_START_DOCSTRING
1522
+ )
1523
+ class ElectraForCausalLM(ElectraPreTrainedModel):
1524
+ _tied_weights_keys = ["generator_lm_head.weight"]
1525
+
1526
+ def __init__(self, config):
1527
+ super().__init__(config)
1528
+
1529
+ if not config.is_decoder:
1530
+ logger.warning("If you want to use `ElectraForCausalLM` as a standalone, add `is_decoder=True`.")
1531
+
1532
+ self.electra = ElectraModel(config)
1533
+ self.generator_predictions = ElectraGeneratorPredictions(config)
1534
+ self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
1535
+
1536
+ self.init_weights()
1537
+
1538
+ def get_output_embeddings(self):
1539
+ return self.generator_lm_head
1540
+
1541
+ def set_output_embeddings(self, new_embeddings):
1542
+ self.generator_lm_head = new_embeddings
1543
+
1544
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1545
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1546
+ def forward(
1547
+ self,
1548
+ input_ids: Optional[torch.Tensor] = None,
1549
+ attention_mask: Optional[torch.Tensor] = None,
1550
+ token_type_ids: Optional[torch.Tensor] = None,
1551
+ position_ids: Optional[torch.Tensor] = None,
1552
+ head_mask: Optional[torch.Tensor] = None,
1553
+ inputs_embeds: Optional[torch.Tensor] = None,
1554
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1555
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1556
+ labels: Optional[torch.Tensor] = None,
1557
+ past_key_values: Optional[List[torch.Tensor]] = None,
1558
+ use_cache: Optional[bool] = None,
1559
+ output_attentions: Optional[bool] = None,
1560
+ output_hidden_states: Optional[bool] = None,
1561
+ return_dict: Optional[bool] = None,
1562
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
1563
+ r"""
1564
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1565
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1566
+ the model is configured as a decoder.
1567
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1568
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1569
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1570
+
1571
+ - 1 for tokens that are **not masked**,
1572
+ - 0 for tokens that are **masked**.
1573
+
1574
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1575
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1576
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
1577
+ ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1578
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1579
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1580
+
1581
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1582
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1583
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1584
+ use_cache (`bool`, *optional*):
1585
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1586
+ `past_key_values`).
1587
+
1588
+ Returns:
1589
+
1590
+ Example:
1591
+
1592
+ ```python
1593
+ >>> from transformers import AutoTokenizer, ElectraForCausalLM, ElectraConfig
1594
+ >>> import torch
1595
+
1596
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-base-generator")
1597
+ >>> config = ElectraConfig.from_pretrained("google/electra-base-generator")
1598
+ >>> config.is_decoder = True
1599
+ >>> model = ElectraForCausalLM.from_pretrained("google/electra-base-generator", config=config)
1600
+
1601
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1602
+ >>> outputs = model(**inputs)
1603
+
1604
+ >>> prediction_logits = outputs.logits
1605
+ ```"""
1606
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1607
+ if labels is not None:
1608
+ use_cache = False
1609
+
1610
+ outputs = self.electra(
1611
+ input_ids,
1612
+ attention_mask=attention_mask,
1613
+ token_type_ids=token_type_ids,
1614
+ position_ids=position_ids,
1615
+ head_mask=head_mask,
1616
+ inputs_embeds=inputs_embeds,
1617
+ encoder_hidden_states=encoder_hidden_states,
1618
+ encoder_attention_mask=encoder_attention_mask,
1619
+ past_key_values=past_key_values,
1620
+ use_cache=use_cache,
1621
+ output_attentions=output_attentions,
1622
+ output_hidden_states=output_hidden_states,
1623
+ return_dict=return_dict,
1624
+ )
1625
+
1626
+ sequence_output = outputs[0]
1627
+ prediction_scores = self.generator_lm_head(self.generator_predictions(sequence_output))
1628
+
1629
+ lm_loss = None
1630
+ if labels is not None:
1631
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1632
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1633
+ labels = labels[:, 1:].contiguous()
1634
+ loss_fct = CrossEntropyLoss()
1635
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1636
+
1637
+ if not return_dict:
1638
+ output = (prediction_scores,) + outputs[1:]
1639
+ return ((lm_loss,) + output) if lm_loss is not None else output
1640
+
1641
+ return CausalLMOutputWithCrossAttentions(
1642
+ loss=lm_loss,
1643
+ logits=prediction_scores,
1644
+ past_key_values=outputs.past_key_values,
1645
+ hidden_states=outputs.hidden_states,
1646
+ attentions=outputs.attentions,
1647
+ cross_attentions=outputs.cross_attentions,
1648
+ )
1649
+
1650
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM.prepare_inputs_for_generation
1651
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1652
+ input_shape = input_ids.shape
1653
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1654
+ if attention_mask is None:
1655
+ attention_mask = input_ids.new_ones(input_shape)
1656
+
1657
+ # cut decoder_input_ids if past_key_values is used
1658
+ if past_key_values is not None:
1659
+ past_length = past_key_values[0][0].shape[2]
1660
+
1661
+ # Some generation methods already pass only the last input ID
1662
+ if input_ids.shape[1] > past_length:
1663
+ remove_prefix_length = past_length
1664
+ else:
1665
+ # Default to old behavior: keep only final ID
1666
+ remove_prefix_length = input_ids.shape[1] - 1
1667
+
1668
+ input_ids = input_ids[:, remove_prefix_length:]
1669
+
1670
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1671
+
1672
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM._reorder_cache
1673
+ def _reorder_cache(self, past_key_values, beam_idx):
1674
+ reordered_past = ()
1675
+ for layer_past in past_key_values:
1676
+ reordered_past += (
1677
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1678
+ )
1679
+ return reordered_past
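The two helpers above are what `generate()` relies on to feed only the last token once `past_key_values` are cached and to reorder the cache during beam search. A hedged generation sketch (the ELECTRA generator was not trained autoregressively, so the continuation will be low quality):

```python
from transformers import AutoTokenizer, ElectraConfig, ElectraForCausalLM

config = ElectraConfig.from_pretrained("google/electra-base-generator", is_decoder=True)
tokenizer = AutoTokenizer.from_pretrained("google/electra-base-generator")
model = ElectraForCausalLM.from_pretrained("google/electra-base-generator", config=config)

inputs = tokenizer("Hello, my dog is", return_tensors="pt")
# With use_cache=True, generate() calls prepare_inputs_for_generation above at every step
outputs = model.generate(**inputs, max_new_tokens=5, use_cache=True, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```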
venv/lib/python3.10/site-packages/transformers/models/idefics2/__init__.py ADDED
@@ -0,0 +1,74 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {"configuration_idefics2": ["Idefics2Config"]}
20
+
21
+
22
+ try:
23
+ if not is_vision_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["image_processing_idefics2"] = ["Idefics2ImageProcessor"]
29
+
30
+
31
+ try:
32
+ if not is_torch_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["modeling_idefics2"] = [
38
+ "IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST",
39
+ "Idefics2ForConditionalGeneration",
40
+ "Idefics2PreTrainedModel",
41
+ "Idefics2Model",
42
+ ]
43
+ _import_structure["processing_idefics2"] = ["Idefics2Processor"]
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_idefics2 import Idefics2Config
47
+
48
+ try:
49
+ if not is_vision_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .image_processing_idefics2 import Idefics2ImageProcessor
55
+
56
+ try:
57
+ if not is_torch_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .modeling_idefics2 import (
63
+ IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST,
64
+ Idefics2ForConditionalGeneration,
65
+ Idefics2Model,
66
+ Idefics2PreTrainedModel,
67
+ )
68
+ from .processing_idefics2 import Idefics2Processor
69
+
70
+
71
+ else:
72
+ import sys
73
+
74
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.18 kB).
 
venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/configuration_idefics2.cpython-310.pyc ADDED
Binary file (10 kB).
 
venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/convert_idefics2_weights_to_hf.cpython-310.pyc ADDED
Binary file (4.36 kB).
 
venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/image_processing_idefics2.cpython-310.pyc ADDED
Binary file (22.7 kB).
 
venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/modeling_idefics2.cpython-310.pyc ADDED
Binary file (62.7 kB).
 
venv/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/processing_idefics2.cpython-310.pyc ADDED
Binary file (13.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/idefics2/configuration_idefics2.py ADDED
@@ -0,0 +1,262 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Idefics2 model configuration"""
15
+
16
+ import os
17
+ from typing import Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+ from ..auto import CONFIG_MAPPING
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ class Idefics2VisionConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of an [`Idefics2VisionModel`]. It is used to instantiate an
30
+ Idefics2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
32
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics2 model
33
+ [HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b).
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ hidden_size (`int`, *optional*, defaults to 768):
40
+ Dimensionality of the encoder layers and the pooler layer.
41
+ intermediate_size (`int`, *optional*, defaults to 3072):
42
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
43
+ num_hidden_layers (`int`, *optional*, defaults to 12):
44
+ Number of hidden layers in the Transformer encoder.
45
+ num_attention_heads (`int`, *optional*, defaults to 12):
46
+ Number of attention heads for each attention layer in the Transformer encoder.
47
+ num_channels (`int`, *optional*, defaults to 3):
48
+ Number of channels in the input images.
49
+ image_size (`int`, *optional*, defaults to 224):
50
+ The size (resolution) of each image.
51
+ patch_size (`int`, *optional*, defaults to 32):
52
+ The size (resolution) of each patch.
53
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
54
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
55
+ `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
56
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
57
+ The epsilon used by the layer normalization layers.
58
+ attention_dropout (`float`, *optional*, defaults to 0.0):
59
+ The dropout ratio for the attention probabilities.
60
+ initializer_range (`float`, *optional*, defaults to 0.02):
61
+ The standard deviation for initializing all weight matrices in the model.
62
+
63
+ Example:
64
+
65
+ ```python
66
+ >>> from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer
67
+ >>> from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig
68
+
69
+ >>> # Initializing a Idefics2VisionConfig with google/siglip-base-patch16-224 style configuration
70
+ >>> configuration = Idefics2VisionConfig()
71
+
72
+ >>> # Initializing a Idefics2VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration
73
+ >>> model = Idefics2VisionTransformer(configuration)
74
+
75
+ >>> # Accessing the model configuration
76
+ >>> configuration = model.config
77
+ ```"""
78
+
79
+ model_type = "idefics2"
80
+
81
+ def __init__(
82
+ self,
83
+ hidden_size=768,
84
+ intermediate_size=3072,
85
+ num_hidden_layers=12,
86
+ num_attention_heads=12,
87
+ num_channels=3,
88
+ image_size=224,
89
+ patch_size=32,
90
+ hidden_act="gelu_pytorch_tanh",
91
+ layer_norm_eps=1e-6,
92
+ attention_dropout=0.0,
93
+ initializer_range=0.02,
94
+ **kwargs,
95
+ ):
96
+ super().__init__(**kwargs)
97
+
98
+ self.hidden_size = hidden_size
99
+ self.intermediate_size = intermediate_size
100
+ self.num_hidden_layers = num_hidden_layers
101
+ self.num_attention_heads = num_attention_heads
102
+ self.num_channels = num_channels
103
+ self.patch_size = patch_size
104
+ self.image_size = image_size
105
+ self.attention_dropout = attention_dropout
106
+ self.layer_norm_eps = layer_norm_eps
107
+ self.hidden_act = hidden_act
108
+ self.initializer_range = initializer_range
109
+
110
+ @classmethod
111
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
112
+ cls._set_token_in_kwargs(kwargs)
113
+
114
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
115
+
116
+ # get the vision config dict if we are loading from Idefics2Config
117
+ if config_dict.get("model_type") == "idefics2":
118
+ config_dict = config_dict["vision_config"]
119
+
120
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
121
+ logger.warning(
122
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
123
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
124
+ )
125
+
126
+ return cls.from_dict(config_dict, **kwargs)
127
+
128
+
129
+ class Idefics2PerceiverConfig(PretrainedConfig):
130
+ r"""
131
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
132
+ documentation from [`PretrainedConfig`] for more information.
133
+
134
+ Args:
135
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
136
+ The non-linear activation function (function or string) in the perceiver block.
137
+ resampler_n_latents (`int`, *optional*, defaults to 64):
138
+ Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
139
+ resampler_depth (`int`, *optional*, defaults to 3):
140
+ Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (<= 3).
141
+ resampler_n_heads (`int`, *optional*, defaults to 16):
142
+ Number of heads in each Transformer block (for multi-headed self-attention).
143
+ resampler_head_dim (`int`, *optional*, defaults to 96):
144
+ Dimensionality of each head projection in the Transformer block.
145
+ num_key_value_heads (`int`, *optional*, defaults to 4):
146
+ Number of key-value heads in the perceiver attention block.
147
+ attention_dropout (`float`, *optional*, defaults to 0.0):
148
+ The dropout ratio for the attention probabilities.
149
+ """
150
+
151
+ model_type = "idefics2"
152
+
153
+ def __init__(
154
+ self,
155
+ hidden_act="silu",
156
+ resampler_n_latents=64,
157
+ resampler_depth=3,
158
+ resampler_n_heads=16,
159
+ resampler_head_dim=96,
160
+ num_key_value_heads=4,
161
+ attention_dropout=0.0,
162
+ **kwargs,
163
+ ):
164
+ self.hidden_act = hidden_act
165
+ self.resampler_n_latents = resampler_n_latents
166
+ self.resampler_depth = resampler_depth
167
+ self.resampler_n_heads = resampler_n_heads
168
+ self.num_key_value_heads = num_key_value_heads
169
+ self.resampler_head_dim = resampler_head_dim
170
+ self.attention_dropout = attention_dropout
171
+ if self.num_key_value_heads > self.resampler_n_heads:
172
+ raise ValueError(
173
+ f"num_key_value_heads={self.num_key_value_heads} must be less than or equal to"
174
+ f" resampler_n_heads={self.resampler_n_heads}"
175
+ )
176
+ super().__init__(**kwargs)
177
+
178
+
179
+ class Idefics2Config(PretrainedConfig):
180
+ r"""
181
+ This is the configuration class to store the configuration of an [`Idefics2Model`]. It is used to instantiate an
182
+ Idefics2 model according to the specified arguments, defining the model architecture. Instantiating a
183
+ configuration with the defaults will yield a similar configuration to that of the Idefics2
184
+ [HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b) architecture.
185
+
186
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
187
+ documentation from [`PretrainedConfig`] for more information.
188
+
189
+ Args:
190
+ use_cache (`bool`, *optional*, defaults to `True`):
191
+ Whether or not the model should cache the key/value pairs of the attention mechanism.
192
+ image_token_id (`int`, *optional*, defaults to 32001):
193
+ The id of the "image" token.
194
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
195
+ Whether or not to tie the word embeddings with the token embeddings.
196
+ vision_config (`Idefics2VisionConfig` or `dict`, *optional*):
197
+ Custom vision config or dict
198
+ perceiver_config (`Idefics2PerceiverConfig` or `dict`, *optional*):
199
+ Custom perceiver config or dict
200
+ text_config (`MistralConfig` or `dict`, *optional*):
201
+ Custom text config or dict for the text model
202
+
203
+ Example:
204
+ ```python
205
+ >>> from transformers import Idefics2Model, Idefics2Config
206
+ >>> # Initializing configuration
207
+ >>> configuration = Idefics2Config()
208
+ >>> # Initializing a model from the configuration
209
+ >>> model = Idefics2Model(configuration)
210
+ >>> # Accessing the model configuration
211
+ >>> configuration = model.config
212
+ ```"""
213
+
214
+ model_type = "idefics2"
215
+ is_composition = True
216
+
217
+ def __init__(
218
+ self,
219
+ use_cache=True,
220
+ image_token_id=32_001,
221
+ tie_word_embeddings=False,
222
+ vision_config=None,
223
+ perceiver_config=None,
224
+ text_config=None,
225
+ **kwargs,
226
+ ):
227
+ self.image_token_id = image_token_id
228
+ self.use_cache = use_cache
229
+ self.tie_word_embeddings = tie_word_embeddings
230
+
231
+ if perceiver_config is None:
232
+ self.perceiver_config = Idefics2PerceiverConfig()
233
+ logger.info("perceiver_config is None, using default perceiver config")
234
+ elif isinstance(perceiver_config, dict):
235
+ self.perceiver_config = Idefics2PerceiverConfig(**perceiver_config)
236
+ elif isinstance(perceiver_config, Idefics2PerceiverConfig):
237
+ self.perceiver_config = perceiver_config
238
+
239
+ if vision_config is None:
240
+ self.vision_config = Idefics2VisionConfig()
241
+ logger.info("vision_config is None, using default vision config")
242
+ elif isinstance(vision_config, dict):
243
+ self.vision_config = Idefics2VisionConfig(**vision_config)
244
+ elif isinstance(vision_config, Idefics2VisionConfig):
245
+ self.vision_config = vision_config
246
+
247
+ if isinstance(text_config, dict):
248
+ text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "mistral"
249
+ text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
250
+ elif text_config is None:
251
+ logger.info("text_config is None, using default text config")
252
+ text_config = CONFIG_MAPPING["mistral"](
253
+ max_position_embeddings=4096 * 8,
254
+ rms_norm_eps=1e-5,
255
+ # None in the original configuration_mistral, we set it to the unk_token_id
256
+ pad_token_id=0,
257
+ tie_word_embeddings=False,
258
+ )
259
+
260
+ self.text_config = text_config
261
+
262
+ super().__init__(**kwargs, tie_word_embeddings=tie_word_embeddings)
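Since `Idefics2Config` accepts each sub-config as an object or a plain dict (and falls back to a default Mistral text config when `text_config` is `None`), here is a hedged sketch of composing a deliberately tiny config by hand; the hyperparameters below are illustrative and much smaller than the released idefics2-8b ones:

```python
from transformers import Idefics2Config
from transformers.models.idefics2.configuration_idefics2 import (
    Idefics2PerceiverConfig,
    Idefics2VisionConfig,
)

vision_config = Idefics2VisionConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
perceiver_config = Idefics2PerceiverConfig(resampler_n_latents=16, resampler_depth=1)

# text_config may also be a dict; "model_type" defaults to "mistral" when omitted
config = Idefics2Config(
    vision_config=vision_config,
    perceiver_config=perceiver_config,
    text_config={"hidden_size": 256, "intermediate_size": 512, "num_hidden_layers": 2,
                 "num_attention_heads": 4, "num_key_value_heads": 2},
)
print(config.text_config.model_type, config.perceiver_config.resampler_n_latents)
```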
venv/lib/python3.10/site-packages/transformers/models/idefics2/convert_idefics2_weights_to_hf.py ADDED
@@ -0,0 +1,185 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import copy
17
+
18
+ import torch
19
+ from accelerate import init_empty_weights
20
+
21
+ from transformers import (
22
+ AutoConfig,
23
+ AutoModelForCausalLM,
24
+ AutoTokenizer,
25
+ Idefics2Config,
26
+ Idefics2ForConditionalGeneration,
27
+ Idefics2ImageProcessor,
28
+ Idefics2Processor,
29
+ MistralConfig,
30
+ )
31
+
32
+
33
+ EPILOG_TXT = """Example:
34
+ python transformers/src/transformers/models/idefics2/convert_idefics2_weights_to_hf.py --original_model_id HuggingFaceM4/idefics2-8b --output_hub_path org/idefics2
35
+ """
36
+
37
+
38
+ KEYS_TO_MODIFY_MAPPING = {
39
+ "lm_head.weight": "lm_head.linear.weight",
40
+ "model.layers": "model.text_model.layers",
41
+ "model.norm": "model.text_model.norm",
42
+ "model.perceiver_resampler": "model.connector.perceiver_resampler",
43
+ "model.modality_projection": "model.connector.modality_projection",
44
+ }
45
+
46
+
47
+ WEIGHTS_TO_MERGE_MAPPING = (
48
+ # (weights to merge in merging order), (new weight name)
49
+ (
50
+ ("model.embed_tokens.weight", "model.embed_tokens.additional_embedding.weight"),
51
+ "model.text_model.embed_tokens.weight",
52
+ ),
53
+ (("lm_head.linear.weight", "additional_fc.weight"), "lm_head.weight"),
54
+ )
55
+
56
+
57
+ def convert_state_dict_to_hf(state_dict):
58
+ new_state_dict = {}
59
+ for key, value in state_dict.items():
60
+ if key.endswith(".inv_freq"):
61
+ continue
62
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
63
+ if key_to_modify in key:
64
+ key = key.replace(key_to_modify, new_key)
65
+
66
+ new_state_dict[key] = value
67
+ return new_state_dict
68
+
69
+
70
+ def merge_weights(state_dict):
71
+ new_state_dict = copy.deepcopy(state_dict)
72
+
73
+ # Merge the weights
74
+ for weights_to_merge, new_weight_name in WEIGHTS_TO_MERGE_MAPPING:
75
+ for weight in weights_to_merge:
76
+ assert weight in state_dict, f"Weight {weight} is missing in the state dict"
77
+ if new_weight_name not in new_state_dict:
78
+ new_state_dict[new_weight_name] = [state_dict[weight]]
79
+ else:
80
+ new_state_dict[new_weight_name].append(state_dict[weight])
81
+ new_state_dict[new_weight_name] = torch.cat(new_state_dict[new_weight_name], dim=0)
82
+
83
+ # Remove the weights that were merged
84
+ for weights_to_merge, new_weight_name in WEIGHTS_TO_MERGE_MAPPING:
85
+ for weight in weights_to_merge:
86
+ if weight in new_state_dict and weight != new_weight_name:
87
+ new_state_dict.pop(weight)
88
+
89
+ return new_state_dict
90
+
91
+
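A toy illustration of the row-concatenation that `merge_weights` above performs (the tensor shapes are made up; importing the conversion script also requires `accelerate`, since it is imported at the top of the file):

```python
import torch
from transformers.models.idefics2.convert_idefics2_weights_to_hf import merge_weights

# Stand-ins for the real checkpoint tensors: 4 base vocab rows plus 2 added-token rows
state_dict = {
    "model.embed_tokens.weight": torch.zeros(4, 8),
    "model.embed_tokens.additional_embedding.weight": torch.ones(2, 8),
    "lm_head.linear.weight": torch.zeros(4, 8),
    "additional_fc.weight": torch.ones(2, 8),
}

merged = merge_weights(state_dict)
print(merged["model.text_model.embed_tokens.weight"].shape)  # torch.Size([6, 8])
print(merged["lm_head.weight"].shape)                        # torch.Size([6, 8])
print("model.embed_tokens.weight" in merged)                  # False: source keys are dropped
```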
92
+ def get_config(checkpoint):
93
+ if checkpoint == "HuggingFaceM4/idefics2":
94
+ # We load the config then recreate to use the text_config
95
+ config = AutoConfig.from_pretrained(checkpoint)
96
+ text_config = MistralConfig(
97
+ vocab_size=config.vocab_size + config.additional_vocab_size,
98
+ hidden_size=config.hidden_size,
99
+ intermediate_size=config.intermediate_size,
100
+ num_hidden_layers=config.num_hidden_layers,
101
+ num_attention_heads=config.num_attention_heads,
102
+ num_key_value_heads=config.num_key_value_heads,
103
+ hidden_act=config.hidden_act,
104
+ max_position_embeddings=config.max_position_embeddings,
105
+ initializer_range=config.initializer_range,
106
+ rms_norm_eps=config.rms_norm_eps,
107
+ tie_word_embeddings=config.tie_word_embeddings,
108
+ rope_theta=config.rope_theta,
109
+ sliding_window=config.sliding_window,
110
+ attention_dropout=config.attention_dropout,
111
+ pad_token_id=config.pad_token_id,
112
+ bos_token_id=config.bos_token_id,
113
+ eos_token_id=config.eos_token_id,
114
+ )
115
+ perceiver_config = config.perceiver_config.to_dict()
116
+ config = Idefics2Config(
117
+ text_config=text_config.to_dict(),
118
+ vision_config=config.vision_config,
119
+ perceiver_config=perceiver_config,
120
+ use_cache=config.use_cache,
121
+ image_token_id=config.image_token_id,
122
+ tie_word_embeddings=config.tie_word_embeddings,
123
+ )
124
+ return config
125
+
126
+ return AutoConfig.from_pretrained(checkpoint)
127
+
128
+
129
+ def convert_idefics2_hub_to_hf(original_model_id, output_hub_path, push_to_hub):
130
+ # The original model maps to AutoModelForCausalLM; the converted model maps to Idefics2ForConditionalGeneration
131
+ original_model = AutoModelForCausalLM.from_pretrained(original_model_id, trust_remote_code=True)
132
+ # The original model doesn't use the idefics2 processing objects
133
+ image_seq_len = original_model.config.perceiver_config.resampler_n_latents
134
+ image_processor = Idefics2ImageProcessor()
135
+ tokenizer = AutoTokenizer.from_pretrained(original_model_id)
136
+ processor = Idefics2Processor(
137
+ image_processor=image_processor,
138
+ tokenizer=tokenizer,
139
+ image_seq_len=image_seq_len,
140
+ )
141
+ state_dict = original_model.state_dict()
142
+ state_dict = convert_state_dict_to_hf(state_dict)
143
+
144
+ # Merge weights
145
+ state_dict = merge_weights(state_dict)
146
+
147
+ config = get_config(original_model_id)
148
+
149
+ with init_empty_weights():
150
+ model = Idefics2ForConditionalGeneration(config)
151
+
152
+ model.load_state_dict(state_dict, strict=True, assign=True)
153
+
154
+ model.save_pretrained(output_hub_path)
155
+ processor.save_pretrained(output_hub_path)
156
+
157
+ if push_to_hub:
158
+ model.push_to_hub(output_hub_path, private=True)
159
+ processor.push_to_hub(output_hub_path, private=True)
160
+
161
+
162
+ def main():
163
+ parser = argparse.ArgumentParser(
164
+ epilog=EPILOG_TXT,
165
+ formatter_class=argparse.RawDescriptionHelpFormatter,
166
+ )
167
+ parser.add_argument(
168
+ "--original_model_id",
169
+ help="Hub location of the text model",
170
+ )
171
+ parser.add_argument(
172
+ "--output_hub_path",
173
+ help="Location on the hub of the converted model",
174
+ )
175
+ parser.add_argument(
176
+ "--push_to_hub",
177
+ action="store_true",
178
+ help="If set, the model will be pushed to the hub after conversion.",
179
+ )
180
+ args = parser.parse_args()
181
+ convert_idefics2_hub_to_hf(args.original_model_id, args.output_hub_path, args.push_to_hub)
182
+
183
+
184
+ if __name__ == "__main__":
185
+ main()
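
A quick way to see what the merge step in this script does is to run the same concatenation on toy tensors. This is only an illustrative sketch: the shapes and the key names in the comments mirror the entries of WEIGHTS_TO_MERGE_MAPPING, not the real checkpoint sizes.

import torch

# Hypothetical stand-ins for the base embedding matrix and the additional rows
# covering the extra tokens added to the vocabulary (toy sizes).
base_embed = torch.randn(10, 4)   # e.g. "model.text_model.embed_tokens.weight" after renaming
extra_embed = torch.randn(2, 4)   # e.g. "model.embed_tokens.additional_embedding.weight"

# merge_weights() collects the tensors in mapping order and concatenates them
# along dim 0, so the additional rows end up appended after the base vocabulary.
merged = torch.cat([base_embed, extra_embed], dim=0)
assert merged.shape == (12, 4)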
venv/lib/python3.10/site-packages/transformers/models/idefics2/image_processing_idefics2.py ADDED
@@ -0,0 +1,596 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
22
+ from ...image_transforms import PaddingMode, pad, resize, to_channel_dimension_format
23
+ from ...image_utils import (
24
+ IMAGENET_STANDARD_MEAN,
25
+ IMAGENET_STANDARD_STD,
26
+ ChannelDimension,
27
+ ImageInput,
28
+ PILImageResampling,
29
+ get_image_size,
30
+ infer_channel_dimension_format,
31
+ is_scaled_image,
32
+ is_valid_image,
33
+ to_numpy_array,
34
+ valid_images,
35
+ validate_preprocess_arguments,
36
+ )
37
+ from ...utils import TensorType, is_vision_available, logging
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ if is_vision_available():
44
+ import PIL
45
+ from PIL import Image
46
+
47
+
48
+ def get_resize_output_image_size(image, size, input_data_format) -> Tuple[int, int]:
49
+ """
50
+ Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
51
+
52
+ Args:
53
+ image (`np.ndarray`):
54
+ Image to resize.
55
+ size (`Dict[str, int]`):
56
+ Size of the output image containing the keys "shortest_edge" and "longest_edge".
57
+ input_data_format (`ChannelDimension` or `str`):
58
+ The channel dimension format of the input image.
59
+
60
+ Returns:
61
+ The output size of the image after resizing.
62
+ """
63
+ height, width = get_image_size(image, channel_dim=input_data_format)
64
+
65
+ min_len = size["shortest_edge"]
66
+ max_len = size["longest_edge"]
67
+ aspect_ratio = width / height
68
+
69
+ if width >= height and width > max_len:
70
+ width = max_len
71
+ height = int(width / aspect_ratio)
72
+ elif height > width and height > max_len:
73
+ height = max_len
74
+ width = int(height * aspect_ratio)
75
+ height = max(height, min_len)
76
+ width = max(width, min_len)
77
+ return height, width
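
As a worked example of the rule above, assuming the names defined and imported in this file are in scope (the input dimensions are made up): an 800x1200 (height x width) image with the default {"shortest_edge": 378, "longest_edge": 980} is capped on its long side and scaled proportionally on the other, and both results already clear the 378 floor.

# aspect_ratio = 1200 / 800 = 1.5; width > longest_edge, so width -> 980
# and height -> int(980 / 1.5) = 653; the max(., 378) clamps are then no-ops.
example = np.zeros((800, 1200, 3), dtype=np.uint8)
height, width = get_resize_output_image_size(
    example, {"shortest_edge": 378, "longest_edge": 980}, ChannelDimension.LAST
)
assert (height, width) == (653, 980)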
78
+
79
+
80
+ def make_list_of_images(images: ImageInput) -> List[List[np.ndarray]]:
81
+ """
82
+ Convert a single image or a list of images to a list of numpy arrays.
83
+
84
+ Args:
85
+ images (`ImageInput`):
86
+ A single image or a list of images.
87
+
88
+ Returns:
89
+ A list of numpy arrays.
90
+ """
91
+ # If it's a single image, convert it to a list of lists
92
+ if is_valid_image(images):
93
+ images = [[images]]
94
+ # If it's a list of images, it's a single batch, so convert it to a list of lists
95
+ elif isinstance(images, (list, tuple)) and len(images) > 0 and is_valid_image(images[0]):
96
+ images = [images]
97
+ # If it's a list of batches, it's already in the right format
98
+ elif (
99
+ isinstance(images, (list, tuple))
100
+ and len(images) > 0
101
+ and isinstance(images[0], (list, tuple))
102
+ and is_valid_image(images[0][0])
103
+ ):
104
+ pass
105
+ else:
106
+ raise ValueError(
107
+ "Invalid input type. Must be a single image, a list of images, or a list of batches of images."
108
+ )
109
+ return images
110
+
111
+
112
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
113
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
114
+ """
115
+ Return the maximum value across all indices of an iterable of values.
116
+ """
117
+ return [max(values_i) for values_i in zip(*values)]
118
+
119
+
120
+ def get_max_height_width(
121
+ images_list: List[List[np.ndarray]], input_data_format: Optional[Union[str, ChannelDimension]] = None
122
+ ) -> List[int]:
123
+ """
124
+ Get the maximum height and width across all images in a batch.
125
+ """
126
+ if input_data_format is None:
127
+ input_data_format = infer_channel_dimension_format(images_list[0][0])
128
+
129
+ image_sizes = []
130
+ for images in images_list:
131
+ for image in images:
132
+ image_sizes.append(get_image_size(image, channel_dim=input_data_format))
133
+
134
+ max_height, max_width = max_across_indices(image_sizes)
135
+ return (max_height, max_width)
136
+
137
+
138
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
139
+ def make_pixel_mask(
140
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
141
+ ) -> np.ndarray:
142
+ """
143
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
144
+
145
+ Args:
146
+ image (`np.ndarray`):
147
+ Image to make the pixel mask for.
148
+ output_size (`Tuple[int, int]`):
149
+ Output size of the mask.
150
+ """
151
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
152
+ mask = np.zeros(output_size, dtype=np.int64)
153
+ mask[:input_height, :input_width] = 1
154
+ return mask
155
+
156
+
157
+ # FIXME Amy: merge this function with the one in image_transforms.py
158
+ def convert_to_rgb(image: ImageInput) -> ImageInput:
159
+ """
160
+ Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image
161
+ as is.
162
+ Args:
163
+ image (Image):
164
+ The image to convert.
165
+ """
166
+ if not isinstance(image, PIL.Image.Image):
167
+ return image
168
+
169
+ # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
170
+ # for transparent images. The call to `alpha_composite` handles this case
171
+ if image.mode == "RGB":
172
+ return image
173
+
174
+ image_rgba = image.convert("RGBA")
175
+ background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
176
+ alpha_composite = Image.alpha_composite(background, image_rgba)
177
+ alpha_composite = alpha_composite.convert("RGB")
178
+ return alpha_composite
179
+
180
+
181
+ class Idefics2ImageProcessor(BaseImageProcessor):
182
+ r"""
183
+ Constructs an Idefics2 image processor.
184
+
185
+ Args:
186
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
187
+ Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA.
188
+ Only has an effect if the input image is in the PIL format.
189
+ do_resize (`bool`, *optional*, defaults to `True`):
190
+ Whether to resize the image. The longest edge of the image is resized to be <= `size["longest_edge"]`, with the
191
+ shortest edge resized to keep the input aspect ratio, with a minimum size of `size["shortest_edge"]`.
192
+ size (`Dict`, *optional*):
193
+ Controls the size of the output image. This is a dictionary containing the keys "shortest_edge" and "longest_edge".
194
+ resample (`Resampling`, *optional*, defaults to `Resampling.BILINEAR`):
195
+ Resampling filter to use when resizing the image.
196
+ do_rescale (`bool`, *optional*, defaults to `True`):
197
+ Whether to rescale the image. If set to `True`, the image is rescaled to have pixel values between 0 and 1.
198
+ rescale_factor (`float`, *optional*, defaults to `1/255`):
199
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
200
+ do_normalize (`bool`, *optional*, defaults to `True`):
201
+ Whether to normalize the image. If set to `True`, the image is normalized to have a mean of `image_mean` and
202
+ a standard deviation of `image_std`.
203
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
205
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
206
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess`
207
+ method.
207
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
208
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
209
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
210
+ method.
211
+ do_pad (`bool`, *optional*, defaults to `True`):
212
+ Whether or not to pad the images to the largest height and width in the batch and number of images per
213
+ sample in the batch, such that the returned tensor is of shape (batch_size, max_num_images, num_channels, max_height, max_width).
214
+ do_image_splitting (`bool`, *optional*, defaults to `False`):
215
+ Whether to split the image into a sequence of 4 equal sub-images concatenated with the original image. That
216
+ strategy was first introduced in https://arxiv.org/abs/2311.06607.
217
+ """
218
+
219
+ model_input_names = ["pixel_values"]
220
+
221
+ def __init__(
222
+ self,
223
+ do_convert_rgb: bool = True,
224
+ do_resize: bool = True,
225
+ size: Dict[str, int] = None,
226
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
227
+ do_rescale: bool = True,
228
+ rescale_factor: float = 1 / 255,
229
+ do_normalize: bool = True,
230
+ image_mean: Optional[Union[float, List[float]]] = None,
231
+ image_std: Optional[Union[float, List[float]]] = None,
232
+ do_pad: bool = True,
233
+ do_image_splitting: bool = False,
234
+ **kwargs,
235
+ ) -> None:
236
+ super().__init__(**kwargs)
237
+ self.do_convert_rgb = do_convert_rgb
238
+ self.do_resize = do_resize
239
+ self.size = size if size is not None else {"shortest_edge": 378, "longest_edge": 980}
240
+ self.resample = resample
241
+ self.do_rescale = do_rescale
242
+ self.rescale_factor = rescale_factor
243
+ self.do_normalize = do_normalize
244
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
245
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
246
+ self.do_pad = do_pad
247
+ self.do_image_splitting = do_image_splitting
248
+
249
+ def resize(
250
+ self,
251
+ image: np.ndarray,
252
+ size: Dict[str, int],
253
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
254
+ data_format: Optional[Union[str, ChannelDimension]] = None,
255
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
256
+ **kwargs,
257
+ ) -> np.ndarray:
258
+ """
259
+ Resize an image. The longest edge of the image is resized to be at most size["longest_edge"], with the
260
+ other edge resized to keep the input aspect ratio and a minimum size of size["shortest_edge"].
261
+
262
+ Args:
263
+ image (`np.ndarray`):
264
+ Image to resize.
265
+ size (`Dict[str, int]`):
266
+ Size of the output image.
267
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
268
+ Resampling filter to use when resizing the image.
269
+ data_format (`str` or `ChannelDimension`, *optional*):
270
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
271
+ input_data_format (`ChannelDimension` or `str`, *optional*):
272
+ The channel dimension format of the input image. If not provided, it will be inferred.
273
+ """
274
+ if "shortest_edge" in size and "longest_edge" in size:
275
+ size = get_resize_output_image_size(image, size, input_data_format)
276
+ elif "height" in size and "width" in size:
277
+ size = (size["height"], size["width"])
278
+ else:
279
+ raise ValueError(
280
+ "size must be a dictionary with keys 'shortest_edge' and 'longest_edge' or 'height' and 'width'."
281
+ )
282
+ return resize(
283
+ image, size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
284
+ )
285
+
286
+ # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image
287
+ def _pad_image(
288
+ self,
289
+ image: np.ndarray,
290
+ output_size: Tuple[int, int],
291
+ constant_values: Union[float, Iterable[float]] = 0,
292
+ data_format: Optional[ChannelDimension] = None,
293
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
294
+ ) -> np.ndarray:
295
+ """
296
+ Pad an image with zeros to the given size.
297
+ """
298
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
299
+ output_height, output_width = output_size
300
+
301
+ pad_bottom = output_height - input_height
302
+ pad_right = output_width - input_width
303
+ padding = ((0, pad_bottom), (0, pad_right))
304
+ padded_image = pad(
305
+ image,
306
+ padding,
307
+ mode=PaddingMode.CONSTANT,
308
+ constant_values=constant_values,
309
+ data_format=data_format,
310
+ input_data_format=input_data_format,
311
+ )
312
+ return padded_image
313
+
314
+ def pad(
315
+ self,
316
+ images: List[np.ndarray],
317
+ constant_values: Union[float, Iterable[float]] = 0,
318
+ return_pixel_mask: bool = True,
319
+ return_tensors: Optional[Union[str, TensorType]] = None,
320
+ data_format: Optional[ChannelDimension] = None,
321
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
322
+ ) -> BatchFeature:
323
+ """
324
+ For a list of lists of images, pads each image at the bottom and right with zeros up to the largest height and width in the batch.
325
+ For each sample in the batch, pads the sample with empty images up to the maximum number of images per sample in the batch. Optionally returns a pixel mask.
326
+
327
+ Args:
328
+ images (`np.ndarray`):
329
+ List of list of images to pad. Pads to the largest height and width in the batch.
330
+ constant_values (`float` or `Iterable[float]`, *optional*):
331
+ The value to use for the padding if `mode` is `"constant"`.
332
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
333
+ Whether to return a pixel mask.
334
+ return_tensors (`str` or `TensorType`, *optional*):
335
+ The type of tensors to return. Can be one of:
336
+ - Unset: Return a list of `np.ndarray`.
337
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
338
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
339
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
340
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
341
+ data_format (`str` or `ChannelDimension`, *optional*):
342
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
343
+ input_data_format (`ChannelDimension` or `str`, *optional*):
344
+ The channel dimension format of the input image. If not provided, it will be inferred.
345
+ """
346
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
347
+
348
+ batch_size = len(images)
349
+ max_num_images = max(len(images_) for images_ in images)
350
+ input_data_format = (
351
+ infer_channel_dimension_format(images[0][0]) if input_data_format is None else input_data_format
352
+ )
353
+ data_format = input_data_format if data_format is None else data_format
354
+
355
+ def empty_image(size, input_data_format):
356
+ if input_data_format == ChannelDimension.FIRST:
357
+ return np.zeros((3, *size), dtype=np.uint8)
358
+ elif input_data_format == ChannelDimension.LAST:
359
+ return np.zeros((*size, 3), dtype=np.uint8)
360
+ raise ValueError("Invalid channel dimension format.")
361
+
362
+ padded_images_list = [
363
+ [empty_image(pad_size, data_format) for _ in range(max_num_images)] for _ in range(batch_size)
364
+ ]
365
+ padded_masks = [[np.zeros(pad_size) for _ in range(max_num_images)] for _ in range(batch_size)]
366
+
367
+ for batch_idx in range(batch_size):
368
+ for sample_idx, image in enumerate(images[batch_idx]):
369
+ padded_images_list[batch_idx][sample_idx] = self._pad_image(
370
+ image,
371
+ pad_size,
372
+ constant_values=constant_values,
373
+ data_format=data_format,
374
+ input_data_format=input_data_format,
375
+ )
376
+ padded_masks[batch_idx][sample_idx] = make_pixel_mask(
377
+ image, output_size=pad_size, input_data_format=input_data_format
378
+ )
379
+
380
+ padded_masks = padded_masks if return_pixel_mask else None
381
+ return padded_images_list, padded_masks
382
+
383
+ def _crop(
384
+ self,
385
+ im: np.ndarray,
386
+ w1: int,
387
+ h1: int,
388
+ w2: int,
389
+ h2: int,
390
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
391
+ ) -> np.ndarray:
392
+ if input_data_format == ChannelDimension.FIRST:
393
+ return im[:, h1:h2, w1:w2]
394
+ elif input_data_format == ChannelDimension.LAST:
395
+ return im[h1:h2, w1:w2, :]
396
+
397
+ def split_image(
398
+ self,
399
+ image: np.ndarray,
400
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
401
+ ):
402
+ """
403
+ Split an image into 4 equal sub-images, and concatenate that sequence with the original image.
404
+ That means that a single image becomes a sequence of 5 images.
405
+ This is a "trick" to spend more compute on each image with no changes in the vision encoder.
406
+
407
+ Args:
408
+ image (`np.ndarray`):
409
+ Image to split.
410
+ input_data_format (`ChannelDimension` or `str`, *optional*):
411
+ The channel dimension format of the input image. If not provided, it will be inferred.
412
+ """
413
+ height, width = get_image_size(image, input_data_format)
414
+
415
+ mid_width = width // 2
416
+ mid_height = height // 2
417
+ return [
418
+ self._crop(image, 0, 0, mid_width, mid_height, input_data_format),
419
+ self._crop(image, mid_width, 0, width, mid_height, input_data_format),
420
+ self._crop(image, 0, mid_height, mid_width, height, input_data_format),
421
+ self._crop(image, mid_width, mid_height, width, height, input_data_format),
422
+ image,
423
+ ]
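
A small sanity check of the cropping geometry above, assuming this module's names are in scope; the 100x200 input size and the zero-filled array are arbitrary illustrative choices.

processor = Idefics2ImageProcessor(do_image_splitting=True)
image = np.zeros((100, 200, 3), dtype=np.uint8)  # height=100, width=200, channels last
tiles = processor.split_image(image, input_data_format=ChannelDimension.LAST)
# Four 50x100 quadrants followed by the untouched 100x200 original image.
assert [t.shape for t in tiles] == [(50, 100, 3)] * 4 + [(100, 200, 3)]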
424
+
425
+ def preprocess(
426
+ self,
427
+ images: ImageInput,
428
+ do_convert_rgb: Optional[bool] = None,
429
+ do_resize: Optional[bool] = None,
430
+ size: Optional[Dict[str, int]] = None,
431
+ resample: PILImageResampling = None,
432
+ do_rescale: Optional[bool] = None,
433
+ rescale_factor: Optional[float] = None,
434
+ do_normalize: Optional[bool] = None,
435
+ image_mean: Optional[Union[float, List[float]]] = None,
436
+ image_std: Optional[Union[float, List[float]]] = None,
437
+ do_pad: Optional[bool] = None,
438
+ do_image_splitting: Optional[bool] = None,
439
+ return_tensors: Optional[Union[str, TensorType]] = None,
440
+ input_data_format: Optional[ChannelDimension] = None,
441
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
442
+ ):
443
+ """
444
+ Preprocess a batch of images.
445
+
446
+ Args:
447
+ images (`ImageInput`):
448
+ A list of images to preprocess.
449
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
450
+ Whether to convert the image to RGB.
451
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
452
+ Whether to resize the image.
453
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
454
+ Size of the image after resizing. The longest edge of the image is resized to be at most size["longest_edge"], with
455
+ the other edge resized to keep the input aspect ratio.
456
+ resample (`int`, *optional*, defaults to `self.resample`):
457
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
458
+ has an effect if `do_resize` is set to `True`.
459
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
460
+ Whether to rescale the image.
461
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
462
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
463
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
464
+ Whether to normalize the image.
465
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
466
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
467
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
468
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
469
+ `True`.
470
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
471
+ Whether or not to pad the images to the largest height and width in the batch.
472
+ do_image_splitting (`bool`, *optional*, defaults to `self.do_image_splitting`):
473
+ Whether to split the image into a sequence of 4 equal sub-images concatenated with the original image. That
474
+ strategy was first introduced in https://arxiv.org/abs/2311.06607.
475
+ return_tensors (`str` or `TensorType`, *optional*):
476
+ The type of tensors to return. Can be one of:
477
+ - Unset: Return a list of `np.ndarray`.
478
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
479
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
480
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
481
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
482
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
483
+ The channel dimension format for the output image. Can be one of:
484
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
485
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
486
+ - Unset: Use the channel dimension format of the input image.
487
+ input_data_format (`ChannelDimension` or `str`, *optional*):
488
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
489
+ from the input image. Can be one of:
490
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
491
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
492
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
493
+ """
494
+ do_resize = do_resize if do_resize is not None else self.do_resize
495
+ size = size if size is not None else self.size
496
+ resample = resample if resample is not None else self.resample
497
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
498
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
499
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
500
+ image_mean = image_mean if image_mean is not None else self.image_mean
501
+ image_std = image_std if image_std is not None else self.image_std
502
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
503
+ do_pad = do_pad if do_pad is not None else self.do_pad
504
+ do_image_splitting = do_image_splitting if do_image_splitting is not None else self.do_image_splitting
505
+
506
+ images_list = make_list_of_images(images)
507
+
508
+ if not valid_images(images_list[0]):
509
+ raise ValueError(
510
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
511
+ "torch.Tensor, tf.Tensor or jax.ndarray."
512
+ )
513
+
514
+ validate_preprocess_arguments(
515
+ do_rescale=do_rescale,
516
+ rescale_factor=rescale_factor,
517
+ do_normalize=do_normalize,
518
+ image_mean=image_mean,
519
+ image_std=image_std,
520
+ do_resize=do_resize,
521
+ size=size,
522
+ resample=resample,
523
+ )
524
+
525
+ if do_convert_rgb:
526
+ images_list = [[convert_to_rgb(image) for image in images] for images in images_list]
527
+
528
+ # All transformations expect numpy arrays.
529
+ images_list = [[to_numpy_array(image) for image in images] for images in images_list]
530
+
531
+ if is_scaled_image(images_list[0][0]) and do_rescale:
532
+ logger.warning_once(
533
+ "It looks like you are trying to rescale already rescaled images. If the input"
534
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
535
+ )
536
+
537
+ if input_data_format is None:
538
+ # We assume that all images have the same channel dimension format.
539
+ input_data_format = infer_channel_dimension_format(images_list[0][0])
540
+
541
+ if do_image_splitting:
542
+ new_images_list = []
543
+ for images in images_list:
544
+ new_images = []
545
+ for image in images:
546
+ new_images.extend(self.split_image(image, input_data_format))
547
+ new_images_list.append(new_images)
548
+ images_list = new_images_list
549
+
550
+ if do_resize:
551
+ images_list = [
552
+ [
553
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
554
+ for image in images
555
+ ]
556
+ for images in images_list
557
+ ]
558
+
559
+ if do_rescale:
560
+ images_list = [
561
+ [
562
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
563
+ for image in images
564
+ ]
565
+ for images in images_list
566
+ ]
567
+
568
+ if do_normalize:
569
+ images_list = [
570
+ [
571
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
572
+ for image in images
573
+ ]
574
+ for images in images_list
575
+ ]
576
+
577
+ pixel_attention_mask = None
578
+ if do_pad:
579
+ images_list, pixel_attention_mask = self.pad(
580
+ images_list, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=input_data_format
581
+ )
582
+
583
+ if data_format is not None:
584
+ images_list = [
585
+ [
586
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
587
+ for image in images
588
+ ]
589
+ for images in images_list
590
+ ]
591
+
592
+ data = {"pixel_values": np.array(images_list) if do_pad else images_list} # Faster tensor conversion
593
+ if pixel_attention_mask is not None:
594
+ data["pixel_attention_mask"] = np.array(pixel_attention_mask) if do_pad else pixel_attention_mask
595
+
596
+ return BatchFeature(data=data, tensor_type=return_tensors)
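
Taken together, a typical call into this processor looks like the following sketch. The random images and the uneven number of images per sample are only there to exercise the resize and padding paths; this is not an official example.

import numpy as np
from PIL import Image

from transformers import Idefics2ImageProcessor

processor = Idefics2ImageProcessor()  # defaults: convert to RGB, resize, rescale, normalize, pad
img_a = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
img_b = Image.fromarray(np.random.randint(0, 256, (300, 200, 3), dtype=np.uint8))

# Two samples with a different number of images each, so padding adds an empty image.
batch = processor(images=[[img_a, img_b], [img_a]], return_tensors="pt")
print(batch["pixel_values"].shape)          # (batch_size, max_num_images, 3, max_height, max_width)
print(batch["pixel_attention_mask"].shape)  # (batch_size, max_num_images, max_height, max_width)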
venv/lib/python3.10/site-packages/transformers/models/idefics2/modeling_idefics2.py ADDED
@@ -0,0 +1,1956 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Idefics2 model."""
16
+
17
+ import inspect
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.nn.functional as F
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import CrossEntropyLoss
27
+
28
+ from ... import PreTrainedModel
29
+ from ...activations import ACT2FN
30
+ from ...cache_utils import Cache, DynamicCache
31
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
32
+ from ...modeling_outputs import BaseModelOutput, ModelOutput
33
+ from ...utils import (
34
+ add_start_docstrings,
35
+ add_start_docstrings_to_model_forward,
36
+ is_flash_attn_2_available,
37
+ is_flash_attn_greater_or_equal_2_10,
38
+ logging,
39
+ replace_return_docstrings,
40
+ )
41
+ from ..auto import AutoModel
42
+ from .configuration_idefics2 import Idefics2Config, Idefics2VisionConfig
43
+
44
+
45
+ if is_flash_attn_2_available():
46
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
47
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
48
+
49
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
50
+
51
+
52
+ logger = logging.get_logger(__name__)
53
+
54
+ _CONFIG_FOR_DOC = "Idefics2Config"
55
+
56
+ IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST = [
57
+ "HuggingFaceM4/idefics2-8b",
58
+ # See all IDEFICS2 models at https://huggingface.co/models?filter=idefics2
59
+ ]
60
+
61
+
62
+ @dataclass
63
+ class Idefics2BaseModelOutputWithPast(ModelOutput):
64
+ """
65
+ Base class for Idefics2 model's outputs that may also contain a past key/values (to speed up sequential decoding).
66
+ Args:
67
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
68
+ Sequence of hidden-states at the output of the last layer of the model.
69
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
70
+ hidden_size)` is output.
71
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
72
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
73
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
74
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
75
+ encoder_sequence_length, embed_size_per_head)`.
76
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
77
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
78
+ input) to speed up sequential decoding.
79
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
80
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
81
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
82
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
83
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
84
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
85
+ sequence_length)`.
86
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
87
+ heads.
88
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
89
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
90
+ sequence_length, hidden_size)`.
91
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
92
+ """
93
+
94
+ last_hidden_state: torch.FloatTensor = None
95
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
96
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
97
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
98
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
99
+
100
+
101
+ @dataclass
102
+ # Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->Idefics2
103
+ class Idefics2CausalLMOutputWithPast(ModelOutput):
104
+ """
105
+ Base class for Idefics2 causal language model (or autoregressive) outputs.
106
+ Args:
107
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
108
+ Language modeling loss (for next-token prediction).
109
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
110
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
111
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
112
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
113
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
114
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
115
+ `past_key_values` input) to speed up sequential decoding.
116
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
117
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
118
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
119
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
120
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
121
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
122
+ sequence_length)`.
123
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
124
+ heads.
125
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
126
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
127
+ sequence_length, hidden_size)`.
128
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
129
+ """
130
+
131
+ loss: Optional[torch.FloatTensor] = None
132
+ logits: torch.FloatTensor = None
133
+ past_key_values: Optional[List[torch.FloatTensor]] = None
134
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
135
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
136
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
137
+
138
+
139
+ class Idefics2VisionEmbeddings(nn.Module):
140
+ """
141
+ This is a modified version of `siglip.modeling_siglip.SiglipVisionEmbeddings` to enable images of variable
142
+ resolution.
143
+
144
+ The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://arxiv.org/abs/2307.06304)
145
+ which allows treating images in their native aspect ratio and without the need to resize them to the same
146
+ fixed size. In particular, we start from the original pre-trained SigLIP model
147
+ (which uses images of fixed-size square images) and adapt it by training on images of variable resolutions.
148
+ """
149
+
150
+ def __init__(self, config: Idefics2VisionConfig):
151
+ super().__init__()
152
+ self.embed_dim = config.hidden_size
153
+ self.image_size = config.image_size
154
+ self.patch_size = config.patch_size
155
+
156
+ self.patch_embedding = nn.Conv2d(
157
+ in_channels=config.num_channels,
158
+ out_channels=self.embed_dim,
159
+ kernel_size=self.patch_size,
160
+ stride=self.patch_size,
161
+ padding="valid",
162
+ )
163
+
164
+ self.num_patches_per_side = self.image_size // self.patch_size
165
+ self.num_patches = self.num_patches_per_side**2
166
+ self.num_positions = self.num_patches
167
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
168
+
169
+ def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor:
170
+ batch_size, _, max_im_h, max_im_w = pixel_values.shape
171
+
172
+ patch_embeds = self.patch_embedding(pixel_values)
173
+ embeddings = patch_embeds.flatten(2).transpose(1, 2)
174
+
175
+ max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
176
+ boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side)
177
+ position_ids = torch.full(size=(batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0)
178
+
179
+ for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
180
+ nb_patches_h = p_attn_mask[:, 0].sum()
181
+ nb_patches_w = p_attn_mask[0].sum()
182
+
183
+ fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
184
+ fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
185
+
186
+ bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
187
+ bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)
188
+
189
+ pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
190
+ position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids
191
+
192
+ position_ids = position_ids.to(self.position_embedding.weight.device)
193
+ embeddings = embeddings + self.position_embedding(position_ids)
194
+ return embeddings
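
The bucketing logic above can be hard to parse in the abstract, so here is a toy, self-contained trace. The 4x4 pretrained grid and the 3x2 patch image are invented numbers for illustration, not the model's real configuration.

import torch

num_patches_per_side = 4  # pretrained position grid is 4 x 4 in this toy example
boundaries = torch.arange(1 / num_patches_per_side, 1.0, 1 / num_patches_per_side)  # [0.25, 0.50, 0.75]

# An image that only covers 3 patch rows and 2 patch columns.
frac_h = torch.arange(0, 1 - 1e-6, 1 / 3)  # approx [0.000, 0.333, 0.667]
frac_w = torch.arange(0, 1 - 1e-6, 1 / 2)  # [0.0, 0.5]

bucket_h = torch.bucketize(frac_h, boundaries, right=True)  # tensor([0, 1, 2])
bucket_w = torch.bucketize(frac_w, boundaries, right=True)  # tensor([0, 2])

# Flattened indices into the 4x4 position-embedding table.
pos_ids = (bucket_h[:, None] * num_patches_per_side + bucket_w).flatten()
print(pos_ids)  # tensor([ 0,  2,  4,  6,  8, 10])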
195
+
196
+
197
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipAttention with Siglip->Idefics2Vision
198
+ class Idefics2VisionAttention(nn.Module):
199
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
200
+
201
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__
202
+ def __init__(self, config):
203
+ super().__init__()
204
+ self.config = config
205
+ self.embed_dim = config.hidden_size
206
+ self.num_heads = config.num_attention_heads
207
+ self.head_dim = self.embed_dim // self.num_heads
208
+ if self.head_dim * self.num_heads != self.embed_dim:
209
+ raise ValueError(
210
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
211
+ f" {self.num_heads})."
212
+ )
213
+ self.scale = self.head_dim**-0.5
214
+ self.dropout = config.attention_dropout
215
+
216
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
217
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
218
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
219
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
220
+
221
+ # Ignore copy
222
+ self.is_causal = False
223
+
224
+ def forward(
225
+ self,
226
+ hidden_states: torch.Tensor,
227
+ attention_mask: Optional[torch.Tensor] = None,
228
+ output_attentions: Optional[bool] = False,
229
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
230
+ """Input shape: Batch x Time x Channel"""
231
+
232
+ batch_size, q_len, _ = hidden_states.size()
233
+
234
+ query_states = self.q_proj(hidden_states)
235
+ key_states = self.k_proj(hidden_states)
236
+ value_states = self.v_proj(hidden_states)
237
+
238
+ query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
239
+ key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
240
+ value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
241
+
242
+ k_v_seq_len = key_states.shape[-2]
243
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale
244
+
245
+ if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
246
+ raise ValueError(
247
+ f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
248
+ f" {attn_weights.size()}"
249
+ )
250
+
251
+ if attention_mask is not None:
252
+ if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
253
+ raise ValueError(
254
+ f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
255
+ )
256
+ attn_weights = attn_weights + attention_mask
257
+
258
+ # upcast attention to fp32
259
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
260
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
261
+ attn_output = torch.matmul(attn_weights, value_states)
262
+
263
+ if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
264
+ raise ValueError(
265
+ f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
266
+ f" {attn_output.size()}"
267
+ )
268
+
269
+ attn_output = attn_output.transpose(1, 2).contiguous()
270
+ attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)
271
+
272
+ attn_output = self.out_proj(attn_output)
273
+
274
+ return attn_output, attn_weights
275
+
276
+
277
+ class Idefics2VisionFlashAttention2(Idefics2VisionAttention):
278
+ """
279
+ Idefics2Vision flash attention module. This module inherits from `Idefics2VisionAttention` as the weights of the module stays
280
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
281
+ flash attention and deal with padding tokens in case the input contains any of them.
282
+ """
283
+
284
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
285
+ def __init__(self, *args, **kwargs):
286
+ super().__init__(*args, **kwargs)
287
+
288
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
289
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
290
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
291
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
292
+
293
+ def forward(
294
+ self,
295
+ hidden_states: torch.Tensor,
296
+ attention_mask: Optional[torch.LongTensor] = None,
297
+ position_ids: Optional[torch.LongTensor] = None,
298
+ past_key_value: Optional[Cache] = None,
299
+ output_attentions: bool = False,
300
+ use_cache: bool = False,
301
+ **kwargs,
302
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
303
+ output_attentions = False
304
+
305
+ bsz, q_len, _ = hidden_states.size()
306
+
307
+ query_states = self.q_proj(hidden_states)
308
+ key_states = self.k_proj(hidden_states)
309
+ value_states = self.v_proj(hidden_states)
310
+
311
+ # Flash attention requires the input to have the shape
312
+ # batch_size x seq_length x head_dim x hidden_dim
313
+ # therefore we just need to keep the original shape
314
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
315
+ key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
316
+ value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
317
+
318
+ kv_seq_len = key_states.shape[-2]
319
+ if past_key_value is not None:
320
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
321
+
322
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
323
+ # to be able to avoid many of these transpose/reshape/view.
324
+ query_states = query_states.transpose(1, 2)
325
+ key_states = key_states.transpose(1, 2)
326
+ value_states = value_states.transpose(1, 2)
327
+
328
+ dropout_rate = self.dropout if self.training else 0.0
329
+
330
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
331
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
332
+ # cast them back to the correct dtype just to be sure everything works as expected.
333
+ # This might slow down training & inference so it is recommended to not cast the LayerNorms
334
+ # in fp32. (Idefics2VisionRMSNorm handles it correctly)
335
+
336
+ input_dtype = query_states.dtype
337
+ if input_dtype == torch.float32:
338
+ if torch.is_autocast_enabled():
339
+ target_dtype = torch.get_autocast_gpu_dtype()
340
+ # Handle the case where the model is quantized
341
+ elif hasattr(self.config, "_pre_quantization_dtype"):
342
+ target_dtype = self.config._pre_quantization_dtype
343
+ else:
344
+ target_dtype = self.q_proj.weight.dtype
345
+
346
+ logger.warning_once(
347
+ f"The input hidden states seem to be silently cast to float32; this might be related to"
348
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast the input back to"
349
+ f" {target_dtype}."
350
+ )
351
+
352
+ query_states = query_states.to(target_dtype)
353
+ key_states = key_states.to(target_dtype)
354
+ value_states = value_states.to(target_dtype)
355
+
356
+ attn_output = self._flash_attention_forward(
357
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
358
+ )
359
+
360
+ attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous()
361
+ attn_output = self.out_proj(attn_output)
362
+
363
+ if not output_attentions:
364
+ attn_weights = None
365
+
366
+ return attn_output, attn_weights
367
+
368
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
369
+ def _flash_attention_forward(
370
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
371
+ ):
372
+ """
373
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
374
+ first unpad the input, then computes the attention scores and pad the final attention scores.
375
+
376
+ Args:
377
+ query_states (`torch.Tensor`):
378
+ Input query states to be passed to Flash Attention API
379
+ key_states (`torch.Tensor`):
380
+ Input key states to be passed to Flash Attention API
381
+ value_states (`torch.Tensor`):
382
+ Input value states to be passed to Flash Attention API
383
+ attention_mask (`torch.Tensor`):
384
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
385
+ position of padding tokens and 1 for the position of non-padding tokens.
386
+ dropout (`float`):
387
+ Attention dropout
388
+ softmax_scale (`float`, *optional*):
389
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
390
+ """
391
+ if not self._flash_attn_uses_top_left_mask:
392
+ causal = self.is_causal
393
+ else:
394
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
395
+ causal = self.is_causal and query_length != 1
396
+
397
+ # Contains at least one padding token in the sequence
398
+ if attention_mask is not None:
399
+ batch_size = query_states.shape[0]
400
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
401
+ query_states, key_states, value_states, attention_mask, query_length
402
+ )
403
+
404
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
405
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
406
+
407
+ attn_output_unpad = flash_attn_varlen_func(
408
+ query_states,
409
+ key_states,
410
+ value_states,
411
+ cu_seqlens_q=cu_seqlens_q,
412
+ cu_seqlens_k=cu_seqlens_k,
413
+ max_seqlen_q=max_seqlen_in_batch_q,
414
+ max_seqlen_k=max_seqlen_in_batch_k,
415
+ dropout_p=dropout,
416
+ softmax_scale=softmax_scale,
417
+ causal=causal,
418
+ )
419
+
420
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
421
+ else:
422
+ attn_output = flash_attn_func(
423
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
424
+ )
425
+
426
+ return attn_output
427
+
428
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
429
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
430
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
431
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
432
+
433
+ key_layer = index_first_axis(
434
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
435
+ )
436
+ value_layer = index_first_axis(
437
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
438
+ )
439
+ if query_length == kv_seq_len:
440
+ query_layer = index_first_axis(
441
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
442
+ )
443
+ cu_seqlens_q = cu_seqlens_k
444
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
445
+ indices_q = indices_k
446
+ elif query_length == 1:
447
+ max_seqlen_in_batch_q = 1
448
+ cu_seqlens_q = torch.arange(
449
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
450
+ ) # There is a memcpy here, that is very bad.
451
+ indices_q = cu_seqlens_q[:-1]
452
+ query_layer = query_layer.squeeze(1)
453
+ else:
454
+ # The -q_len: slice assumes left padding.
455
+ attention_mask = attention_mask[:, -query_length:]
456
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
457
+
458
+ return (
459
+ query_layer,
460
+ key_layer,
461
+ value_layer,
462
+ indices_q,
463
+ (cu_seqlens_q, cu_seqlens_k),
464
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
465
+ )
466
+
467
+
468
+ IDEFICS_VISION_ATTENTION_CLASSES = {
469
+ "eager": Idefics2VisionAttention,
470
+ "flash_attention_2": Idefics2VisionFlashAttention2,
471
+ }
472
+
473
+
474
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipMLP with Siglip->Idefics2Vision
475
+ class Idefics2VisionMLP(nn.Module):
476
+ def __init__(self, config):
477
+ super().__init__()
478
+ self.config = config
479
+ self.activation_fn = ACT2FN[config.hidden_act]
480
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
481
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
482
+
483
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
484
+ hidden_states = self.fc1(hidden_states)
485
+ hidden_states = self.activation_fn(hidden_states)
486
+ hidden_states = self.fc2(hidden_states)
487
+ return hidden_states
488
+
489
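+ # Illustrative usage sketch, not part of the upstream file: the vision MLP above simply maps
+ # hidden_size -> intermediate_size -> hidden_size. The tiny sizes and the SimpleNamespace
+ # stand-in for the config (it only needs the three attributes read above) are example-only assumptions.
+ def _sketch_vision_mlp_shapes():
+     from types import SimpleNamespace
+     import torch
+     cfg = SimpleNamespace(hidden_size=64, intermediate_size=128, hidden_act="gelu")
+     mlp = Idefics2VisionMLP(cfg)
+     patches = torch.randn(1, 5, 64)  # (batch, num_patches, hidden_size)
+     out = mlp(patches)
+     assert out.shape == (1, 5, 64)  # same shape in and out; width expanded to 128 internally
+     return out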
+
490
+ class Idefics2MLP(nn.Module):
491
+ def __init__(
492
+ self,
493
+ hidden_size: int,
494
+ intermediate_size: int,
495
+ output_size: int,
496
+ hidden_act: str,
497
+ ):
498
+ super().__init__()
499
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
500
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
501
+ self.down_proj = nn.Linear(intermediate_size, output_size, bias=False)
502
+ self.act_fn = ACT2FN[hidden_act]
503
+
504
+ def forward(self, x):
505
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
506
+
507
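+ # Illustrative usage sketch, not part of the upstream file: the gated MLP above follows the
+ # Llama/Mistral "down(act(gate(x)) * up(x))" pattern; the sizes and activation are arbitrary example values.
+ def _sketch_gated_mlp_shapes():
+     import torch
+     mlp = Idefics2MLP(hidden_size=32, intermediate_size=64, output_size=48, hidden_act="silu")
+     x = torch.randn(2, 7, 32)
+     out = mlp(x)
+     assert out.shape == (2, 7, 48)  # output_size may differ from hidden_size (used by the modality projection)
+     return out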
+
508
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipMultiheadAttentionPoolingHead with Siglip->Idefics2
509
+ class Idefics2MultiheadAttentionPoolingHead(nn.Module):
510
+ """Multihead Attention Pooling."""
511
+
512
+ def __init__(self, config: Idefics2VisionConfig):
513
+ super().__init__()
514
+
515
+ self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
516
+ self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
517
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
518
+ # Ignore copy
519
+ self.mlp = Idefics2MLP(
520
+ hidden_size=config.hidden_size,
521
+ intermediate_size=config.intermediate_size,
522
+ hidden_act=config.hidden_act,
523
+ output_size=config.hidden_size,
524
+ )
525
+
526
+ def forward(self, hidden_state):
527
+ batch_size = hidden_state.shape[0]
528
+ probe = self.probe.repeat(batch_size, 1, 1)
529
+
530
+ hidden_state = self.attention(probe, hidden_state, hidden_state)[0]
531
+
532
+ residual = hidden_state
533
+ hidden_state = self.layernorm(hidden_state)
534
+ hidden_state = residual + self.mlp(hidden_state)
535
+
536
+ return hidden_state[:, 0]
537
+
538
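+ # Illustrative usage sketch, not part of the upstream file: the pooling head above learns a single
+ # "probe" query that cross-attends over all patch tokens and returns one pooled vector per image.
+ # The SimpleNamespace config stand-in and the sizes are example-only assumptions.
+ def _sketch_attention_pooling():
+     from types import SimpleNamespace
+     import torch
+     cfg = SimpleNamespace(
+         hidden_size=64, num_attention_heads=4, layer_norm_eps=1e-6, intermediate_size=128, hidden_act="gelu"
+     )
+     head = Idefics2MultiheadAttentionPoolingHead(cfg)
+     patch_tokens = torch.randn(2, 10, 64)  # (batch, num_patches, hidden_size)
+     pooled = head(patch_tokens)
+     assert pooled.shape == (2, 64)  # one vector per image
+     return pooled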
+
539
+ class Idefics2EncoderLayer(nn.Module):
540
+ def __init__(self, config: Idefics2Config):
541
+ super().__init__()
542
+ self.embed_dim = config.hidden_size
543
+ self.self_attn = IDEFICS_VISION_ATTENTION_CLASSES[config._attn_implementation](config)
544
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
545
+ self.mlp = Idefics2VisionMLP(config)
546
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
547
+
548
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoderLayer.forward
549
+ def forward(
550
+ self,
551
+ hidden_states: torch.Tensor,
552
+ attention_mask: torch.Tensor,
553
+ output_attentions: Optional[bool] = False,
554
+ ) -> Tuple[torch.FloatTensor]:
555
+ """
556
+ Args:
557
+ hidden_states (`torch.FloatTensor`):
558
+ Input to the layer of shape `(batch, seq_len, embed_dim)`.
559
+ attention_mask (`torch.FloatTensor`):
560
+ Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
561
+ output_attentions (`bool`, *optional*, defaults to `False`):
562
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
563
+ returned tensors for more detail.
564
+ """
565
+ residual = hidden_states
566
+
567
+ hidden_states = self.layer_norm1(hidden_states)
568
+ hidden_states, attn_weights = self.self_attn(
569
+ hidden_states=hidden_states,
570
+ attention_mask=attention_mask,
571
+ output_attentions=output_attentions,
572
+ )
573
+ hidden_states = residual + hidden_states
574
+
575
+ residual = hidden_states
576
+ hidden_states = self.layer_norm2(hidden_states)
577
+ hidden_states = self.mlp(hidden_states)
578
+ hidden_states = residual + hidden_states
579
+
580
+ outputs = (hidden_states,)
581
+
582
+ if output_attentions:
583
+ outputs += (attn_weights,)
584
+
585
+ return outputs
586
+
587
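+ # Illustrative usage sketch, not part of the upstream file: one pre-norm encoder block over a
+ # sequence of patch embeddings. The SimpleNamespace config stand-in only carries the attributes the
+ # block (and the eager vision attention it instantiates) reads; all values are example-only assumptions.
+ def _sketch_encoder_layer():
+     from types import SimpleNamespace
+     import torch
+     cfg = SimpleNamespace(
+         hidden_size=64, num_attention_heads=4, attention_dropout=0.0, layer_norm_eps=1e-6,
+         intermediate_size=128, hidden_act="gelu", _attn_implementation="eager",
+     )
+     layer = Idefics2EncoderLayer(cfg)
+     hidden_states = torch.randn(1, 9, 64)  # (batch, num_patches, hidden_size)
+     (out,) = layer(hidden_states, attention_mask=None)
+     assert out.shape == hidden_states.shape
+     return out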
+
588
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoder with Siglip->Idefics2
589
+ class Idefics2Encoder(nn.Module):
590
+ """
591
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
592
+ [`Idefics2EncoderLayer`].
593
+
594
+ Args:
595
+ config: Idefics2Config
596
+ """
597
+
598
+ def __init__(self, config: Idefics2Config):
599
+ super().__init__()
600
+ self.config = config
601
+ self.layers = nn.ModuleList([Idefics2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
602
+ self.gradient_checkpointing = False
603
+
604
+ # Ignore copy
605
+ def forward(
606
+ self,
607
+ inputs_embeds,
608
+ attention_mask: Optional[torch.Tensor] = None,
609
+ output_attentions: Optional[bool] = None,
610
+ output_hidden_states: Optional[bool] = None,
611
+ return_dict: Optional[bool] = None,
612
+ ) -> Union[Tuple, BaseModelOutput]:
613
+ r"""
614
+ Args:
615
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
616
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
617
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
618
+ than the model's internal embedding lookup matrix.
619
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
620
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
621
+
622
+ - 1 for tokens that are **not masked**,
623
+ - 0 for tokens that are **masked**.
624
+
625
+ [What are attention masks?](../glossary#attention-mask)
626
+ output_attentions (`bool`, *optional*):
627
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
628
+ returned tensors for more detail.
629
+ output_hidden_states (`bool`, *optional*):
630
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
631
+ for more detail.
632
+ return_dict (`bool`, *optional*):
633
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
634
+ """
635
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
636
+ output_hidden_states = (
637
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
638
+ )
639
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
640
+
641
+ encoder_states = () if output_hidden_states else None
642
+ all_attentions = () if output_attentions else None
643
+
644
+ hidden_states = inputs_embeds
645
+ for encoder_layer in self.layers:
646
+ if output_hidden_states:
647
+ encoder_states = encoder_states + (hidden_states,)
648
+ if self.gradient_checkpointing and self.training:
649
+ layer_outputs = self._gradient_checkpointing_func(
650
+ encoder_layer.__call__,
651
+ hidden_states,
652
+ attention_mask,
653
+ output_attentions,
654
+ )
655
+ else:
656
+ layer_outputs = encoder_layer(
657
+ hidden_states,
658
+ attention_mask,
659
+ output_attentions=output_attentions,
660
+ )
661
+
662
+ hidden_states = layer_outputs[0]
663
+
664
+ if output_attentions:
665
+ all_attentions = all_attentions + (layer_outputs[1],)
666
+
667
+ if output_hidden_states:
668
+ encoder_states = encoder_states + (hidden_states,)
669
+
670
+ if not return_dict:
671
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
672
+ return BaseModelOutput(
673
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
674
+ )
675
+
676
+
677
+ class Idefics2VisionTransformer(nn.Module):
678
+ def __init__(self, config: Idefics2VisionConfig):
679
+ super().__init__()
680
+ embed_dim = config.hidden_size
681
+
682
+ self.config = config
683
+ self.embeddings = Idefics2VisionEmbeddings(config)
684
+ self.encoder = Idefics2Encoder(config)
685
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
686
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
687
+
688
+ def get_input_embeddings(self):
689
+ return self.embeddings
690
+
691
+ def set_input_embeddings(self, value):
692
+ self.embeddings = value
693
+
694
+ def forward(
695
+ self,
696
+ pixel_values,
697
+ patch_attention_mask: Optional[torch.BoolTensor] = None,
698
+ output_attentions: Optional[bool] = None,
699
+ output_hidden_states: Optional[bool] = None,
700
+ return_dict: Optional[bool] = None,
701
+ ) -> Union[Tuple, BaseModelOutput]:
702
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
703
+ output_hidden_states = (
704
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
705
+ )
706
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
707
+
708
+ batch_size = pixel_values.size(0)
709
+ if patch_attention_mask is None:
710
+ patch_size = self.config.patch_size
711
+ patch_attention_mask = torch.ones(
712
+ (
713
+ batch_size,
714
+ pixel_values.size(2) // patch_size,
715
+ pixel_values.size(3) // patch_size,
716
+ )
717
+ )
718
+ patch_attention_mask = patch_attention_mask.to(dtype=torch.bool, device=pixel_values.device)
719
+
720
+ hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
721
+
722
+ patch_attention_mask = patch_attention_mask.view(batch_size, -1)
723
+ # The call to `_upad_input` in `_flash_attention_forward` is expensive
724
+ # So when the `patch_attention_mask` is full of 1s (i.e. attending to the whole sequence),
725
+ # we avoid passing the attention_mask, which is equivalent to attending to the full sequence
726
+ if not torch.any(~patch_attention_mask):
727
+ patch_attention_mask = None
728
+ elif not self._use_flash_attention_2:
729
+ patch_attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
730
+
731
+ encoder_outputs = self.encoder(
732
+ inputs_embeds=hidden_states,
733
+ attention_mask=patch_attention_mask,
734
+ output_attentions=output_attentions,
735
+ output_hidden_states=output_hidden_states,
736
+ return_dict=return_dict,
737
+ )
738
+
739
+ last_hidden_state = encoder_outputs[0]
740
+ last_hidden_state = self.post_layernorm(last_hidden_state)
741
+
742
+ if not return_dict:
743
+ return (last_hidden_state,) + encoder_outputs[1:]
744
+
745
+ return BaseModelOutput(
746
+ last_hidden_state=last_hidden_state,
747
+ hidden_states=encoder_outputs.hidden_states,
748
+ attentions=encoder_outputs.attentions,
749
+ )
750
+
751
+
752
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
753
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
754
+ """
755
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
756
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
757
+ """
758
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
759
+ if n_rep == 1:
760
+ return hidden_states
761
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
762
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
763
+
764
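+ # Illustrative sketch, not part of the upstream file: `repeat_kv` expands grouped-query key/value
+ # heads so they line up with the query heads. The shapes below are arbitrary example values.
+ def _sketch_repeat_kv_shapes():
+     import torch
+     kv = torch.randn(2, 4, 6, 16)  # (batch, num_key_value_heads, seq_len, head_dim)
+     expanded = repeat_kv(kv, n_rep=3)
+     assert expanded.shape == (2, 12, 6, 16)  # 4 kv heads serving 12 query heads
+     # each group of 3 consecutive query heads sees the same original key/value head
+     assert torch.equal(expanded[:, 0], expanded[:, 2])
+     return expanded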
+
765
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
766
+ def _get_unpad_data(attention_mask):
767
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
768
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
769
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
770
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
771
+ return (
772
+ indices,
773
+ cu_seqlens,
774
+ max_seqlen_in_batch,
775
+ )
776
+
777
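+ # Illustrative sketch, not part of the upstream file: what `_get_unpad_data` returns for a tiny
+ # right-padded batch. The mask values are an arbitrary example.
+ def _sketch_get_unpad_data():
+     import torch
+     # first sequence: 2 real tokens + 1 pad; second sequence: 3 real tokens
+     attention_mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
+     indices, cu_seqlens, max_seqlen = _get_unpad_data(attention_mask)
+     assert indices.tolist() == [0, 1, 3, 4, 5]  # non-pad positions in the flattened (batch * seq_len) layout
+     assert cu_seqlens.tolist() == [0, 2, 5]  # cumulative lengths, as expected by the flash-attn varlen kernels
+     assert max_seqlen == 3
+     return indices, cu_seqlens, max_seqlen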
+
778
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Idefics2
779
+ class Idefics2RMSNorm(nn.Module):
780
+ def __init__(self, hidden_size, eps=1e-6):
781
+ """
782
+ Idefics2RMSNorm is equivalent to T5LayerNorm
783
+ """
784
+ super().__init__()
785
+ self.weight = nn.Parameter(torch.ones(hidden_size))
786
+ self.variance_epsilon = eps
787
+
788
+ def forward(self, hidden_states):
789
+ input_dtype = hidden_states.dtype
790
+ hidden_states = hidden_states.to(torch.float32)
791
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
792
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
793
+ return self.weight * hidden_states.to(input_dtype)
794
+
795
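+ # Illustrative sketch, not part of the upstream file: with its default all-ones weight, the RMS norm
+ # above rescales every feature vector to (approximately) unit root-mean-square.
+ def _sketch_rms_norm():
+     import torch
+     norm = Idefics2RMSNorm(hidden_size=8, eps=1e-6)
+     x = torch.randn(2, 3, 8)
+     rms = norm(x).pow(2).mean(-1).sqrt()
+     assert torch.allclose(rms, torch.ones_like(rms), atol=1e-3)
+     return rms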
+
796
+ class Idefics2PerceiverAttention(nn.Module):
797
+ def __init__(self, config, layer_idx: Optional[int] = None) -> None:
798
+ """Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`"""
799
+ super().__init__()
800
+
801
+ self.layer_idx = None
802
+ self.hidden_size = config.text_config.hidden_size
803
+ self.num_heads = config.perceiver_config.resampler_n_heads
804
+ self.head_dim = config.perceiver_config.resampler_head_dim
805
+ self.num_key_value_heads = config.perceiver_config.num_key_value_heads
806
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
807
+ self.attention_dropout = config.perceiver_config.attention_dropout
808
+
809
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
810
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
811
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
812
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
813
+
814
+ self.is_causal = False
815
+
816
+ def forward(
817
+ self,
818
+ latents: torch.Tensor,
819
+ context: torch.Tensor,
820
+ attention_mask: Optional[torch.Tensor] = None,
821
+ position_ids: Optional[torch.LongTensor] = None,
822
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
823
+ output_attentions: bool = False,
824
+ use_cache: bool = False,
825
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
826
+ """
827
+ Runs Perceiver cross-attention, with `context` and `latents` concatenated along the `seq` dimension to form the keys/values.
828
+
829
+ Args:
830
+ latents (`torch.Tensor`): Tensor of shape [bsz, n_latents, embed_dim] representing fixed length latents to compress to.
831
+ context (`torch.Tensor`): Tensor of shape [bsz, seq, embed_dim] representing long-form context to resample.
832
+ attention_mask (`torch.Tensor`, *optional*): Tensor of shape [bsz, 1, seq, n_latents] representing attention mask.
833
+ position_ids (`torch.LongTensor`, *optional*): Tensor of shape [bsz, seq] representing position indices of each input token.
834
+ past_key_value (`Tuple[torch.Tensor]`, *optional*): Tuple of tensors containing cached key and value states.
835
+ output_attentions (`bool`, *optional*, defaults to `False`): Whether to return attention weights.
836
+ use_cache (`bool`, *optional*, defaults to `False`): Whether to use past_key_value for caching.
837
+ """
838
+ bsz, q_len, _ = latents.size()
839
+ kv_seq_len = q_len + context.size()[1]
840
+
841
+ hidden_states = torch.concat([context, latents], dim=-2)
842
+
843
+ query_states = self.q_proj(latents)
844
+ key_states = self.k_proj(hidden_states)
845
+ value_states = self.v_proj(hidden_states)
846
+
847
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
848
+ key_states = key_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
849
+ value_states = value_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
850
+
851
+ past_key_value = getattr(self, "past_key_value", past_key_value)
852
+
853
+ if past_key_value is not None:
854
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
855
+
856
+ # repeat k/v heads if n_kv_heads < n_heads
857
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
858
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
859
+
860
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
861
+
862
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
863
+ raise ValueError(
864
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
865
+ f" {attn_weights.size()}"
866
+ )
867
+
868
+ if attention_mask is not None:
869
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
870
+ raise ValueError(
871
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
872
+ )
873
+
874
+ attn_weights = attn_weights + attention_mask
875
+
876
+ # upcast attention to fp32
877
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
878
+ attn_output = torch.matmul(attn_weights, value_states)
879
+
880
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
881
+ raise ValueError(
882
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
883
+ f" {attn_output.size()}"
884
+ )
885
+
886
+ attn_output = attn_output.transpose(1, 2).contiguous()
887
+ attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim)
888
+
889
+ attn_output = self.o_proj(attn_output)
890
+
891
+ if not output_attentions:
892
+ attn_weights = None
893
+
894
+ return attn_output, attn_weights, past_key_value
895
+
896
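+ # Illustrative usage sketch, not part of the upstream file: queries come from the latents only,
+ # while keys/values are built from the concatenation of context and latents, so a long sequence of
+ # image tokens is compressed into a fixed number of latents. The nested SimpleNamespace stands in
+ # for the real config and all sizes are example-only assumptions.
+ def _sketch_perceiver_cross_attention():
+     from types import SimpleNamespace
+     import torch
+     cfg = SimpleNamespace(
+         text_config=SimpleNamespace(hidden_size=64),
+         perceiver_config=SimpleNamespace(
+             resampler_n_heads=4, resampler_head_dim=16, num_key_value_heads=2, attention_dropout=0.0
+         ),
+     )
+     attn = Idefics2PerceiverAttention(cfg)
+     latents = torch.randn(1, 8, 64)   # 8 learned latent queries
+     context = torch.randn(1, 20, 64)  # 20 projected image tokens
+     out, _, _ = attn(latents=latents, context=context, attention_mask=None)
+     assert out.shape == (1, 8, 64)  # compressed back to the latent length
+     return out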
+
897
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with MistralAttention->Idefics2PerceiverAttention,MistralFlashAttention->Idefics2PerceiverFlashAttention,Mistral->Idefics2
898
+ class Idefics2PerceiverFlashAttention2(Idefics2PerceiverAttention):
899
+ """
900
+ Idefics2 flash attention module. This module inherits from `Idefics2PerceiverAttention` as the weights of the module stay
901
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
902
+ flash attention and deal with padding tokens in case the input contains any of them.
903
+ """
904
+
905
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
906
+ def __init__(self, *args, **kwargs):
907
+ super().__init__(*args, **kwargs)
908
+
909
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
910
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
911
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
912
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
913
+
914
+ # Ignore copy
915
+ def forward(
916
+ self,
917
+ latents: torch.Tensor,
918
+ context: torch.Tensor,
919
+ attention_mask: Optional[torch.LongTensor] = None,
920
+ position_ids: Optional[torch.LongTensor] = None,
921
+ past_key_value: Optional[Cache] = None,
922
+ output_attentions: bool = False,
923
+ use_cache: bool = False,
924
+ **kwargs,
925
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
926
+ bsz, q_len, _ = latents.size()
927
+ kv_seq_len = q_len + context.size()[1]
928
+
929
+ # Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn!
930
+ # Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents`
931
+ query_states = self.q_proj(latents)
932
+ key_states = self.k_proj(torch.cat([context, latents], dim=-2))
933
+ value_states = self.v_proj(torch.cat([context, latents], dim=-2))
934
+
935
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
936
+ key_states = key_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
937
+ value_states = value_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
938
+
939
+ kv_seq_len = key_states.shape[-2]
940
+ if past_key_value is not None:
941
+ kv_seq_len += past_key_value[0].shape[-2]
942
+
943
+ if past_key_value is not None:
944
+ # Activate cache slicing only if the config has a `sliding_window` attribute
945
+ if hasattr(self.config, "sliding_window") and kv_seq_len > self.config.sliding_window:
946
+ slicing_tokens = kv_seq_len - self.config.sliding_window
947
+
948
+ past_key = past_key_value[0]
949
+ past_value = past_key_value[1]
950
+
951
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
952
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
953
+
954
+ if past_key.shape[-2] != self.config.sliding_window - 1:
955
+ raise ValueError(
956
+ "past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1,"
957
+ f" head_dim`), got {past_key.shape}"
958
+ )
959
+
960
+ past_key_value = (past_key, past_value)
961
+
962
+ if attention_mask is not None:
963
+ attention_mask = attention_mask[:, slicing_tokens:]
964
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
965
+
966
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
967
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
968
+
969
+ past_key_value = (key_states, value_states) if use_cache else None
970
+
971
+ # repeat k/v heads if n_kv_heads < n_heads
972
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
973
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
974
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
975
+
976
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
977
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
978
+ # cast them back to float16 just to be sure everything works as expected.
979
+ input_dtype = query_states.dtype
980
+ if input_dtype == torch.float32:
981
+ if torch.is_autocast_enabled():
982
+ target_dtype = torch.get_autocast_gpu_dtype()
983
+ # Handle the case where the model is quantized
984
+ elif hasattr(self.config, "_pre_quantization_dtype"):
985
+ target_dtype = self.config._pre_quantization_dtype
986
+ else:
987
+ target_dtype = self.q_proj.weight.dtype
988
+
989
+ logger.warning_once(
990
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
991
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
992
+ f" {target_dtype}."
993
+ )
994
+
995
+ query_states = query_states.to(target_dtype)
996
+ key_states = key_states.to(target_dtype)
997
+ value_states = value_states.to(target_dtype)
998
+
999
+ # Reshape to the expected shape for Flash Attention
1000
+ query_states = query_states.transpose(1, 2)
1001
+ key_states = key_states.transpose(1, 2)
1002
+ value_states = value_states.transpose(1, 2)
1003
+
1004
+ attn_output = self._flash_attention_forward(
1005
+ query_states,
1006
+ key_states,
1007
+ value_states,
1008
+ attention_mask,
1009
+ q_len,
1010
+ dropout=dropout_rate,
1011
+ use_sliding_windows=False,
1012
+ )
1013
+
1014
+ attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim).contiguous()
1015
+ attn_output = self.o_proj(attn_output)
1016
+
1017
+ if not output_attentions:
1018
+ attn_weights = None
1019
+
1020
+ return attn_output, attn_weights, past_key_value
1021
+
1022
+ def _flash_attention_forward(
1023
+ self,
1024
+ query_states,
1025
+ key_states,
1026
+ value_states,
1027
+ attention_mask,
1028
+ query_length,
1029
+ dropout=0.0,
1030
+ softmax_scale=None,
1031
+ use_sliding_windows=False,
1032
+ ):
1033
+ """
1034
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
1035
+ we first unpad the input, then compute the attention scores, and finally re-pad the output.
1036
+
1037
+ Args:
1038
+ query_states (`torch.Tensor`):
1039
+ Input query states to be passed to Flash Attention API
1040
+ key_states (`torch.Tensor`):
1041
+ Input key states to be passed to Flash Attention API
1042
+ value_states (`torch.Tensor`):
1043
+ Input value states to be passed to Flash Attention API
1044
+ attention_mask (`torch.Tensor`):
1045
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
1046
+ position of padding tokens and 1 for the position of non-padding tokens.
1047
+ dropout (`float`):
1048
+ Attention dropout
1049
+ softmax_scale (`float`, *optional*):
1050
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
1051
+ use_sliding_windows (`bool`, *optional*):
1052
+ Whether to activate sliding window attention.
1053
+ """
1054
+ if not self._flash_attn_uses_top_left_mask:
1055
+ causal = self.is_causal
1056
+ else:
1057
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
1058
+ causal = self.is_causal and query_length != 1
1059
+
1060
+ # Contains at least one padding token in the sequence
1061
+ if attention_mask is not None:
1062
+ batch_size = query_states.shape[0]
1063
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
1064
+ query_states, key_states, value_states, attention_mask, query_length
1065
+ )
1066
+
1067
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
1068
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
1069
+
1070
+ if not use_sliding_windows:
1071
+ attn_output_unpad = flash_attn_varlen_func(
1072
+ query_states,
1073
+ key_states,
1074
+ value_states,
1075
+ cu_seqlens_q=cu_seqlens_q,
1076
+ cu_seqlens_k=cu_seqlens_k,
1077
+ max_seqlen_q=max_seqlen_in_batch_q,
1078
+ max_seqlen_k=max_seqlen_in_batch_k,
1079
+ dropout_p=dropout,
1080
+ softmax_scale=softmax_scale,
1081
+ causal=causal,
1082
+ )
1083
+ else:
1084
+ attn_output_unpad = flash_attn_varlen_func(
1085
+ query_states,
1086
+ key_states,
1087
+ value_states,
1088
+ cu_seqlens_q=cu_seqlens_q,
1089
+ cu_seqlens_k=cu_seqlens_k,
1090
+ max_seqlen_q=max_seqlen_in_batch_q,
1091
+ max_seqlen_k=max_seqlen_in_batch_k,
1092
+ dropout_p=dropout,
1093
+ softmax_scale=softmax_scale,
1094
+ causal=causal,
1095
+ window_size=(self.config.sliding_window, self.config.sliding_window),
1096
+ )
1097
+
1098
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
1099
+ else:
1100
+ if not use_sliding_windows:
1101
+ attn_output = flash_attn_func(
1102
+ query_states,
1103
+ key_states,
1104
+ value_states,
1105
+ dropout,
1106
+ softmax_scale=softmax_scale,
1107
+ causal=causal,
1108
+ )
1109
+ else:
1110
+ attn_output = flash_attn_func(
1111
+ query_states,
1112
+ key_states,
1113
+ value_states,
1114
+ dropout,
1115
+ softmax_scale=softmax_scale,
1116
+ causal=causal,
1117
+ window_size=(self.config.sliding_window, self.config.sliding_window),
1118
+ )
1119
+
1120
+ return attn_output
1121
+
1122
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
1123
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
1124
+
1125
+ # On the first iteration we need to properly re-create the padding mask
1126
+ # by slicing it at the proper place
1127
+ if kv_seq_len != attention_mask.shape[-1]:
1128
+ attention_mask_num_tokens = attention_mask.shape[-1]
1129
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
1130
+
1131
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
1132
+
1133
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
1134
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
1135
+
1136
+ if query_length == kv_seq_len:
1137
+ query_layer = index_first_axis(
1138
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
1139
+ )
1140
+ cu_seqlens_q = cu_seqlens_k
1141
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
1142
+ indices_q = indices_k
1143
+ elif query_length == 1:
1144
+ max_seqlen_in_batch_q = 1
1145
+ cu_seqlens_q = torch.arange(
1146
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
1147
+ ) # There is a memcpy here, that is very bad.
1148
+ indices_q = cu_seqlens_q[:-1]
1149
+ query_layer = query_layer.squeeze(1)
1150
+ else:
1151
+ # The -q_len: slice assumes left padding.
1152
+ attention_mask = attention_mask[:, -query_length:]
1153
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
1154
+
1155
+ return (
1156
+ query_layer,
1157
+ key_layer,
1158
+ value_layer,
1159
+ indices_q,
1160
+ (cu_seqlens_q, cu_seqlens_k),
1161
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
1162
+ )
1163
+
1164
+
1165
+ IDEFICS2_PERCEIVER_ATTENTION_CLASSES = {
1166
+ "eager": Idefics2PerceiverAttention,
1167
+ "flash_attention_2": Idefics2PerceiverFlashAttention2,
1168
+ }
1169
+
1170
+
1171
+ class Idefics2PerceiverLayer(nn.Module):
1172
+ def __init__(self, config, layer_idx: int):
1173
+ super().__init__()
1174
+ self.hidden_size = config.text_config.hidden_size
1175
+ self.n_latents = config.perceiver_config.resampler_n_latents
1176
+ self.depth = config.perceiver_config.resampler_depth
1177
+ self.rms_norm_eps = config.text_config.rms_norm_eps
1178
+
1179
+ self.input_latents_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
1180
+ self.input_context_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
1181
+ self.self_attn = IDEFICS2_PERCEIVER_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
1182
+ self.post_attention_layernorm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
1183
+ self.mlp = Idefics2MLP(
1184
+ hidden_size=config.text_config.hidden_size,
1185
+ intermediate_size=config.text_config.hidden_size * 4,
1186
+ output_size=config.text_config.hidden_size,
1187
+ hidden_act=config.perceiver_config.hidden_act,
1188
+ )
1189
+
1190
+ def forward(
1191
+ self,
1192
+ latents: torch.Tensor,
1193
+ context: torch.Tensor,
1194
+ attention_mask: Optional[torch.Tensor] = None,
1195
+ position_ids: Optional[torch.LongTensor] = None,
1196
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
1197
+ output_attentions: Optional[bool] = False,
1198
+ use_cache: Optional[bool] = False,
1199
+ **kwargs,
1200
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
1201
+ """
1202
+ Args:
1203
+ latents (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
1204
+ context (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
1205
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
1206
+ `(batch, sequence_length)` where padding elements are indicated by 0.
1207
+ output_attentions (`bool`, *optional*):
1208
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1209
+ returned tensors for more detail.
1210
+ use_cache (`bool`, *optional*):
1211
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1212
+ (see `past_key_values`).
1213
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
1214
+ """
1215
+ residual = latents
1216
+
1217
+ latents = self.input_latents_norm(latents)
1218
+ context = self.input_context_norm(context)
1219
+
1220
+ latents, self_attn_weights, present_key_value = self.self_attn(
1221
+ latents=latents,
1222
+ context=context,
1223
+ attention_mask=attention_mask,
1224
+ )
1225
+ latents = residual + latents
1226
+ residual = latents
1227
+
1228
+ latents = self.post_attention_layernorm(latents)
1229
+ latents = self.mlp(latents)
1230
+ latents = residual + latents
1231
+
1232
+ outputs = (latents,)
1233
+
1234
+ if output_attentions:
1235
+ outputs += (self_attn_weights,)
1236
+
1237
+ if use_cache:
1238
+ outputs += (present_key_value,)
1239
+
1240
+ return outputs
1241
+
1242
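+ # Illustrative usage sketch, not part of the upstream file: one resampler block RMS-norms the
+ # latents and the context, cross-attends, then applies the gated MLP, each with a residual
+ # connection. The SimpleNamespace config stand-in and all sizes are example-only assumptions.
+ def _sketch_perceiver_layer():
+     from types import SimpleNamespace
+     import torch
+     cfg = SimpleNamespace(
+         _attn_implementation="eager",
+         text_config=SimpleNamespace(hidden_size=64, rms_norm_eps=1e-6),
+         perceiver_config=SimpleNamespace(
+             resampler_n_latents=8, resampler_depth=3, hidden_act="silu",
+             resampler_n_heads=4, resampler_head_dim=16, num_key_value_heads=2, attention_dropout=0.0,
+         ),
+     )
+     layer = Idefics2PerceiverLayer(cfg, layer_idx=0)
+     latents = torch.randn(1, 8, 64)
+     context = torch.randn(1, 20, 64)
+     (new_latents,) = layer(latents, context, attention_mask=None)
+     assert new_latents.shape == latents.shape
+     return new_latents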
+
1243
+ class Idefics2PerceiverResampler(nn.Module):
1244
+ def __init__(self, config) -> None:
1245
+ """
1246
+ Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or
1247
+ MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then
1248
+ returns a Tensor of shape [bsz, n_latents, embed_dim]. The Resampler acts as a form of learned pooling and
1249
+ is derived from [Perceiver: General Perception with Iterative Attention](https://arxiv.org/abs/2103.03206).
1250
+ """
1251
+ super().__init__()
1252
+ self.hidden_size = config.text_config.hidden_size
1253
+ self.hidden_act = config.perceiver_config.hidden_act
1254
+ self.n_latents = config.perceiver_config.resampler_n_latents
1255
+ self.depth = config.perceiver_config.resampler_depth
1256
+ self.rms_norm_eps = config.text_config.rms_norm_eps
1257
+
1258
+ # Create Latents for Perceiver
1259
+ self.latents = nn.Parameter(torch.ones(self.n_latents, self.hidden_size))
1260
+
1261
+ # Create Transformer Blocks
1262
+ self.layers = nn.ModuleList([Idefics2PerceiverLayer(config, idx) for idx in range(self.depth)])
1263
+ self.norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
1264
+
1265
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
1266
+
1267
+ def forward(
1268
+ self,
1269
+ context: torch.Tensor,
1270
+ attention_mask,
1271
+ ) -> torch.Tensor:
1272
+ # seq embed -> bsz seq embed
1273
+ latents = self.latents.unsqueeze(0).expand((context.shape[0], *self.latents.size()))
1274
+
1275
+ latent_attention_mask = torch.ones(
1276
+ (attention_mask.size(0), latents.size(1)), dtype=attention_mask.dtype, device=attention_mask.device
1277
+ )
1278
+ attention_mask = torch.cat([attention_mask, latent_attention_mask], dim=-1)
1279
+ attention_mask = (
1280
+ _prepare_4d_attention_mask(attention_mask, latents.dtype, tgt_len=self.n_latents)
1281
+ if not self._use_flash_attention_2
1282
+ else attention_mask
1283
+ )
1284
+
1285
+ compressed_context = latents
1286
+ for perceiver_layer in self.layers:
1287
+ layer_outputs = perceiver_layer(
1288
+ compressed_context,
1289
+ context,
1290
+ attention_mask=attention_mask,
1291
+ position_ids=None,
1292
+ past_key_value=None,
1293
+ output_attentions=False,
1294
+ use_cache=False,
1295
+ )
1296
+
1297
+ compressed_context = layer_outputs[0]
1298
+
1299
+ compressed_context = self.norm(compressed_context)
1300
+
1301
+ return compressed_context
1302
+
1303
+
1304
+ class Idefics2Connector(nn.Module):
1305
+ def __init__(self, config):
1306
+ super().__init__()
1307
+ self.modality_projection = Idefics2MLP(
1308
+ hidden_size=config.vision_config.hidden_size,
1309
+ intermediate_size=config.text_config.intermediate_size,
1310
+ output_size=config.text_config.hidden_size,
1311
+ hidden_act=config.text_config.hidden_act,
1312
+ )
1313
+ self.perceiver_resampler = Idefics2PerceiverResampler(config)
1314
+
1315
+ def forward(self, image_hidden_states, attention_mask):
1316
+ image_hidden_states = self.modality_projection(image_hidden_states)
1317
+ image_hidden_states = self.perceiver_resampler(context=image_hidden_states, attention_mask=attention_mask)
1318
+ return image_hidden_states
1319
+
1320
+
1321
+ IDEFICS2_START_DOCSTRING = r"""
1322
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1323
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1324
+ etc.)
1325
+
1326
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1327
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
1328
+ and behavior.
1329
+
1330
+ Parameters:
1331
+ config ([`Idefics2Config`] or [`Idefics2VisionConfig`]):
1332
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
1333
+ load the weights associated with the model, only the configuration. Check out the
1334
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1335
+ """
1336
+
1337
+
1338
+ @add_start_docstrings(
1339
+ "The bare Idefics2 Model outputting raw hidden-states without any specific head on top.",
1340
+ IDEFICS2_START_DOCSTRING,
1341
+ )
1342
+ class Idefics2PreTrainedModel(PreTrainedModel):
1343
+ config_class = Idefics2Config
1344
+ base_model_prefix = "model"
1345
+ supports_gradient_checkpointing = True
1346
+ _no_split_modules = ["Idefics2VisionAttention", "Idefics2MLP", "Idefics2PerceiverLayer", "Idefics2DecoderLayer"]
1347
+ _skip_keys_device_placement = "past_key_values"
1348
+ _supports_flash_attn_2 = True
1349
+
1350
+ def _init_weights(self, module):
1351
+ # important: this ported version of Idefics2 isn't meant for training from scratch - only
1352
+ # inference and fine-tuning - so the proper init weights code has been removed - the original codebase
1353
+ # https://github.com/haotian-liu/LLaVA/tree/main/idefics2 should serve for that purpose
1354
+ std = (
1355
+ self.config.initializer_range
1356
+ if hasattr(self.config, "initializer_range")
1357
+ else self.config.text_config.initializer_range
1358
+ )
1359
+
1360
+ if hasattr(module, "class_embedding"):
1361
+ module.class_embedding.data.normal_(mean=0.0, std=std)
1362
+
1363
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
1364
+ module.weight.data.normal_(mean=0.0, std=std)
1365
+ if module.bias is not None:
1366
+ module.bias.data.zero_()
1367
+ elif isinstance(module, nn.Embedding):
1368
+ module.weight.data.normal_(mean=0.0, std=std)
1369
+ if module.padding_idx is not None:
1370
+ module.weight.data[module.padding_idx].zero_()
1371
+
1372
+ @classmethod
1373
+ def _autoset_attn_implementation(
1374
+ cls,
1375
+ config,
1376
+ use_flash_attention_2: bool = False,
1377
+ torch_dtype: Optional[torch.dtype] = None,
1378
+ device_map: Optional[Union[str, Dict[str, int]]] = None,
1379
+ check_device_map: bool = True,
1380
+ **kwargs,
1381
+ ):
1382
+ """
1383
+ Overrides the method in `PreTrainedModel` to update the vision config with the correct attention implementation
1384
+ """
1385
+ config = super()._autoset_attn_implementation(
1386
+ config=config,
1387
+ use_flash_attention_2=use_flash_attention_2,
1388
+ torch_dtype=torch_dtype,
1389
+ device_map=device_map,
1390
+ check_device_map=check_device_map,
1391
+ **kwargs,
1392
+ )
1393
+ config.vision_config._attn_implementation = config._attn_implementation
1394
+ return config
1395
+
1396
+
1397
+ IDEFICS2_INPUTS_DOCSTRING = r"""
1398
+ Args:
1399
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1400
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1401
+ it.
1402
+
1403
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1404
+ [`PreTrainedTokenizer.__call__`] for details.
1405
+
1406
+ [What are input IDs?](../glossary#input-ids)
1407
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1408
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1409
+
1410
+ - 1 for tokens that are **not masked**,
1411
+ - 0 for tokens that are **masked**.
1412
+
1413
+ [What are attention masks?](../glossary#attention-mask)
1414
+
1415
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1416
+ [`PreTrainedTokenizer.__call__`] for details.
1417
+
1418
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1419
+ `past_key_values`).
1420
+
1421
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1422
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1423
+ information on the default strategy.
1424
+
1425
+ - 1 indicates the head is **not masked**,
1426
+ - 0 indicates the head is **masked**.
1427
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1428
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1429
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
1430
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1431
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
1432
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
1433
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
1434
+
1435
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1436
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1437
+
1438
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1439
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1440
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1441
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1442
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1443
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1444
+ model's internal embedding lookup matrix.
1445
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1446
+ The tensors corresponding to the input images. Pixel values can be obtained using
1447
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details ([`LlavaProcessor`] uses
1448
+ [`CLIPImageProcessor`] for processing images).
1449
+ pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
1450
+ Mask to avoid performing attention on padding pixel indices.
1451
+ image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1452
+ The hidden states of the image encoder after modality projection and perceiver resampling.
1453
+ use_cache (`bool`, *optional*):
1454
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1455
+ `past_key_values`).
1456
+ output_attentions (`bool`, *optional*):
1457
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1458
+ tensors for more detail.
1459
+ output_hidden_states (`bool`, *optional*):
1460
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1461
+ more detail.
1462
+ return_dict (`bool`, *optional*):
1463
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1464
+ """
1465
+
1466
+
1467
+ @add_start_docstrings(
1468
+ """Idefics2 model consisting of a SIGLIP vision encoder and Mistral language decoder""",
1469
+ IDEFICS2_START_DOCSTRING,
1470
+ )
1471
+ class Idefics2Model(Idefics2PreTrainedModel):
1472
+ def __init__(self, config: Idefics2Config):
1473
+ super().__init__(config)
1474
+ self.padding_idx = self.config.text_config.pad_token_id
1475
+ self.vocab_size = self.config.text_config.vocab_size
1476
+
1477
+ self.vision_model = Idefics2VisionTransformer(config.vision_config)
1478
+ self.connector = Idefics2Connector(config)
1479
+ self.text_model = AutoModel.from_config(config.text_config)
1480
+
1481
+ self.image_seq_len = config.perceiver_config.resampler_n_latents
1482
+ self.image_token_id = self.config.image_token_id
1483
+
1484
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
1485
+
1486
+ self.post_init()
1487
+
1488
+ def enable_input_require_grads(self):
1489
+ """
1490
+ Enables the gradients for the input embeddings.
1491
+
1492
+ This is useful for LoRA when using gradient checkpointing.
1493
+ c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032
1494
+
1495
+ Override to set output.requires_grad = True for both the decoder's and vision model's embeddings.
1496
+ """
1497
+
1498
+ def get_lowest_module(module):
1499
+ if len(list(module.children())) == 0:
1500
+ # If the module has no children, it is a leaf module (e.g., Linear, Conv2d, etc.)
1501
+ return module
1502
+ else:
1503
+ # Recursively call the function on each child module
1504
+ return get_lowest_module(list(module.children())[0])
1505
+
1506
+ def make_inputs_require_grads(module, input, output):
1507
+ output.requires_grad_(True)
1508
+
1509
+ self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
1510
+ self._vision_require_grads_hook = get_lowest_module(self.vision_model).register_forward_hook(
1511
+ make_inputs_require_grads
1512
+ )
1513
+
1514
+ def get_input_embeddings(self):
1515
+ return self.text_model.get_input_embeddings()
1516
+
1517
+ def set_input_embeddings(self, value):
1518
+ self.text_model.set_input_embeddings(value)
1519
+
1520
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
1521
+ model_embeds = self.text_model.resize_token_embeddings(
1522
+ new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of
1523
+ )
1524
+ self.config.text_config.vocab_size = model_embeds.num_embeddings
1525
+ return model_embeds
1526
+
1527
+ def inputs_merger(
1528
+ self,
1529
+ input_ids: torch.LongTensor,
1530
+ inputs_embeds: Optional[torch.Tensor],
1531
+ image_hidden_states: Optional[torch.Tensor],
1532
+ ):
1533
+ """
1534
+ This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM.
1535
+ The merging happens as follows:
1536
+ - The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`.
1537
+ - We get the image hidden states for the image through the vision encoder (and potentially the perceiver), and that hidden state is then projected into the text embedding space.
1538
+ We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer.
1539
+ - The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_tok_around_image vector_tok_4`. That sequence is fed to the LM.
1540
+ - To fit the format of that sequence, `input_ids`, `inputs_embeds` and `attention_mask` are all adapted to insert the image hidden states.
1541
+ """
1542
+ num_images, _, vision_hidden_size = image_hidden_states.shape
1543
+ special_image_token_mask = input_ids == self.image_token_id
1544
+ new_inputs_embeds = inputs_embeds.clone()
1545
+ reshaped_image_hidden_states = image_hidden_states.view(-1, vision_hidden_size)
1546
+ new_inputs_embeds[special_image_token_mask] = reshaped_image_hidden_states
1547
+ return new_inputs_embeds
1548
+
1549
+ @add_start_docstrings_to_model_forward(
1550
+ """
1551
+ Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
1552
+ the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
1553
+ max_num_images is the maximum number of images among the batch_size samples in the batch.
1554
+
1555
+ Padding images are not needed beyond padding the pixel_values at the entrance of the model.
1556
+ For efficiency, we only pass the real images through the vision_model's forward,
1557
+ discarding the padding images, i.e. pixel_values of size (image_batch_size, 3, height, width) where
1558
+ image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
1559
+ """,
1560
+ IDEFICS2_INPUTS_DOCSTRING,
1561
+ )
1562
+ def forward(
1563
+ self,
1564
+ input_ids: torch.LongTensor = None,
1565
+ attention_mask: Optional[torch.Tensor] = None,
1566
+ position_ids: Optional[torch.LongTensor] = None,
1567
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1568
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1569
+ pixel_values: Optional[torch.FloatTensor] = None,
1570
+ pixel_attention_mask: Optional[torch.BoolTensor] = None,
1571
+ image_hidden_states: Optional[torch.FloatTensor] = None,
1572
+ use_cache: Optional[bool] = None,
1573
+ output_attentions: Optional[bool] = None,
1574
+ output_hidden_states: Optional[bool] = None,
1575
+ return_dict: Optional[bool] = None,
1576
+ ) -> Union[Tuple, Idefics2BaseModelOutputWithPast]:
1577
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1578
+ output_hidden_states = (
1579
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1580
+ )
1581
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1582
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1583
+
1584
+ # retrieve input_ids and inputs_embeds
1585
+ if input_ids is not None:
1586
+ batch_size, seq_length = input_ids.shape
1587
+ elif inputs_embeds is not None:
1588
+ batch_size, seq_length, _ = inputs_embeds.shape
1589
+ else:
1590
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1591
+
1592
+ past_seen_tokens = 0
1593
+ if use_cache:
1594
+ if not isinstance(past_key_values, Cache):
1595
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1596
+ past_seen_tokens = past_key_values.get_usable_length(seq_length)
1597
+
1598
+ if inputs_embeds is not None and input_ids is None and past_seen_tokens == 0:
1599
+ raise ValueError("When first calling the model, if input_embeds are passed, input_ids should not be None.")
1600
+
1601
+ if inputs_embeds is None:
1602
+ inputs_embeds = self.text_model.get_input_embeddings()(input_ids)
1603
+
1604
+ # START VISUAL INPUTS INTEGRATION
1605
+ if pixel_values is not None and image_hidden_states is not None:
1606
+ raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
1607
+ elif pixel_values is not None:
1608
+ batch_size, num_images, num_channels, height, width = pixel_values.shape
1609
+ pixel_values = pixel_values.to(dtype=self.dtype) # fp16 compatibility
1610
+ pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])
1611
+
1612
+ # Remove padding images - padding images are full 0.
1613
+ nb_values_per_image = pixel_values.shape[1:].numel()
1614
+ real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
1615
+ pixel_values = pixel_values[real_images_inds].contiguous()
1616
+
1617
+ # Handle the vision attention mask
1618
+ if pixel_attention_mask is None:
1619
+ pixel_attention_mask = torch.ones(
1620
+ size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)),
1621
+ dtype=torch.bool,
1622
+ device=pixel_values.device,
1623
+ )
1624
+ else:
1625
+ # Remove padding images from the mask
1626
+ pixel_attention_mask = pixel_attention_mask.view(
1627
+ batch_size * num_images, *pixel_attention_mask.shape[2:]
1628
+ )
1629
+ pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()
1630
+
1631
+ patch_size = self.config.vision_config.patch_size
1632
+ patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
1633
+ patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
1634
+ patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
1635
+
1636
+ # Get sequence from the vision encoder
1637
+ image_hidden_states = self.vision_model(
1638
+ pixel_values=pixel_values,
1639
+ patch_attention_mask=patch_attention_mask,
1640
+ ).last_hidden_state
1641
+
1642
+ # Modality projection & resampling
1643
+ image_hidden_states = self.connector(
1644
+ image_hidden_states, attention_mask=patch_attention_mask.view(pixel_values.size(0), -1)
1645
+ )
1646
+
1647
+ elif image_hidden_states is not None:
1648
+ image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)
1649
+
1650
+ if past_seen_tokens == 0 and inputs_embeds is not None and image_hidden_states is not None:
1651
+ # When we generate, we don't want to replace the potential image_token_id that we generated by images
1652
+ # that simply don't exist
1653
+ inputs_embeds = self.inputs_merger(
1654
+ input_ids=input_ids,
1655
+ inputs_embeds=inputs_embeds,
1656
+ image_hidden_states=image_hidden_states,
1657
+ )
1658
+
1659
+ outputs = self.text_model(
1660
+ inputs_embeds=inputs_embeds,
1661
+ attention_mask=attention_mask,
1662
+ position_ids=position_ids,
1663
+ past_key_values=past_key_values,
1664
+ output_attentions=output_attentions,
1665
+ output_hidden_states=output_hidden_states,
1666
+ return_dict=return_dict,
1667
+ )
1668
+
1669
+ if not return_dict:
1670
+ return tuple(v for v in [*outputs, image_hidden_states] if v is not None)
1671
+
1672
+ return Idefics2BaseModelOutputWithPast(
1673
+ last_hidden_state=outputs.last_hidden_state,
1674
+ past_key_values=outputs.past_key_values,
1675
+ hidden_states=outputs.hidden_states,
1676
+ attentions=outputs.attentions,
1677
+ image_hidden_states=image_hidden_states,
1678
+ )
1679
+
1680
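+ # Illustrative sketch, not part of the upstream file: the token/image merging that
+ # `Idefics2Model.inputs_merger` performs, replayed on toy tensors without instantiating the model.
+ # The image token id and all sizes are hypothetical example values.
+ def _sketch_inputs_merger_logic():
+     import torch
+     image_token_id = 99  # hypothetical id of the <image> placeholder token
+     hidden_dim = 4
+     # text sequence: tok, <image>, <image>, tok (one image expanded to image_seq_len = 2 placeholder slots)
+     input_ids = torch.tensor([[7, image_token_id, image_token_id, 11]])
+     inputs_embeds = torch.zeros(1, 4, hidden_dim)
+     image_hidden_states = torch.arange(2 * hidden_dim, dtype=torch.float32).view(1, 2, hidden_dim)
+     special_image_token_mask = input_ids == image_token_id
+     merged = inputs_embeds.clone()
+     merged[special_image_token_mask] = image_hidden_states.view(-1, hidden_dim)
+     # positions 1 and 2 now hold the image hidden states; the other positions keep the text embeddings
+     assert torch.equal(merged[0, 1], image_hidden_states[0, 0])
+     return merged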
+
1681
+ @add_start_docstrings(
1682
+ """The Idefics2 Model with a language modeling head. It is made up a SigLIP vision encoder, with a language modeling head on top. """,
1683
+ IDEFICS2_START_DOCSTRING,
1684
+ )
1685
+ class Idefics2ForConditionalGeneration(Idefics2PreTrainedModel):
1686
+ _tied_weights_keys = ["lm_head.weight"]
1687
+
1688
+ def __init__(self, config):
1689
+ super().__init__(config)
1690
+ self.model = Idefics2Model(config)
1691
+ self.image_token_id = self.config.image_token_id
1692
+
1693
+ self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
1694
+ self.vocab_size = config.text_config.vocab_size
1695
+
1696
+ # Initialize weights and apply final processing
1697
+ self.post_init()
1698
+
1699
+ def enable_input_require_grads(self):
1700
+ """
1701
+ Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
1702
+ the model weights fixed.
1703
+ """
1704
+
1705
+ def make_inputs_require_grads(module, input, output):
1706
+ output.requires_grad_(True)
1707
+
1708
+ self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
1709
+ self._vision_require_grads_hook = self.model.vision_model.get_input_embeddings().register_forward_hook(
1710
+ make_inputs_require_grads
1711
+ )
1712
+
1713
+ def get_input_embeddings(self):
1714
+ return self.model.text_model.get_input_embeddings()
1715
+
1716
+ def set_input_embeddings(self, value):
1717
+ self.model.text_model.set_input_embeddings(value)
1718
+
1719
+ def get_output_embeddings(self):
1720
+ return self.lm_head
1721
+
1722
+ def set_output_embeddings(self, new_embeddings):
1723
+ self.lm_head = new_embeddings
1724
+
1725
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
1726
+ # model_embeds = self.model.resize_token_embeddings(new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of)
1727
+ model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1728
+ if new_num_tokens is None and pad_to_multiple_of is None:
1729
+ return model_embeds
1730
+
1731
+ # Update base model and current model config
1732
+ # Ignore copy
1733
+ self.config.text_config.vocab_size = model_embeds.weight.shape[0]
1734
+ self.vocab_size = self.config.text_config.vocab_size
1735
+
1736
+ # Tie weights again if needed
1737
+ self.tie_weights()
1738
+
1739
+ return model_embeds
1740
+
1741
+ def tie_weights(self):
1742
+ """
1743
+ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding.
1744
+ """
1745
+ output_embeddings = self.get_output_embeddings()
1746
+ input_embeddings = self.get_input_embeddings()
1747
+
1748
+ if getattr(self.config, "tie_word_embeddings", True):
1749
+ output_embeddings.weight = input_embeddings.weight
1750
+
1751
+ @add_start_docstrings_to_model_forward(IDEFICS2_INPUTS_DOCSTRING)
1752
+ @replace_return_docstrings(output_type=Idefics2CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1753
+ def forward(
1754
+ self,
1755
+ input_ids: torch.LongTensor = None,
1756
+ attention_mask: Optional[torch.Tensor] = None,
1757
+ position_ids: Optional[torch.LongTensor] = None,
1758
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1759
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1760
+ pixel_values: Optional[torch.FloatTensor] = None,
1761
+ pixel_attention_mask: Optional[torch.BoolTensor] = None,
1762
+ image_hidden_states: Optional[torch.FloatTensor] = None,
1763
+ labels: Optional[torch.LongTensor] = None,
1764
+ use_cache: Optional[bool] = None,
1765
+ output_attentions: Optional[bool] = None,
1766
+ output_hidden_states: Optional[bool] = None,
1767
+ return_dict: Optional[bool] = None,
1768
+ ) -> Union[Tuple, Idefics2CausalLMOutputWithPast]:
1769
+ r"""
1770
+ Args:
1771
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1772
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1773
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1774
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1775
+
1776
+ Returns:
1777
+
1778
+ Example:
1779
+
1780
+ ```python
1781
+ >>> import requests
1782
+ >>> import torch
1783
+ >>> from PIL import Image
1784
+ >>> from io import BytesIO
1785
+
1786
+ >>> from transformers import AutoProcessor, AutoModelForVision2Seq
1787
+ >>> from transformers.image_utils import load_image
1788
+
1789
+ >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
1790
+ >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
1791
+ >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
1792
+ >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
1793
+
1794
+ >>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b-base")
1795
+ >>> model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/idefics2-8b-base", device_map="auto")
1796
+
1797
+ >>> BAD_WORDS_IDS = processor.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
1798
+ >>> EOS_WORDS_IDS = [processor.tokenizer.eos_token_id]
1799
+
1800
+ >>> # Create inputs
1801
+ >>> prompts = [
1802
+ ... "<image>In this image, we can see the city of New York, and more specifically the Statue of Liberty.<image>In this image,",
1803
+ ... "In which city is that bridge located?<image>",
1804
+ ... ]
1805
+ >>> images = [[image1, image2], [image3]]
1806
+ >>> inputs = processor(text=prompts, padding=True, return_tensors="pt").to("cuda")
1807
+
1808
+ >>> # Generate
1809
+ >>> generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=20)
1810
+ >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
1811
+
1812
+ >>> print(generated_texts)
1813
+ ['In this image, we can see the city of New York, and more specifically the Statue of Liberty. In this image, we can see the city of New York, and more specifically the Statue of Liberty.\n\n', 'In which city is that bridge located?\n\nThe bridge is located in the city of Pittsburgh, Pennsylvania.\n\n\nThe bridge is']
1814
+ ```"""
1815
+
1816
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1817
+ output_hidden_states = (
1818
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1819
+ )
1820
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1821
+
1822
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1823
+ outputs = self.model(
1824
+ input_ids=input_ids,
1825
+ attention_mask=attention_mask,
1826
+ position_ids=position_ids,
1827
+ past_key_values=past_key_values,
1828
+ inputs_embeds=inputs_embeds,
1829
+ pixel_values=pixel_values,
1830
+ pixel_attention_mask=pixel_attention_mask,
1831
+ image_hidden_states=image_hidden_states,
1832
+ use_cache=use_cache,
1833
+ output_attentions=output_attentions,
1834
+ output_hidden_states=output_hidden_states,
1835
+ return_dict=return_dict,
1836
+ )
1837
+
1838
+ hidden_states = outputs[0]
1839
+ logits = self.lm_head(hidden_states)
1840
+ logits = logits.float()
1841
+
1842
+ loss = None
1843
+ if labels is not None:
1844
+ labels = labels.to(logits.device)
1845
+ # Shift so that tokens < n predict n
1846
+ if attention_mask is not None:
1847
+ shift_attention_mask = attention_mask[..., 1:].to(logits.device)
1848
+ shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous()
1849
+ shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous()
1850
+ else:
1851
+ shift_logits = logits[..., :-1, :].contiguous()
1852
+ shift_labels = labels[..., 1:].contiguous()
1853
+ # Flatten the tokens
1854
+ loss_fct = CrossEntropyLoss(ignore_index=self.image_token_id)
1855
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1856
+
1857
+ if not return_dict:
1858
+ output = (logits,) + outputs[1:]
1859
+ return (loss,) + output if loss is not None else output
1860
+
1861
+ return Idefics2CausalLMOutputWithPast(
1862
+ loss=loss,
1863
+ logits=logits,
1864
+ past_key_values=outputs.past_key_values,
1865
+ hidden_states=outputs.hidden_states,
1866
+ attentions=outputs.attentions,
1867
+ image_hidden_states=outputs.image_hidden_states,
1868
+ )
1869
+
1870
+ def prepare_inputs_for_generation(
1871
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1872
+ ):
1873
+ # Omit tokens covered by past_key_values
1874
+ if past_key_values is not None:
1875
+ if isinstance(past_key_values, Cache):
1876
+ cache_length = past_key_values.get_seq_length()
1877
+ past_length = past_key_values.seen_tokens
1878
+ max_cache_length = past_key_values.get_max_length()
1879
+ else:
1880
+ cache_length = past_length = past_key_values[0][0].shape[2]
1881
+ max_cache_length = None
1882
+
1883
+ # Keep only the unprocessed tokens:
1884
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1885
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1886
+ # input)
1887
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1888
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1889
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1890
+ # input_ids based on the past_length.
1891
+ elif past_length < input_ids.shape[1]:
1892
+ input_ids = input_ids[:, past_length:]
1893
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1894
+
1895
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1896
+ if (
1897
+ max_cache_length is not None
1898
+ and attention_mask is not None
1899
+ and cache_length + input_ids.shape[1] > max_cache_length
1900
+ ):
1901
+ attention_mask = attention_mask[:, -max_cache_length:]
1902
+
1903
+ position_ids = kwargs.get("position_ids", None)
1904
+ if attention_mask is not None and position_ids is None:
1905
+ # create position_ids on the fly for batch generation
1906
+ position_ids = attention_mask.long().cumsum(-1) - 1
1907
+ position_ids.masked_fill_(attention_mask == 0, 1)
1908
+ if past_key_values:
1909
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1910
+
1911
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1912
+ if inputs_embeds is not None and past_key_values is None:
1913
+ model_inputs = {"inputs_embeds": inputs_embeds}
1914
+ else:
1915
+ model_inputs = {"input_ids": input_ids}
1916
+
1917
+ image_hidden_states = kwargs.get("image_hidden_states", None)
1918
+ if image_hidden_states is not None:
1919
+ pixel_values = None
1920
+ pixel_attention_mask = None
1921
+ else:
1922
+ pixel_values = kwargs.get("pixel_values", None)
1923
+ pixel_attention_mask = kwargs.get("pixel_attention_mask", None)
1924
+ model_inputs.update(
1925
+ {
1926
+ "position_ids": position_ids,
1927
+ "past_key_values": past_key_values,
1928
+ "use_cache": kwargs.get("use_cache"),
1929
+ "attention_mask": attention_mask,
1930
+ "pixel_values": pixel_values,
1931
+ "pixel_attention_mask": pixel_attention_mask,
1932
+ "image_hidden_states": image_hidden_states,
1933
+ }
1934
+ )
1935
+ return model_inputs
1936
+
1937
+ def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder, **kwargs):
1938
+ model_kwargs = super()._update_model_kwargs_for_generation(
1939
+ outputs=outputs,
1940
+ model_kwargs=model_kwargs,
1941
+ is_encoder_decoder=is_encoder_decoder,
1942
+ **kwargs,
1943
+ )
1944
+ # Get the precomputed image_hidden_states
1945
+ model_kwargs["image_hidden_states"] = outputs.image_hidden_states
1946
+ return model_kwargs
1947
+
1948
+ @staticmethod
1949
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
1950
+ def _reorder_cache(past_key_values, beam_idx):
1951
+ reordered_past = ()
1952
+ for layer_past in past_key_values:
1953
+ reordered_past += (
1954
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1955
+ )
1956
+ return reordered_past
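
The `forward` logic at the top of this diff pools the pixel-level `pixel_attention_mask` into a patch-level mask before it reaches the SigLIP vision tower. Below is a minimal, self-contained sketch of that pooling step; the toy batch, image, and patch sizes are assumptions chosen for readability and are not the real model values.

```python
# Minimal sketch of the patch-mask pooling used in Idefics2Model.forward above.
# batch/height/width/patch_size are toy assumptions, not the real model sizes.
import torch

batch, height, width, patch_size = 2, 8, 8, 4

pixel_attention_mask = torch.zeros(batch, height, width, dtype=torch.bool)
pixel_attention_mask[0, :8, :8] = True  # first image fills the whole canvas
pixel_attention_mask[1, :4, :8] = True  # second image only covers the top half

# Cut H and W into non-overlapping patch_size x patch_size tiles ...
patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
# ... and keep a patch if it contains at least one valid pixel.
patch_attention_mask = patches_subgrid.sum(dim=(-1, -2)) > 0

print(patch_attention_mask.shape)  # torch.Size([2, 2, 2])
print(patch_attention_mask[1])     # top row of patches valid, bottom row is padding
```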
venv/lib/python3.10/site-packages/transformers/models/idefics2/processing_idefics2.py ADDED
@@ -0,0 +1,348 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for IDEFICS2.
17
+ """
18
+
19
+ from typing import TYPE_CHECKING, Dict, List, Optional, Union
20
+
21
+ from ...feature_extraction_utils import BatchFeature
22
+ from ...image_utils import ImageInput, is_valid_image, load_image
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...tokenization_utils_base import AddedToken, BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy
25
+ from ...utils import TensorType, logging
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ from ...pipelines.conversational import Conversation
30
+ from ...tokenization_utils_base import PreTokenizedInput
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ def is_url(val) -> bool:
37
+ return isinstance(val, str) and val.startswith("http")
38
+
39
+
40
+ def is_image_or_image_url(elem):
41
+ return is_url(elem) or is_valid_image(elem)
42
+
43
+
44
+ class Idefics2Processor(ProcessorMixin):
45
+ r"""
46
+ Constructs an IDEFICS2 processor which wraps a Llama tokenizer and an IDEFICS2 image processor into a single processor.
47
+
48
+ [`Idefics2Processor`] offers all the functionalities of [`Idefics2ImageProcessor`] and [`LlamaTokenizerFast`]. See
49
+ the docstring of [`~Idefics2Processor.__call__`] and [`~Idefics2Processor.decode`] for more information.
50
+
51
+ Args:
52
+ image_processor (`Idefics2ImageProcessor`):
53
+ An instance of [`Idefics2ImageProcessor`]. The image processor is a required input.
54
+ tokenizer (`PreTrainedTokenizerBase`, *optional*):
55
+ An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
56
+ image_seq_len (`int`, *optional*, defaults to 64):
57
+ The length of the image sequence i.e. the number of <image> tokens per image in the input.
58
+ This parameter is used to build the string from the input prompt and image tokens and should match the
59
+ config.perceiver_config.resampler_n_latents value for the model used.
60
+ """
61
+
62
+ attributes = ["image_processor", "tokenizer"]
63
+ image_processor_class = "Idefics2ImageProcessor"
64
+ tokenizer_class = "AutoTokenizer"
65
+
66
+ def __init__(self, image_processor, tokenizer=None, image_seq_len: int = 64, **kwargs):
67
+ if image_processor is None:
68
+ raise ValueError("You need to specify an `image_processor`.")
69
+ if tokenizer is None:
70
+ raise ValueError("You need to specify a `tokenizer`.")
71
+
72
+ self.fake_image_token = AddedToken("<fake_token_around_image>", normalized=False, special=True)
73
+ self.image_token = AddedToken("<image>", normalized=False, special=True)
74
+ self.end_of_utterance_token = AddedToken("<end_of_utterance>", normalized=False, special=True)
75
+ self.image_seq_len = image_seq_len
76
+
77
+ tokens_to_add = {
78
+ "additional_special_tokens": [self.fake_image_token, self.image_token, self.end_of_utterance_token]
79
+ }
80
+ tokenizer.add_special_tokens(tokens_to_add)
81
+
82
+ # Stores a Jinja template that formats chat histories into tokenizable strings
83
+ self.chat_template = kwargs.pop("chat_template", None)
84
+
85
+ super().__init__(image_processor, tokenizer)
86
+
87
+ def _extract_images_from_prompts(self, prompts):
88
+ prompt_images = []
89
+ for prompt in prompts:
90
+ images = []
91
+ for elem in prompt:
92
+ if is_valid_image(elem):
93
+ images.append(elem)
94
+ elif is_url(elem):
95
+ images.append(load_image(elem))
96
+ prompt_images.append(images)
97
+ return prompt_images
98
+
99
+ def __call__(
100
+ self,
101
+ text: Union[TextInput, "PreTokenizedInput", List[TextInput], List["PreTokenizedInput"]] = None,
102
+ images: Union[ImageInput, List[ImageInput], List[List[ImageInput]]] = None,
103
+ image_seq_len: Optional[int] = None,
104
+ padding: Union[bool, str, PaddingStrategy] = False,
105
+ truncation: Union[bool, str, TruncationStrategy] = None,
106
+ max_length: Optional[int] = None,
107
+ is_split_into_words: bool = False,
108
+ add_special_tokens: bool = True,
109
+ return_tensors: Optional[Union[str, TensorType]] = None,
110
+ ) -> BatchEncoding:
111
+ """
112
+ Processes the input prompts and returns a BatchEncoding.
113
+
114
+ Example:
115
+
116
+ ```python
117
+ >>> import requests
118
+ >>> from transformers import Idefics2Processor
119
+ >>> from transformers.image_utils import load_image
120
+
121
+ >>> processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b", image_seq_len=2)
122
+ >>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example
123
+
124
+ >>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
125
+ >>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"
126
+
127
+ >>> image1, image2 = load_image(url1), load_image(url2)
128
+ >>> images = [[image1], [image2]]
129
+
130
+ >>> text = [
131
+ ... "<image>In this image, we see",
132
+ ... "bla bla bla<image>",
133
+ ... ]
134
+ >>> outputs = processor(text=text, images=images, return_tensors="pt", padding=True)
135
+ >>> input_ids = outputs.input_ids
136
+ >>> input_tokens = processor.tokenizer.batch_decode(input_ids)
137
+ >>> print(input_tokens)
138
+ ['<s><fake_token_around_image><image><image><fake_token_around_image> In this image, we see', '<s> bla bla bla<fake_token_around_image><image><image><fake_token_around_image>']
139
+ ```
140
+
141
+ Args:
142
+ text (`Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]`, *optional*):
143
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
144
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
145
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
146
+
147
+ Wherever an image token, `<image>`, is encountered, it is expanded to
148
+ `<fake_token_around_image>` + `<image>` * `image_seq_len` + `<fake_token_around_image>`.
149
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, *optional*):
150
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
151
+ tensor. If of type `List[ImageInput]`, it's assumed that this is for a single prompt, i.e. of batch size 1.
152
+ image_seq_len (`int`, *optional*):
153
+ The length of the image sequence. If not provided, the default value is used.
154
+ padding (`Union[bool, str, PaddingStrategy]`, *optional*, defaults to `False`):
155
+ Padding strategy applied to the input ids. See [`PreTrainedTokenizerFast.pad`] for more information.
156
+ truncation (`Union[bool, str, TruncationStrategy]`, *optional*):
157
+ Truncation strategy applied to the input ids. See [`PreTrainedTokenizerFast.truncate`] for more information.
158
+ max_length (`int`, *optional*):
159
+ Maximum length of the returned list and optionally padding/truncation length. See
160
+ [`PreTrainedTokenizerFast.__call__`] for more information.
161
+ is_split_into_words (`bool`, *optional*, defaults to `False`):
162
+ Whether the input text is split into words or not. If set to `True`, the tokenizer will skip the
163
+ tokenization process and assume the input is already tokenized.
164
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
165
+ Whether to add special tokens or not. See [`PreTrainedTokenizerFast.__call__`] for more information.
166
+ return_tensors (`Union[str, TensorType]`, *optional*):
167
+ If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
168
+ information.
169
+ """
170
+ image_seq_len = image_seq_len if image_seq_len is not None else self.image_seq_len
171
+
172
+ n_images_in_text = []
173
+ inputs = BatchFeature()
174
+
175
+ if text is not None:
176
+ if isinstance(text, str):
177
+ text = [text]
178
+ elif not isinstance(text, list) or not isinstance(text[0], str):
179
+ raise ValueError("Invalid input text. Please provide a string, or a list of strings")
180
+
181
+ # Replace the image token with fake tokens around the expanded image token sequence of length `image_seq_len`
182
+ fake_image_token = self.fake_image_token.content
183
+ image_token = self.image_token.content
184
+ image_str = f"{fake_image_token}{image_token * image_seq_len}{fake_image_token}"
185
+
186
+ if self.image_processor.do_image_splitting:
187
+ # A single image token is split into 4 patches + 1 original image
188
+ image_str = image_str * 5
189
+
190
+ prompt_strings = []
191
+ for sample in text:
192
+ n_images_in_text.append(sample.count(image_token))
193
+ sample = sample.replace(image_token, image_str)
194
+ # Remove any double fake tokens if images are adjacent
195
+ sample = sample.replace(f"{fake_image_token}{fake_image_token}", f"{fake_image_token}")
196
+ prompt_strings.append(sample)
197
+
198
+ text_inputs = self.tokenizer(
199
+ text=prompt_strings,
200
+ add_special_tokens=add_special_tokens,
201
+ padding=padding,
202
+ truncation=truncation,
203
+ max_length=max_length,
204
+ is_split_into_words=is_split_into_words,
205
+ return_tensors=return_tensors,
206
+ )
207
+ inputs.update(text_inputs)
208
+
209
+ if images is not None:
210
+ if is_image_or_image_url(images):
211
+ images = [[images]]
212
+ elif isinstance(images, list) and is_image_or_image_url(images[0]):
213
+ images = [images]
214
+ elif (
215
+ not isinstance(images, list)
216
+ or not isinstance(images[0], list)
217
+ or not is_image_or_image_url(images[0][0])
218
+ ):
219
+ raise ValueError(
220
+ "Invalid input images. Please provide a single image or a list of images or a list of list of images."
221
+ )
222
+
223
+ n_images_in_images = [len(sample) for sample in images]
224
+ if text is not None and n_images_in_images != n_images_in_text:
225
+ raise ValueError(
226
+ f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same."
227
+ )
228
+
229
+ # Load images if they are URLs
230
+ images = [[load_image(im) for im in sample] for sample in images]
231
+ image_inputs = self.image_processor(images, return_tensors=return_tensors)
232
+ inputs.update(image_inputs)
233
+
234
+ return inputs
235
+
236
+ def batch_decode(self, *args, **kwargs):
237
+ """
238
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
239
+ refer to the docstring of this method for more information.
240
+ """
241
+ return self.tokenizer.batch_decode(*args, **kwargs)
242
+
243
+ def decode(self, *args, **kwargs):
244
+ """
245
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
246
+ the docstring of this method for more information.
247
+ """
248
+ return self.tokenizer.decode(*args, **kwargs)
249
+
250
+ @property
251
+ def model_input_names(self):
252
+ tokenizer_input_names = self.tokenizer.model_input_names
253
+ image_processor_input_names = self.image_processor.model_input_names
254
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
255
+
256
+ def apply_chat_template(
257
+ self,
258
+ conversation: Union[List[Dict[str, str]], "Conversation"],
259
+ chat_template: Optional[str] = None,
260
+ tokenize: bool = False,
261
+ **kwargs,
262
+ ) -> str:
263
+ """
264
+ Overrides the tokenizer's `apply_chat_template` method to apply the IDEFICS2 chat template by default
265
+ if no chat template is provided.
266
+
267
+ By default, the output isn't tokenized. This is because the IDEFICS2 chat template is designed to insert
268
+ the image token <image> into the sequence according to the message, but does not handle expanding the image
269
+ tokens to the sequence length or adding the surrounding tokens, e.g. <fake_token_around_image>.
270
+
271
+ Args:
272
+ conversation (`Union[List[Dict, str, str], "Conversation"]`):
273
+ The conversation to format.
274
+ chat_template (`Optional[str]`, *optional*):
275
+ The Jinja template to use for formatting the conversation. If not provided, the default chat template
276
+ is used.
277
+ tokenize (`bool`, *optional*, defaults to `False`):
278
+ Whether to tokenize the output or not.
279
+ **kwargs:
280
+ Additional keyword arguments for the tokenizer's `apply_chat_template` method.
281
+ """
282
+
283
+ if chat_template is None:
284
+ if self.chat_template is not None:
285
+ chat_template = self.chat_template
286
+ else:
287
+ chat_template = self.default_chat_template
288
+
289
+ return self.tokenizer.apply_chat_template(
290
+ conversation, chat_template=chat_template, tokenize=tokenize, **kwargs
291
+ )
292
+
293
+ @property
294
+ def default_chat_template(self):
295
+ """
296
+ This template formats inputs in the form of a chat history. For each message in the chat history:
297
+ * the template will output the role of the speaker followed by the content of the message.
298
+ * content can be a single string or a list of strings and images.
299
+ * If the content element is an image, the template will output an <image> token for each image
300
+ * The template will output an <end_of_utterance> token at the end of each message.
301
+
302
+ Example:
303
+
304
+ ```python
305
+ messages = [{
306
+ "role": "user",
307
+ "content": [
308
+ {"type": "text", "text": "What’s in this image?"},
309
+ {"type": "image"},
310
+ {"type": "image"},
311
+ ],
312
+ },
313
+ {
314
+ "role": "assistant",
315
+ "content": [{"type": "text", "text": "This picture depicts Idefix, the dog of Obelix in Asterix and Obelix. Idefix is running on the ground."},]
316
+ }]
317
+ ```
318
+
319
+ Will create outputs like:
320
+ ```
321
+ User: What’s in this image?<image><image><end_of_utterance>
322
+ Assistant: This picture depicts Idefix, the dog of Obelix in Asterix and Obelix. Idefix is running on the ground.<end_of_utterance>
323
+ ```
324
+ """
325
+ # fmt: off
326
+ return (
327
+ "{% for message in messages %}"
328
+ "{{message['role'].capitalize()}}"
329
+ "{% if message['content'][0]['type'] == 'image' %}"
330
+ "{{':'}}"
331
+ "{% else %}"
332
+ "{{': '}}"
333
+ "{% endif %}"
334
+ "{% for line in message['content'] %}"
335
+ "{% if line['type'] == 'text' %}"
336
+ "{{line['text']}}"
337
+ "{% elif line['type'] == 'image' %}"
338
+ "{{ '<image>' }}"
339
+ "{% endif %}"
340
+ "{% endfor %}"
341
+ "<end_of_utterance>\n"
342
+ "{% endfor %}"
343
+
344
+ "{% if add_generation_prompt %}"
345
+ "{{ 'Assistant:' }}"
346
+ "{% endif %}"
347
+ )
348
+ # fmt: on
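
The `__call__` method above expands each `<image>` placeholder into a run of image tokens wrapped in fake tokens (five copies of the run when image splitting is enabled) and collapses doubled fake tokens between adjacent images. A small stand-alone sketch of that string expansion follows; the `image_seq_len` and splitting flag are illustrative defaults, whereas the real processor reads them from its own attributes.

```python
# Stand-alone sketch of the prompt expansion performed by Idefics2Processor.__call__ above.
FAKE = "<fake_token_around_image>"
IMG = "<image>"

def expand_image_tokens(sample: str, image_seq_len: int = 2, do_image_splitting: bool = False) -> str:
    image_str = f"{FAKE}{IMG * image_seq_len}{FAKE}"
    if do_image_splitting:
        # each image is split into 4 patches plus the original image
        image_str = image_str * 5
    sample = sample.replace(IMG, image_str)
    # adjacent images would otherwise produce doubled fake tokens
    return sample.replace(f"{FAKE}{FAKE}", FAKE)

print(expand_image_tokens("<image>In this image, we see"))
# -> <fake_token_around_image><image><image><fake_token_around_image>In this image, we see
```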
venv/lib/python3.10/site-packages/transformers/models/llava_next/__init__.py ADDED
@@ -0,0 +1,74 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_llava_next": ["LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlavaNextConfig"],
21
+ "processing_llava_next": ["LlavaNextProcessor"],
22
+ }
23
+
24
+
25
+ try:
26
+ if not is_torch_available():
27
+ raise OptionalDependencyNotAvailable()
28
+ except OptionalDependencyNotAvailable:
29
+ pass
30
+ else:
31
+ _import_structure["modeling_llava_next"] = [
32
+ "LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
33
+ "LlavaNextForConditionalGeneration",
34
+ "LlavaNextPreTrainedModel",
35
+ ]
36
+
37
+ try:
38
+ if not is_vision_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["image_processing_llava_next"] = ["LlavaNextImageProcessor"]
44
+
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_llava_next import LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, LlavaNextConfig
48
+ from .processing_llava_next import LlavaNextProcessor
49
+
50
+ try:
51
+ if not is_torch_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .modeling_llava_next import (
57
+ LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
58
+ LlavaNextForConditionalGeneration,
59
+ LlavaNextPreTrainedModel,
60
+ )
61
+
62
+ try:
63
+ if not is_vision_available():
64
+ raise OptionalDependencyNotAvailable()
65
+ except OptionalDependencyNotAvailable:
66
+ pass
67
+ else:
68
+ from .image_processing_llava_next import LlavaNextImageProcessor
69
+
70
+
71
+ else:
72
+ import sys
73
+
74
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
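
The `__init__.py` above registers a lazy module and simply drops torch-only or vision-only symbols from the import structure when those dependencies are missing. A rough sketch of the user-visible effect is below; `available_llava_next_symbols` is a hypothetical helper written only for illustration, while the availability checks are the public ones imported above.

```python
# Rough sketch of the optional-dependency pattern in the __init__.py above.
# available_llava_next_symbols is a hypothetical helper, for illustration only.
from transformers.utils import is_torch_available, is_vision_available

def available_llava_next_symbols() -> list:
    symbols = ["LlavaNextConfig", "LlavaNextProcessor"]  # always importable
    if is_torch_available():  # modeling classes need torch
        symbols += ["LlavaNextForConditionalGeneration", "LlavaNextPreTrainedModel"]
    if is_vision_available():  # the image processor needs PIL
        symbols += ["LlavaNextImageProcessor"]
    return symbols

print(available_llava_next_symbols())
```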
venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.21 kB).
 
venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/configuration_llava_next.cpython-310.pyc ADDED
Binary file (4.88 kB).
 
venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/convert_llava_next_weights_to_hf.cpython-310.pyc ADDED
Binary file (11.4 kB).
 
venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/image_processing_llava_next.cpython-310.pyc ADDED
Binary file (23.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/modeling_llava_next.cpython-310.pyc ADDED
Binary file (23.9 kB).
 
venv/lib/python3.10/site-packages/transformers/models/llava_next/__pycache__/processing_llava_next.cpython-310.pyc ADDED
Binary file (6.66 kB).
 
venv/lib/python3.10/site-packages/transformers/models/llava_next/configuration_llava_next.py ADDED
@@ -0,0 +1,141 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """ Llava-NeXT model configuration"""
15
+
16
+ from ...configuration_utils import PretrainedConfig
17
+ from ...utils import logging
18
+ from ..auto import CONFIG_MAPPING
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "llava-hf/llava-v1.6-mistral-7b-hf": "https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf/resolve/main/config.json",
25
+ }
26
+
27
+
28
+ class LlavaNextConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`LlavaNextForConditionalGeneration`]. It is used to instantiate an
31
+ Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
32
+ with the defaults will yield a similar configuration to that of the [llava-hf/llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf)
33
+ model.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
40
+ The config object or dictionary of the vision backbone.
41
+ text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
42
+ The config object or dictionary of the text backbone.
43
+ ignore_index (`int`, *optional*, defaults to -100):
44
+ The ignore index for the loss function.
45
+ image_token_index (`int`, *optional*, defaults to 32000):
46
+ The image token index to encode the image prompt.
47
+ projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
48
+ The activation function used by the multimodal projector.
49
+ vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
50
+ The feature selection strategy used to select the vision feature from the vision backbone.
51
+ Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
52
+ If `"full"`, the full vision features are used.
53
+ vision_feature_layer (`int`, *optional*, defaults to -2):
54
+ The index of the layer to select the vision feature.
55
+ image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`):
56
+ A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list
57
+ of the form `(height, width)`.
58
+
59
+ Example:
60
+
61
+ ```python
62
+ >>> from transformers import LlavaNextForConditionalGeneration, LlavaNextConfig, CLIPVisionConfig, LlamaConfig
63
+
64
+ >>> # Initializing a CLIP-vision config
65
+ >>> vision_config = CLIPVisionConfig()
66
+
67
+ >>> # Initializing a Llama config
68
+ >>> text_config = LlamaConfig()
69
+
70
+ >>> # Initializing a Llava-Next llava-hf/llava-v1.6-mistral-7b-hf style configuration
71
+ >>> configuration = LlavaNextConfig(vision_config, text_config)
72
+
73
+ >>> # Initializing a model from the llava-hf/llava-v1.6-mistral-7b-hf style configuration
74
+ >>> model = LlavaNextForConditionalGeneration(configuration)
75
+
76
+ >>> # Accessing the model configuration
77
+ >>> configuration = model.config
78
+ ```"""
79
+
80
+ model_type = "llava_next"
81
+ is_composition = False
82
+
83
+ def __init__(
84
+ self,
85
+ vision_config=None,
86
+ text_config=None,
87
+ ignore_index=-100,
88
+ image_token_index=32000,
89
+ projector_hidden_act="gelu",
90
+ vision_feature_select_strategy="default",
91
+ vision_feature_layer=-2,
92
+ image_grid_pinpoints=None,
93
+ **kwargs,
94
+ ):
95
+ self.ignore_index = ignore_index
96
+ self.image_token_index = image_token_index
97
+ self.projector_hidden_act = projector_hidden_act
98
+
99
+ if vision_feature_select_strategy not in ["default", "full"]:
100
+ raise ValueError(
101
+ "vision_feature_select_strategy should be one of 'default', 'full'."
102
+ f"Got: {vision_feature_select_strategy}"
103
+ )
104
+
105
+ self.vision_feature_select_strategy = vision_feature_select_strategy
106
+ self.vision_feature_layer = vision_feature_layer
107
+ image_grid_pinpoints = (
108
+ image_grid_pinpoints
109
+ if image_grid_pinpoints is not None
110
+ else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
111
+ )
112
+ self.image_grid_pinpoints = image_grid_pinpoints
113
+
114
+ if isinstance(vision_config, dict):
115
+ vision_config["model_type"] = (
116
+ vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model"
117
+ )
118
+ vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
119
+ elif vision_config is None:
120
+ vision_config = CONFIG_MAPPING["clip_vision_model"](
121
+ intermediate_size=4096,
122
+ hidden_size=1024,
123
+ patch_size=14,
124
+ image_size=336,
125
+ num_hidden_layers=24,
126
+ num_attention_heads=16,
127
+ vocab_size=32000,
128
+ projection_dim=768,
129
+ )
130
+
131
+ self.vision_config = vision_config
132
+
133
+ if isinstance(text_config, dict):
134
+ text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama"
135
+ text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
136
+ elif text_config is None:
137
+ text_config = CONFIG_MAPPING["llama"]()
138
+
139
+ self.text_config = text_config
140
+
141
+ super().__init__(**kwargs)
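
When sub-configs are passed as plain dicts, the `__init__` above upgrades them through `CONFIG_MAPPING`, falling back to `clip_vision_model` and `llama` when no `model_type` is given, and fills in the default `image_grid_pinpoints`. A short sketch of that behaviour; the field values are illustrative, and it assumes a transformers build that ships `LlavaNextConfig`.

```python
# Sketch of the dict-to-config upgrade performed in LlavaNextConfig.__init__ above.
# The field values below are illustrative only.
from transformers import LlavaNextConfig

config = LlavaNextConfig(
    vision_config={"hidden_size": 1024, "patch_size": 14, "image_size": 336},  # no model_type -> clip_vision_model
    text_config={"model_type": "mistral"},  # explicit model_type -> MistralConfig
)

print(type(config.vision_config).__name__)  # CLIPVisionConfig
print(type(config.text_config).__name__)    # MistralConfig
print(config.image_grid_pinpoints[0])       # [336, 672] (default pinpoints)
```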
venv/lib/python3.10/site-packages/transformers/models/llava_next/convert_llava_next_weights_to_hf.py ADDED
@@ -0,0 +1,342 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Convert LLaVa-NeXT (LLaVa-1.6) checkpoints from the original repository.
16
+
17
+ URL: https://github.com/haotian-liu/LLaVA/tree/main.
18
+
19
+
20
+ The command used to obtain original logits is the following:
21
+ python llava/eval/run_llava.py --model-path "liuhaotian/llava-v1.6-mistral-7b" --image-file "images/llava_v1_5_radar.jpg" --query "What is shown in this image?" --max_new_tokens 100 --temperature 0
22
+
23
+ Note: logits are tested with torch==2.1.2.
24
+ """
25
+
26
+ import argparse
27
+ import glob
28
+ import json
29
+ from pathlib import Path
30
+
31
+ import requests
32
+ import torch
33
+ from accelerate import init_empty_weights
34
+ from huggingface_hub import hf_hub_download, snapshot_download
35
+ from PIL import Image
36
+ from safetensors import safe_open
37
+
38
+ from transformers import (
39
+ AddedToken,
40
+ AutoConfig,
41
+ AutoTokenizer,
42
+ LlavaNextConfig,
43
+ LlavaNextForConditionalGeneration,
44
+ LlavaNextImageProcessor,
45
+ LlavaNextProcessor,
46
+ )
47
+
48
+
49
+ KEYS_TO_MODIFY_MAPPING = {
50
+ "model.vision_tower.": "",
51
+ "model.mm_projector": "multi_modal_projector",
52
+ "model": "model.model",
53
+ "vision_model.model": "vision_model",
54
+ "lm_head": "language_model.lm_head",
55
+ "model.model": "language_model.model",
56
+ "multi_modal_projector.0": "multi_modal_projector.linear_1",
57
+ "multi_modal_projector.2": "multi_modal_projector.linear_2",
58
+ "language_model.model.image_newline": "image_newline",
59
+ }
60
+
61
+
62
+ def load_original_state_dict(model_id):
63
+ directory_path = snapshot_download(repo_id=model_id, allow_patterns=["*.safetensors"])
64
+
65
+ original_state_dict = {}
66
+ for path in glob.glob(f"{directory_path}/*"):
67
+ if path.endswith(".safetensors"):
68
+ with safe_open(path, framework="pt", device="cpu") as f:
69
+ for key in f.keys():
70
+ original_state_dict[key] = f.get_tensor(key)
71
+
72
+ return original_state_dict
73
+
74
+
75
+ def convert_state_dict_to_hf(state_dict):
76
+ new_state_dict = {}
77
+ for key, value in state_dict.items():
78
+ if key.endswith(".inv_freq"):
79
+ continue
80
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
81
+ if key_to_modify in key:
82
+ key = key.replace(key_to_modify, new_key)
83
+
84
+ new_state_dict[key] = value.to(torch.float16)
85
+ return new_state_dict
86
+
87
+
88
+ def load_image():
89
+ url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true"
90
+ image = Image.open(requests.get(url, stream=True).raw)
91
+ return image
92
+
93
+
94
+ def convert_llava_to_hf(model_id, pytorch_dump_folder_path, push_to_hub=False):
95
+ # load original config
96
+ filepath = hf_hub_download(repo_id=model_id, filename="config.json", repo_type="model")
97
+ # read json
98
+ with open(filepath) as f:
99
+ data = json.load(f)
100
+ print(data)
101
+
102
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
103
+ text_model_id = "mistralai/Mistral-7B-Instruct-v0.2"
104
+ image_token_index = 32000
105
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-7b":
106
+ text_model_id = "lmsys/vicuna-7b-v1.5"
107
+ image_token_index = 32000
108
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-13b":
109
+ text_model_id = "lmsys/vicuna-13b-v1.5"
110
+ image_token_index = 32000
111
+ elif model_id == "liuhaotian/llava-v1.6-34b":
112
+ text_model_id = "NousResearch/Nous-Hermes-2-Yi-34B"
113
+ image_token_index = 64000
114
+ vision_model_id = data["mm_vision_tower"]
115
+
116
+ torch.set_default_dtype(torch.float16)
117
+ text_config = AutoConfig.from_pretrained(text_model_id)
118
+
119
+ use_fast = False if model_id == "liuhaotian/llava-v1.6-34b" else True
120
+ tokenizer = AutoTokenizer.from_pretrained(text_model_id, use_fast=use_fast)
121
+ tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True)
122
+
123
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
124
+ # Mistral-7B doesn't have a padding token set yet
125
+ tokenizer.add_special_tokens({"pad_token": "<pad>"})
126
+
127
+ image_processor = LlavaNextImageProcessor.from_pretrained(vision_model_id)
128
+ processor = LlavaNextProcessor(tokenizer=tokenizer, image_processor=image_processor)
129
+
130
+ config = LlavaNextConfig(
131
+ text_config=text_config.to_dict(),
132
+ image_grid_pinpoints=image_processor.image_grid_pinpoints,
133
+ use_image_newline_parameter=True,
134
+ image_token_index=image_token_index,
135
+ )
136
+
137
+ with init_empty_weights():
138
+ model = LlavaNextForConditionalGeneration(config)
139
+
140
+ # load original state dict
141
+ state_dict = load_original_state_dict(model_id)
142
+ state_dict = convert_state_dict_to_hf(state_dict)
143
+ model.load_state_dict(state_dict, assign=True)
144
+ model.eval()
145
+
146
+ pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data
147
+ mu = torch.mean(pre_expansion_embeddings, dim=0).float()
148
+ n = pre_expansion_embeddings.size()[0]
149
+ sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n
150
+ dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)
151
+
152
+ # We add an image token so we resize the model
153
+ # Pad to 64 for performance reasons
154
+ pad_shape = 64
155
+ vocab_size = config.text_config.vocab_size
156
+ if model_id == "liuhaotian/llava-v1.6-34b":
157
+ # this one has 3 additional tokens, namely <|startoftext|>, <|endoftext|> and <image>
158
+ num_tokens = vocab_size + 3
159
+ else:
160
+ # this one has 2 additional tokens, namely <image> and <pad>
161
+ num_tokens = vocab_size + 2
162
+ model.resize_token_embeddings(num_tokens, pad_to_multiple_of=pad_shape)
163
+ model.language_model.model.embed_tokens.weight.data[vocab_size:] = torch.stack(
164
+ tuple(
165
+ (dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[vocab_size:].shape[0]))
166
+ ),
167
+ dim=0,
168
+ )
169
+ model.language_model.lm_head.weight.data[vocab_size:] = torch.stack(
170
+ tuple((dist.sample() for _ in range(model.language_model.lm_head.weight.data[vocab_size:].shape[0]))),
171
+ dim=0,
172
+ )
173
+
174
+ device = "cuda:2"
175
+ model.to(device)
176
+
177
+ # prepare inputs
178
+ image = load_image()
179
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
180
+ prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
181
+ elif model_id in ["liuhaotian/llava-v1.6-vicuna-7b", "liuhaotian/llava-v1.6-vicuna-13b"]:
182
+ prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: <image>\nWhat is shown in this image? ASSISTANT:"
183
+ elif model_id == "liuhaotian/llava-v1.6-34b":
184
+ prompt = "<|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|><|im_start|>assistant\n"
185
+ inputs = processor(images=image, text=prompt, return_tensors="pt")
186
+
187
+ # verify inputs
188
+ filepath = hf_hub_download(repo_id="nielsr/test-image", filename="llava_1_6_pixel_values.pt", repo_type="dataset")
189
+ original_pixel_values = torch.load(filepath, map_location="cpu")
190
+ assert torch.allclose(original_pixel_values, inputs.pixel_values.half())
191
+
192
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
193
+ filepath = hf_hub_download(repo_id="nielsr/test-image", filename="llava_1_6_input_ids.pt", repo_type="dataset")
194
+ original_input_ids = torch.load(filepath, map_location="cpu")
195
+ # replace -200 by image_token_index (since we use token ID = 32000 for the image token)
196
+ original_input_ids[original_input_ids == -200] = image_token_index
197
+ print(tokenizer.decode([id for id in original_input_ids.tolist()[0] if id != -200]))
198
+
199
+ assert original_input_ids[0].tolist() == inputs.input_ids[0].tolist()
200
+
201
+ elif model_id == "liuhaotian/llava-v1.6-34b":
202
+ filepath = hf_hub_download(
203
+ repo_id="nielsr/test-image", filename="llava_1_6_34b_input_ids.pt", repo_type="dataset"
204
+ )
205
+ original_input_ids = torch.load(filepath, map_location="cpu")
206
+ # replace -200 by image_token_index
207
+ original_input_ids[original_input_ids == -200] = image_token_index
208
+
209
+ assert original_input_ids[0].tolist() == inputs.input_ids[0].tolist()
210
+
211
+ image_sizes = torch.tensor([[899, 1024]])
212
+ assert image_sizes[0].tolist() == inputs.image_sizes[0].tolist()
213
+
214
+ # verify single forward pass
215
+ print("Single forward pass")
216
+ with torch.inference_mode():
217
+ inputs = inputs.to(device)
218
+ outputs = model(**inputs)
219
+ print("Shape of logits:", outputs.logits.shape)
220
+ print("First values of logits:", outputs.logits[0, :3, :3])
221
+
222
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
223
+ expected_slice = torch.tensor(
224
+ [[-4.8555, -4.6992, -0.1996], [-10.5703, -10.7344, -2.7246], [-7.0391, -7.3672, -0.2634]],
225
+ dtype=torch.float32,
226
+ device=device,
227
+ )
228
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-7b":
229
+ expected_slice = torch.tensor(
230
+ [[1.4883, 0.9976, -0.6992], [-9.7031, -5.7031, -1.5557], [-5.1328, -5.5586, 8.8281]],
231
+ dtype=torch.float32,
232
+ device=device,
233
+ )
234
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-13b":
235
+ expected_slice = torch.tensor(
236
+ [[-0.9614, 7.3125, 0.2106], [-7.2695, -8.5469, 3.6211], [-6.3750, -8.1875, 5.4688]],
237
+ dtype=torch.float32,
238
+ device=device,
239
+ )
240
+ elif model_id == "liuhaotian/llava-v1.6-34b":
241
+ expected_slice = torch.tensor(
242
+ [[-9.0859, -9.1406, 5.9453], [-5.9570, -5.9766, 2.2754], [-5.7305, -5.7539, 4.0000]],
243
+ dtype=torch.float32,
244
+ device=device,
245
+ )
246
+ else:
247
+ raise ValueError(f"Model {model_id} not supported")
248
+
249
+ assert torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
250
+ print("Logits are ok!")
251
+
252
+ # verify generation
253
+ output_ids = model.generate(
254
+ **inputs,
255
+ max_new_tokens=100,
256
+ use_cache=True,
257
+ )
258
+
259
+ generated_text = processor.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
260
+
261
+ print("Generated text:", repr(generated_text))
262
+
263
+ if model_id == "liuhaotian/llava-v1.6-mistral-7b":
264
+ expected_text = '[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot that displays data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular radar chart, there are several axes labeled with different metrics or benchmarks, such as "MMM-Vet," "MMM-Bench," "LLaVA-Bench," "SLED-Bench," "'
265
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-7b":
266
+ expected_text = """A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human\'s questions. USER: \nWhat is shown in this image? ASSISTANT: The image appears to be a graphical representation of a benchmarking study comparing the performance of various models or systems. It\'s a scatter plot with a circular layout, where each point represents a different model or system, and the axes represent different metrics or dimensions of comparison.\n\nThe metrics are likely related to machine learning or artificial intelligence performance, as indicated by the terms like "BLIP-2," "Instruct BLIP," "POE," "QWA," "V"""
267
+ elif model_id == "liuhaotian/llava-v1.6-vicuna-13b":
268
+ expected_text = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: \nWhat is shown in this image? ASSISTANT: The image appears to be a radar chart, also known as a spider chart or star chart, which is a graphical method of displaying multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular radar chart, there are several variables represented:\n\n- MM-Vet\n- LLa-Va-Bench\n- SEED-Bench\n- MM"
269
+ elif model_id == "liuhaotian/llava-v1.6-34b":
270
+ expected_text = "<|im_start|> system\nAnswer the questions. <|im_start|> user\n\nWhat is shown in this image? <|im_start|> assistant\nThe image appears to be a radar chart, also known as a spider chart, which is a graphical method of displaying multivariate data in the form of a two-dimensional chart of three or more quantitative variables represented on axes starting from the same point.\n\nIn this particular chart, there are several datasets represented by different colors and labeled with various acronyms such as MM-Vet, LLaVA-Bench, SEED-Bench, MM-Bench-CN, MM-"
271
+ else:
272
+ raise ValueError(f"Model {model_id} not supported")
273
+
274
+ assert generated_text == expected_text
275
+ print("Generated text is ok!")
276
+
277
+ # verify batched generation
278
+ print("Batched generation...")
279
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
280
+ cats_image = Image.open(requests.get(url, stream=True).raw)
281
+
282
+ inputs = processor(
283
+ images=[image, cats_image],
284
+ text=[prompt, "[INST] <image>\nHow many cats are there? [/INST]"],
285
+ padding=True,
286
+ return_tensors="pt",
287
+ ).to(device)
288
+
289
+ for k, v in inputs.items():
290
+ print(k, v.shape)
291
+
292
+ print("Image sizes:", inputs.image_sizes)
293
+
294
+ # make sure image_sizes are the same
295
+ # as otherwise batched generation doesn't work
296
+ inputs.image_sizes[1] = inputs.image_sizes[0]
297
+
298
+ print("Batched generation...")
299
+ output_ids = model.generate(
300
+ **inputs,
301
+ max_new_tokens=20,
302
+ use_cache=True,
303
+ )
304
+
305
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
306
+ print(outputs)
307
+
308
+ if pytorch_dump_folder_path is not None:
309
+ print(f"Saving model and processor for {model_id} to {pytorch_dump_folder_path}")
310
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
311
+ model.save_pretrained(pytorch_dump_folder_path)
312
+ processor.save_pretrained(pytorch_dump_folder_path)
313
+
314
+ if push_to_hub:
315
+ repo_id = model_id.split("/")[-1]
316
+ model.push_to_hub(f"llava-hf/{repo_id}-hf")
317
+ processor.push_to_hub(f"llava-hf/{repo_id}-hf")
318
+
319
+
320
+ if __name__ == "__main__":
321
+ parser = argparse.ArgumentParser()
322
+ parser.add_argument(
323
+ "--model_id",
324
+ help="Hub location of the model to convert",
325
+ default="liuhaotian/llava-v1.6-mistral-7b",
326
+ choices=[
327
+ "liuhaotian/llava-v1.6-mistral-7b",
328
+ "liuhaotian/llava-v1.6-vicuna-7b",
329
+ "liuhaotian/llava-v1.6-vicuna-13b",
330
+ "liuhaotian/llava-v1.6-34b",
331
+ ],
332
+ required=False,
333
+ )
334
+ parser.add_argument(
335
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
336
+ )
337
+ parser.add_argument(
338
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
339
+ )
340
+ args = parser.parse_args()
341
+
342
+ convert_llava_to_hf(args.model_id, args.pytorch_dump_folder_path, args.push_to_hub)
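
The conversion script above initializes the embedding rows of newly added tokens (`<image>`, `<pad>`, ...) by sampling from a multivariate Gaussian fitted to the pre-expansion embedding matrix. A toy sketch of that trick, with made-up sizes:

```python
# Toy sketch of the new-token embedding initialization used in convert_llava_to_hf above.
# vocab_size/hidden/num_new are made-up sizes for illustration.
import torch

vocab_size, hidden, num_new = 1000, 64, 2
embeddings = torch.randn(vocab_size, hidden)

mu = embeddings.mean(dim=0)
centered = embeddings - mu
sigma = (centered.T @ centered) / vocab_size  # empirical covariance of the existing rows
dist = torch.distributions.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)

# sample one plausible embedding row per added token
new_rows = torch.stack([dist.sample() for _ in range(num_new)], dim=0)
expanded = torch.cat([embeddings, new_rows], dim=0)
print(expanded.shape)  # torch.Size([1002, 64])
```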
venv/lib/python3.10/site-packages/transformers/models/llava_next/image_processing_llava_next.py ADDED
@@ -0,0 +1,608 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for LLaVa-NeXT."""
16
+
17
+ import math
18
+ from typing import Dict, List, Optional, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict, select_best_resolution
23
+ from ...image_transforms import (
24
+ convert_to_rgb,
25
+ get_resize_output_image_size,
26
+ pad,
27
+ resize,
28
+ to_channel_dimension_format,
29
+ )
30
+ from ...image_utils import (
31
+ OPENAI_CLIP_MEAN,
32
+ OPENAI_CLIP_STD,
33
+ ChannelDimension,
34
+ ImageInput,
35
+ PILImageResampling,
36
+ get_image_size,
37
+ infer_channel_dimension_format,
38
+ is_scaled_image,
39
+ make_list_of_images,
40
+ to_numpy_array,
41
+ valid_images,
42
+ validate_preprocess_arguments,
43
+ )
44
+ from ...utils import TensorType, is_vision_available, logging
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ if is_vision_available():
51
+ from PIL import Image
52
+
53
+
54
+ def divide_to_patches(image: np.array, patch_size: int, input_data_format) -> List[np.array]:
55
+ """
56
+ Divides an image into patches of a specified size.
57
+
58
+ Args:
59
+ image (`np.array`):
60
+ The input image.
61
+ patch_size (`int`):
62
+ The size of each patch.
63
+ input_data_format (`ChannelDimension` or `str`):
64
+ The channel dimension format of the input image.
65
+
66
+ Returns:
67
+ list: A list of np.array representing the patches.
68
+ """
69
+ patches = []
70
+ height, width = get_image_size(image, channel_dim=input_data_format)
71
+ for i in range(0, height, patch_size):
72
+ for j in range(0, width, patch_size):
73
+ if input_data_format == ChannelDimension.LAST:
74
+ patch = image[i : i + patch_size, j : j + patch_size]
75
+ else:
76
+ patch = image[:, i : i + patch_size, j : j + patch_size]
77
+ patches.append(patch)
78
+
79
+ return patches
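A quick illustrative check of the helper above (the import path is an assumption about where this module lands in the package):

import numpy as np

from transformers.image_utils import ChannelDimension
from transformers.models.llava_next.image_processing_llava_next import divide_to_patches

# A 672x672, channels-last image tiled into 336x336 patches gives a 2x2 grid of 4 patches.
image = np.zeros((672, 672, 3), dtype=np.uint8)
patches = divide_to_patches(image, patch_size=336, input_data_format=ChannelDimension.LAST)
assert len(patches) == 4 and patches[0].shape == (336, 336, 3)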
80
+
81
+
82
+ def expand_to_square(image: np.array, background_color, input_data_format) -> np.array:
83
+ """
84
+ Expands an image to a square by adding a background color.
85
+ """
86
+
87
+ height, width = get_image_size(image, channel_dim=input_data_format)
88
+ if width == height:
89
+ return image
90
+ elif width > height:
91
+ result = np.ones((width, width, image.shape[2]), dtype=image.dtype) * background_color
92
+ result[(width - height) // 2 : (width - height) // 2 + height, :] = image
93
+ return result
94
+ else:
95
+ result = np.ones((height, height, image.shape[2]), dtype=image.dtype) * background_color
96
+ result[:, (height - width) // 2 : (height - width) // 2 + width] = image
97
+ return result
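Similarly, a hedged sketch of `expand_to_square` on a taller-than-wide input (illustrative values; import path assumed as above):

import numpy as np

from transformers.image_utils import ChannelDimension
from transformers.models.llava_next.image_processing_llava_next import expand_to_square

# A 100x60 (height x width), channels-last image is centered horizontally inside a 100x100 square
# whose remaining pixels take the background color.
image = np.full((100, 60, 3), 255, dtype=np.uint8)
squared = expand_to_square(image, background_color=0, input_data_format=ChannelDimension.LAST)
assert squared.shape == (100, 100, 3)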
98
+
99
+
100
+ def _get_patch_output_size(image, target_resolution, input_data_format):
101
+ original_height, original_width = get_image_size(image, channel_dim=input_data_format)
102
+ target_height, target_width = target_resolution
103
+
104
+ scale_w = target_width / original_width
105
+ scale_h = target_height / original_height
106
+
107
+ if scale_w < scale_h:
108
+ new_width = target_width
109
+ new_height = min(math.ceil(original_height * scale_w), target_height)
110
+ else:
111
+ new_height = target_height
112
+ new_width = min(math.ceil(original_width * scale_h), target_width)
113
+
114
+ return new_height, new_width
115
+
116
+
117
+ class LlavaNextImageProcessor(BaseImageProcessor):
118
+ r"""
119
+ Constructs a LLaVa-NeXT image processor. Based on [`CLIPImageProcessor`] with incorporation of additional techniques
120
+ for processing high resolution images as explained in the [LLaVa paper](https://arxiv.org/abs/2310.03744).
121
+
122
+ Args:
123
+ do_resize (`bool`, *optional*, defaults to `True`):
124
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
125
+ `do_resize` in the `preprocess` method.
126
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
127
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
128
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
129
+ method.
130
+ image_grid_pinpoints (`List` *optional*, defaults to `[[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]]`):
131
+ A list of possible resolutions to use for processing high resolution images. The best resolution is selected
132
+ based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
133
+ method.
134
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
135
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
136
+ do_center_crop (`bool`, *optional*, defaults to `True`):
137
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
138
+ `preprocess` method.
139
+ crop_size (`Dict[str, int]` *optional*, defaults to 224):
140
+ Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
141
+ method.
142
+ do_rescale (`bool`, *optional*, defaults to `True`):
143
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
144
+ the `preprocess` method.
145
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
146
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
147
+ method.
148
+ do_normalize (`bool`, *optional*, defaults to `True`):
149
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
150
+ image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
151
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
152
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
153
+ image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
154
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
155
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
156
157
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
158
+ Whether to convert the image to RGB.
159
+ """
160
+
161
+ model_input_names = ["pixel_values"]
162
+
163
+ def __init__(
164
+ self,
165
+ do_resize: bool = True,
166
+ size: Dict[str, int] = None,
167
+ image_grid_pinpoints: List = None,
168
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
169
+ do_center_crop: bool = True,
170
+ crop_size: Dict[str, int] = None,
171
+ do_rescale: bool = True,
172
+ rescale_factor: Union[int, float] = 1 / 255,
173
+ do_normalize: bool = True,
174
+ image_mean: Optional[Union[float, List[float]]] = None,
175
+ image_std: Optional[Union[float, List[float]]] = None,
176
+ do_convert_rgb: bool = True,
177
+ **kwargs,
178
+ ) -> None:
179
+ super().__init__(**kwargs)
180
+ size = size if size is not None else {"shortest_edge": 224}
181
+ size = get_size_dict(size, default_to_square=False)
182
+ image_grid_pinpoints = (
183
+ image_grid_pinpoints
184
+ if image_grid_pinpoints is not None
185
+ else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
186
+ )
187
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
188
+ crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
189
+
190
+ self.do_resize = do_resize
191
+ self.size = size
192
+ self.image_grid_pinpoints = image_grid_pinpoints
193
+ self.resample = resample
194
+ self.do_center_crop = do_center_crop
195
+ self.crop_size = crop_size
196
+ self.do_rescale = do_rescale
197
+ self.rescale_factor = rescale_factor
198
+ self.do_normalize = do_normalize
199
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
200
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
201
+ self.do_convert_rgb = do_convert_rgb
202
+
203
+ # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize with CLIP->LLaVa
204
+ def resize(
205
+ self,
206
+ image: np.ndarray,
207
+ size: Dict[str, int],
208
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
209
+ data_format: Optional[Union[str, ChannelDimension]] = None,
210
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
211
+ **kwargs,
212
+ ) -> np.ndarray:
213
+ """
214
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
215
+ resized to keep the input aspect ratio.
216
+
217
+ Args:
218
+ image (`np.ndarray`):
219
+ Image to resize.
220
+ size (`Dict[str, int]`):
221
+ Size of the output image.
222
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
223
+ Resampling filter to use when resizing the image.
224
+ data_format (`str` or `ChannelDimension`, *optional*):
225
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
226
+ input_data_format (`ChannelDimension` or `str`, *optional*):
227
+ The channel dimension format of the input image. If not provided, it will be inferred.
228
+ """
229
+ default_to_square = True
230
+ if "shortest_edge" in size:
231
+ size = size["shortest_edge"]
232
+ default_to_square = False
233
+ elif "height" in size and "width" in size:
234
+ size = (size["height"], size["width"])
235
+ else:
236
+ raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
237
+
238
+ output_size = get_resize_output_image_size(
239
+ image,
240
+ size=size,
241
+ default_to_square=default_to_square,
242
+ input_data_format=input_data_format,
243
+ )
244
+
245
+ return resize(
246
+ image,
247
+ size=output_size,
248
+ resample=resample,
249
+ data_format=data_format,
250
+ input_data_format=input_data_format,
251
+ **kwargs,
252
+ )
253
+
254
+ def _preprocess(
255
+ self,
256
+ images: ImageInput,
257
+ do_resize: bool = None,
258
+ size: Dict[str, int] = None,
259
+ resample: PILImageResampling = None,
260
+ do_center_crop: bool = None,
261
+ crop_size: int = None,
262
+ do_rescale: bool = None,
263
+ rescale_factor: float = None,
264
+ do_normalize: bool = None,
265
+ image_mean: Optional[Union[float, List[float]]] = None,
266
+ image_std: Optional[Union[float, List[float]]] = None,
267
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
268
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
269
+ ) -> Image.Image:
270
+ """
271
+ Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
272
+
273
+ Args:
274
+ images (`ImageInput`):
275
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
276
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
277
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
278
+ Whether to resize the image.
279
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
280
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
281
+ the longest edge resized to keep the input aspect ratio.
282
+ resample (`int`, *optional*, defaults to `self.resample`):
283
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
284
+ has an effect if `do_resize` is set to `True`.
285
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
286
+ Whether to center crop the image.
287
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
288
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
289
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
290
+ Whether to rescale the image.
291
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
292
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
293
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
294
+ Whether to normalize the image.
295
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
296
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
297
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
298
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
299
+ `True`.
300
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
301
+ The channel dimension format for the output image. Can be one of:
302
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
303
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
304
+ - Unset: Use the channel dimension format of the input image.
305
+ input_data_format (`ChannelDimension` or `str`, *optional*):
306
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
307
+ from the input image. Can be one of:
308
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
309
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
310
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
311
+ """
312
+ images = make_list_of_images(images)
313
+
314
+ if do_resize:
315
+ images = [
316
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
317
+ for image in images
318
+ ]
319
+
320
+ if do_center_crop:
321
+ images = [
322
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
323
+ ]
324
+
325
+ if do_rescale:
326
+ images = [
327
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
328
+ for image in images
329
+ ]
330
+
331
+ if do_normalize:
332
+ images = [
333
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
334
+ for image in images
335
+ ]
336
+
337
+ images = [
338
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
339
+ ]
340
+
341
+ return images
342
+
343
+ def _resize_for_patching(
344
+ self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension
345
+ ) -> np.array:
346
+ """
347
+ Resizes an image to a target resolution while maintaining aspect ratio.
348
+
349
+ Args:
350
+ image (np.array):
351
+ The input image.
352
+ target_resolution (tuple):
353
+ The target resolution (height, width) of the image.
354
+ resample (`PILImageResampling`):
355
+ Resampling filter to use if resizing the image.
356
+ input_data_format (`ChannelDimension` or `str`):
357
+ The channel dimension format of the input image.
358
+
359
+ Returns:
360
+ np.array: The resized and padded image.
361
+ """
362
+ new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)
363
+
364
+ # Resize the image
365
+ resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)
366
+
367
+ return resized_image
368
+
369
+ def _pad_for_patching(
370
+ self, image: np.array, target_resolution: tuple, input_data_format: ChannelDimension
371
+ ) -> np.array:
372
+ """
373
+ Pad an image to a target resolution while maintaining aspect ratio.
374
+ """
375
+ target_height, target_width = target_resolution
376
+ new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)
377
+
378
+ paste_x = (target_width - new_width) // 2
379
+ paste_y = (target_height - new_height) // 2
380
+
381
+ padded_image = pad(image, padding=((paste_y, paste_y), (paste_x, paste_x)))
382
+
383
+ return padded_image
384
+
385
+ def get_image_patches(
386
+ self,
387
+ image: np.array,
388
+ grid_pinpoints,
389
+ size: tuple,
390
+ patch_size: int,
391
+ resample: PILImageResampling,
392
+ data_format: ChannelDimension,
393
+ input_data_format: ChannelDimension,
394
+ ) -> List[np.array]:
395
+ """
396
+ Process an image with variable resolutions by dividing it into patches.
397
+
398
+ Args:
399
+ image (np.array):
400
+ The input image to be processed.
401
+ grid_pinpoints (List):
402
+ A list of possible resolutions.
403
+ size (`tuple`):
404
+ Size to resize the original image to.
405
+ patch_size (`int`):
406
+ Size of the patches to divide the image into.
407
+ resample (`PILImageResampling`):
408
+ Resampling filter to use if resizing the image.
409
+ data_format (`ChannelDimension` or `str`):
410
+ The channel dimension format for the output image.
411
+ input_data_format (`ChannelDimension` or `str`):
412
+ The channel dimension format of the input image.
413
+
414
+ Returns:
415
+ List[np.array]: A list of NumPy arrays containing the processed image patches.
416
+ """
417
+ if not isinstance(grid_pinpoints, list):
418
+ raise ValueError("grid_pinpoints must be a list of possible resolutions.")
419
+
420
+ possible_resolutions = grid_pinpoints
421
+
422
+ image_size = get_image_size(image, channel_dim=input_data_format)
423
+ best_resolution = select_best_resolution(image_size, possible_resolutions)
424
+ resized_image = self._resize_for_patching(
425
+ image, best_resolution, resample=resample, input_data_format=input_data_format
426
+ )
427
+ padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)
428
+
429
+ patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)
430
+
431
+ # make sure that all patches are in the input data format
432
+ patches = [
433
+ to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format)
434
+ for patch in patches
435
+ ]
436
+
437
+ resized_original_image = resize(
438
+ image,
439
+ size=size,
440
+ resample=resample,
441
+ data_format=data_format,
442
+ input_data_format=input_data_format,
443
+ )
444
+
445
+ image_patches = [resized_original_image] + patches
446
+
447
+ return image_patches
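To make the flow above concrete, a sketch under stated assumptions (default pinpoints from `__init__`, a dummy image, and an assumed import path; the exact number of returned patches depends on which pinpoint `select_best_resolution` picks):

import numpy as np

from transformers.image_utils import ChannelDimension, PILImageResampling
from transformers.models.llava_next.image_processing_llava_next import LlavaNextImageProcessor

processor = LlavaNextImageProcessor()
image = np.zeros((800, 600, 3), dtype=np.uint8)  # dummy (height, width, channels) input
patches = processor.get_image_patches(
    image,
    grid_pinpoints=processor.image_grid_pinpoints,
    size=(336, 336),
    patch_size=336,
    resample=PILImageResampling.BICUBIC,
    data_format=ChannelDimension.LAST,
    input_data_format=ChannelDimension.LAST,
)
# patches[0] is the full image resized to `size`; the remaining entries tile the best-fit pinpoint.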
448
+
449
+ def preprocess(
450
+ self,
451
+ images: ImageInput,
452
+ do_resize: bool = None,
453
+ size: Dict[str, int] = None,
454
+ image_grid_pinpoints: List = None,
455
+ resample: PILImageResampling = None,
456
+ do_center_crop: bool = None,
457
+ crop_size: int = None,
458
+ do_rescale: bool = None,
459
+ rescale_factor: float = None,
460
+ do_normalize: bool = None,
461
+ image_mean: Optional[Union[float, List[float]]] = None,
462
+ image_std: Optional[Union[float, List[float]]] = None,
463
+ do_convert_rgb: bool = None,
464
+ return_tensors: Optional[Union[str, TensorType]] = None,
465
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
466
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
467
+ ):
468
+ """
469
+ Args:
470
+ images (`ImageInput`):
471
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
472
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
473
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
474
+ Whether to resize the image.
475
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
476
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
477
+ the longest edge resized to keep the input aspect ratio.
478
+ image_grid_pinpoints (`List` *optional*, defaults to `self.image_grid_pinpoints`):
479
+ A list of possible resolutions to use for processing high resolution images. The best resolution is
480
+ selected based on the original size of the image.
481
+ resample (`int`, *optional*, defaults to `self.resample`):
482
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
483
+ has an effect if `do_resize` is set to `True`.
484
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
485
+ Whether to center crop the image.
486
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
487
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
488
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
489
+ Whether to rescale the image.
490
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
491
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
492
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
493
+ Whether to normalize the image.
494
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
495
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
496
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
497
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
498
+ `True`.
499
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
500
+ Whether to convert the image to RGB.
501
+ return_tensors (`str` or `TensorType`, *optional*):
502
+ The type of tensors to return. Can be one of:
503
+ - Unset: Return a list of `np.ndarray`.
504
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
505
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
506
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
507
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
508
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
509
+ The channel dimension format for the output image. Can be one of:
510
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
511
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
512
+ - Unset: Use the channel dimension format of the input image.
513
+ input_data_format (`ChannelDimension` or `str`, *optional*):
514
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
515
+ from the input image. Can be one of:
516
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
517
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
518
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
519
+ """
520
+ do_resize = do_resize if do_resize is not None else self.do_resize
521
+ size = size if size is not None else self.size
522
+ size = get_size_dict(size, param_name="size", default_to_square=False)
523
+ image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else self.image_grid_pinpoints
524
+ resample = resample if resample is not None else self.resample
525
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
526
+ crop_size = crop_size if crop_size is not None else self.crop_size
527
+ crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
528
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
529
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
530
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
531
+ image_mean = image_mean if image_mean is not None else self.image_mean
532
+ image_std = image_std if image_std is not None else self.image_std
533
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
534
+
535
+ images = make_list_of_images(images)
536
+
537
+ if not valid_images(images):
538
+ raise ValueError(
539
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
540
+ "torch.Tensor, tf.Tensor or jax.ndarray."
541
+ )
542
+
543
+ validate_preprocess_arguments(
544
+ do_rescale=do_rescale,
545
+ rescale_factor=rescale_factor,
546
+ do_normalize=do_normalize,
547
+ image_mean=image_mean,
548
+ image_std=image_std,
549
+ do_center_crop=do_center_crop,
550
+ crop_size=crop_size,
551
+ do_resize=do_resize,
552
+ size=size,
553
+ resample=resample,
554
+ )
555
+
556
+ if do_convert_rgb:
557
+ images = [convert_to_rgb(image) for image in images]
558
+
559
+ # All transformations expect numpy arrays.
560
+ images = [to_numpy_array(image) for image in images]
561
+
562
+ if is_scaled_image(images[0]) and do_rescale:
563
+ logger.warning_once(
564
+ "It looks like you are trying to rescale already rescaled images. If the input"
565
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
566
+ )
567
+
568
+ if input_data_format is None:
569
+ # We assume that all images have the same channel dimension format.
570
+ input_data_format = infer_channel_dimension_format(images[0])
571
+
572
+ new_images = []
573
+ image_sizes = [get_image_size(image, channel_dim=input_data_format) for image in images]
574
+ for image in images:
575
+ # convert image into a list of patches
576
+ # we intentionally use the same data format as the input data format
577
+ image_patches = self.get_image_patches(
578
+ image,
579
+ image_grid_pinpoints,
580
+ size=(size["shortest_edge"], size["shortest_edge"]),
581
+ patch_size=crop_size["height"],
582
+ resample=resample,
583
+ data_format=input_data_format,
584
+ input_data_format=input_data_format,
585
+ )
586
+
587
+ # preprocess patches
588
+ pixel_values = self._preprocess(
589
+ image_patches,
590
+ do_resize=do_resize,
591
+ size=size,
592
+ resample=resample,
593
+ do_center_crop=do_center_crop,
594
+ crop_size=crop_size,
595
+ do_rescale=do_rescale,
596
+ rescale_factor=rescale_factor,
597
+ do_normalize=do_normalize,
598
+ image_mean=image_mean,
599
+ image_std=image_std,
600
+ data_format=data_format,
601
+ input_data_format=input_data_format,
602
+ )
603
+ pixel_values = np.array(pixel_values)
604
+ new_images.append(pixel_values)
605
+
606
+ data = {"pixel_values": new_images, "image_sizes": image_sizes}
607
+
608
+ return BatchFeature(data=data, tensor_type=return_tensors)
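End to end, the processor is normally loaded with `from_pretrained`; a hedged usage sketch (the reported shape is illustrative and depends on the checkpoint's grid pinpoints):

import requests
from PIL import Image

from transformers import LlavaNextImageProcessor

processor = LlavaNextImageProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
url = "https://www.ilankelman.org/stopsigns/australia.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
# "pixel_values" stacks the resized base image with its high-resolution patches,
# e.g. (batch_size, num_patches, 3, 336, 336); "image_sizes" holds each original (height, width).
print(inputs["pixel_values"].shape, inputs["image_sizes"])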
venv/lib/python3.10/site-packages/transformers/models/llava_next/modeling_llava_next.py ADDED
@@ -0,0 +1,698 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Llava-NeXT model."""
16
+
17
+ from dataclasses import dataclass
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+
24
+ from ... import PreTrainedModel
25
+ from ...activations import ACT2FN
26
+ from ...cache_utils import Cache
27
+ from ...image_processing_utils import select_best_resolution
28
+ from ...modeling_outputs import ModelOutput
29
+ from ...utils import (
30
+ add_start_docstrings,
31
+ add_start_docstrings_to_model_forward,
32
+ logging,
33
+ replace_return_docstrings,
34
+ )
35
+ from ..auto import AutoModel, AutoModelForCausalLM
36
+ from .configuration_llava_next import LlavaNextConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ _CONFIG_FOR_DOC = "LlavaNextConfig"
42
+
43
+ LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
44
+ "llava-hf/llava-v1.6-mistral-7b-hf",
45
+ # See all LLaVA-NeXT models at https://huggingface.co/models?filter=llava_next
46
+ ]
47
+
48
+
49
+ def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
50
+ """
51
+ Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
52
+
53
+ Args:
54
+ image_size (`tuple`):
55
+ The size of the input image in the format (width, height).
56
+ grid_pinpoints (`List`):
57
+ A list containing possible resolutions. Each item in the list should be a tuple or list
58
+ of the form `(height, width)`.
59
+ patch_size (`int`):
60
+ The size of each image patch.
61
+
62
+ Returns:
63
+ tuple: The shape of the image patch grid in the format (width, height).
64
+ """
65
+ if not isinstance(grid_pinpoints, list):
66
+ raise ValueError("grid_pinpoints should be a list of tuples or lists")
67
+
68
+ height, width = select_best_resolution(image_size, grid_pinpoints)
69
+ return height // patch_size, width // patch_size
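For intuition, a small sketch with the default pinpoints (import path assumed): a 672x672 input matches the 672x672 pinpoint exactly, so with a 336px patch size the grid is 2x2.

from transformers.models.llava_next.modeling_llava_next import get_anyres_image_grid_shape

grid_pinpoints = [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
num_patch_height, num_patch_width = get_anyres_image_grid_shape((672, 672), grid_pinpoints, patch_size=336)
assert (num_patch_height, num_patch_width) == (2, 2)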
70
+
71
+
72
+ def unpad_image(tensor, original_size):
73
+ """
74
+ Unpads a PyTorch tensor of a padded and resized image.
75
+
76
+ Args:
77
+ tensor (`torch.Tensor`):
78
+ The image tensor, assumed to be of shape (num_channels, height, width).
79
+ original_size (`tuple`):
80
+ The original size of the image (height, width).
81
+
82
+ Returns:
83
+ `torch.Tensor`: The unpadded image tensor.
84
+ """
85
+ original_height, original_width = original_size
86
+ current_height, current_width = tensor.shape[1:]
87
+
88
+ original_aspect_ratio = original_width / original_height
89
+ current_aspect_ratio = current_width / current_height
90
+
91
+ if original_aspect_ratio > current_aspect_ratio:
92
+ scale_factor = current_width / original_width
93
+ new_height = int(original_height * scale_factor)
94
+ padding = (current_height - new_height) // 2
95
+ unpadded_tensor = tensor[:, padding : current_height - padding, :]
96
+ else:
97
+ scale_factor = current_height / original_height
98
+ new_width = int(original_width * scale_factor)
99
+ padding = (current_width - new_width) // 2
100
+ unpadded_tensor = tensor[:, :, padding : current_width - padding]
101
+
102
+ return unpadded_tensor
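A worked example of the unpadding above (shapes follow directly from the branches of the function; import path assumed):

import torch

from transformers.models.llava_next.modeling_llava_next import unpad_image

# Features of shape (channels, 336, 336) that came from an original 672x336 (height x width) image:
# the width was the padded dimension, so 84 columns are stripped from each side.
features = torch.zeros(3, 336, 336)
unpadded = unpad_image(features, original_size=(672, 336))
assert unpadded.shape == (3, 336, 168)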
103
+
104
+
105
+ @dataclass
106
+ # Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->LlavaNext
107
+ class LlavaNextCausalLMOutputWithPast(ModelOutput):
108
+ """
109
+ Base class for LlavaNext causal language model (or autoregressive) outputs.
110
+
111
+ Args:
112
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
113
+ Language modeling loss (for next-token prediction).
114
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
115
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
116
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
117
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
118
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
119
+
120
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
121
+ `past_key_values` input) to speed up sequential decoding.
122
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
123
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
124
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
125
+
126
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
127
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
128
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
129
+ sequence_length)`.
130
+
131
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
132
+ heads.
133
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
134
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
135
+ sequence_length, hidden_size)`.
136
+
137
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
138
+ """
139
+
140
+ loss: Optional[torch.FloatTensor] = None
141
+ logits: torch.FloatTensor = None
142
+ past_key_values: Optional[List[torch.FloatTensor]] = None
143
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
144
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
145
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
146
+
147
+
148
+ # Copied from transformers.models.llava.modeling_llava.LlavaMultiModalProjector with Llava->LlavaNext
149
+ class LlavaNextMultiModalProjector(nn.Module):
150
+ def __init__(self, config: LlavaNextConfig):
151
+ super().__init__()
152
+
153
+ self.linear_1 = nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=True)
154
+ self.act = ACT2FN[config.projector_hidden_act]
155
+ self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)
156
+
157
+ def forward(self, image_features):
158
+ hidden_states = self.linear_1(image_features)
159
+ hidden_states = self.act(hidden_states)
160
+ hidden_states = self.linear_2(hidden_states)
161
+ return hidden_states
162
+
163
+
164
+ LLAVA_NEXT_START_DOCSTRING = r"""
165
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
166
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
167
+ etc.)
168
+
169
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
170
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
171
+ and behavior.
172
+
173
+ Parameters:
174
+ config ([`LlavaNextConfig`] or [`LlavaNextVisionConfig`]):
175
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
176
+ load the weights associated with the model, only the configuration. Check out the
177
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
178
+ """
179
+
180
+
181
+ @add_start_docstrings(
182
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
183
+ LLAVA_NEXT_START_DOCSTRING,
184
+ )
185
+ # Copied from transformers.models.llava.modeling_llava.LlavaPreTrainedModel with Llava->LlavaNext,llava->llava_next
186
+ class LlavaNextPreTrainedModel(PreTrainedModel):
187
+ config_class = LlavaNextConfig
188
+ base_model_prefix = "model"
189
+ supports_gradient_checkpointing = True
190
+ _no_split_modules = ["LlavaNextVisionAttention"]
191
+ _skip_keys_device_placement = "past_key_values"
192
+ _supports_flash_attn_2 = True
193
+
194
+ def _init_weights(self, module):
195
+ # important: this ported version of LlavaNext isn't meant for training from scratch - only
196
+ # inference and fine-tuning - so the proper init weights code has been removed - the original codebase
197
+ # https://github.com/haotian-liu/LLaVA/tree/main/llava_next should serve for that purpose
198
+ std = (
199
+ self.config.initializer_range
200
+ if hasattr(self.config, "initializer_range")
201
+ else self.config.text_config.initializer_range
202
+ )
203
+
204
+ if hasattr(module, "class_embedding"):
205
+ module.class_embedding.data.normal_(mean=0.0, std=std)
206
+
207
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
208
+ module.weight.data.normal_(mean=0.0, std=std)
209
+ if module.bias is not None:
210
+ module.bias.data.zero_()
211
+ elif isinstance(module, nn.Embedding):
212
+ module.weight.data.normal_(mean=0.0, std=std)
213
+ if module.padding_idx is not None:
214
+ module.weight.data[module.padding_idx].zero_()
215
+
216
+ @property
217
+ def _supports_sdpa(self):
218
+ """
219
+ Retrieve language_model's attribute to check whether the model supports
220
+ SDPA or not.
221
+ """
222
+ return self.language_model._supports_sdpa
223
+
224
+
225
+ LLAVA_NEXT_INPUTS_DOCSTRING = r"""
226
+ Args:
227
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
228
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
229
+ it.
230
+
231
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
232
+ [`PreTrainedTokenizer.__call__`] for details.
233
+
234
+ [What are input IDs?](../glossary#input-ids)
235
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_patches, num_channels, image_size, image_size)`):
236
+ The tensors corresponding to the input images. Pixel values can be obtained using
237
+ [`AutoImageProcessor`]. See [`LlavaNextImageProcessor.__call__`] for details. [`LlavaProcessor`] uses
238
+ [`LlavaNextImageProcessor`] for processing images.
239
+ image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`, *optional*):
240
+ The sizes of the images in the batch, being (height, width) for each image.
241
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
242
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
243
+
244
+ - 1 for tokens that are **not masked**,
245
+ - 0 for tokens that are **masked**.
246
+
247
+ [What are attention masks?](../glossary#attention-mask)
248
+
249
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
250
+ [`PreTrainedTokenizer.__call__`] for details.
251
+
252
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
253
+ `past_key_values`).
254
+
255
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
256
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
257
+ information on the default strategy.
258
+
259
+ - 1 indicates the head is **not masked**,
260
+ - 0 indicates the head is **masked**.
261
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
262
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
263
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
264
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
265
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
266
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
267
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
268
+
269
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
270
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
271
+
272
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
273
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
274
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
275
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
276
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
277
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
278
+ model's internal embedding lookup matrix.
279
+ vision_feature_layer (`int`, *optional*, defaults to -2):
280
+ The index of the layer to select the vision feature.
281
+ vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
282
+ The feature selection strategy used to select the vision feature from the vision backbone.
283
+ Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
284
+ If `"full"`, the full vision features are used.
285
+ use_cache (`bool`, *optional*):
286
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
287
+ `past_key_values`).
288
+ output_attentions (`bool`, *optional*):
289
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
290
+ tensors for more detail.
291
+ output_hidden_states (`bool`, *optional*):
292
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
293
+ more detail.
294
+ return_dict (`bool`, *optional*):
295
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
296
+ """
297
+
298
+
299
+ @add_start_docstrings(
300
+ """The LLAVA-NeXT model which consists of a vision backbone and a language model.""",
301
+ LLAVA_NEXT_START_DOCSTRING,
302
+ )
303
+ class LlavaNextForConditionalGeneration(LlavaNextPreTrainedModel):
304
+ def __init__(self, config: LlavaNextConfig):
305
+ super().__init__(config)
306
+ self.vision_tower = AutoModel.from_config(config.vision_config)
307
+
308
+ self.multi_modal_projector = LlavaNextMultiModalProjector(config)
309
+
310
+ self.image_newline = nn.Parameter(torch.empty(config.text_config.hidden_size, dtype=self.dtype))
311
+
312
+ self.vocab_size = config.text_config.vocab_size
313
+ self.language_model = AutoModelForCausalLM.from_config(
314
+ config.text_config, attn_implementation=config._attn_implementation
315
+ )
316
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
317
+ self.post_init()
318
+
319
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_input_embeddings
320
+ def get_input_embeddings(self):
321
+ return self.language_model.get_input_embeddings()
322
+
323
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_input_embeddings
324
+ def set_input_embeddings(self, value):
325
+ self.language_model.set_input_embeddings(value)
326
+
327
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_output_embeddings
328
+ def get_output_embeddings(self):
329
+ return self.language_model.get_output_embeddings()
330
+
331
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_output_embeddings
332
+ def set_output_embeddings(self, new_embeddings):
333
+ self.language_model.set_output_embeddings(new_embeddings)
334
+
335
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_decoder
336
+ def set_decoder(self, decoder):
337
+ self.language_model.set_decoder(decoder)
338
+
339
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_decoder
340
+ def get_decoder(self):
341
+ return self.language_model.get_decoder()
342
+
343
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.tie_weights
344
+ def tie_weights(self):
345
+ return self.language_model.tie_weights()
346
+
347
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.resize_token_embeddings
348
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
349
+ model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
350
+ # update vocab size
351
+ self.config.text_config.vocab_size = model_embeds.num_embeddings
352
+ self.vocab_size = model_embeds.num_embeddings
353
+ return model_embeds
354
+
355
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration._merge_input_ids_with_image_features
356
+ def _merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids, attention_mask, labels):
357
+ num_images, num_image_patches, embed_dim = image_features.shape
358
+ batch_size, sequence_length = input_ids.shape
359
+ left_padding = not torch.sum(input_ids[:, -1] == torch.tensor(self.pad_token_id))
360
+ # 1. Create a mask to know where special image tokens are
361
+ special_image_token_mask = input_ids == self.config.image_token_index
362
+ num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1)
363
+ # Compute the maximum embed dimension
364
+ max_embed_dim = (num_special_image_tokens.max() * (num_image_patches - 1)) + sequence_length
365
+ batch_indices, non_image_indices = torch.where(input_ids != self.config.image_token_index)
366
+
367
+ # 2. Compute the positions where text should be written
368
+ # Calculate new positions for text tokens in merged image-text sequence.
369
+ # `special_image_token_mask` identifies image tokens. Each image token will be replaced by `nb_text_tokens_per_images - 1` text tokens.
370
+ # `torch.cumsum` computes how each image token shifts subsequent text token positions.
371
+ # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one.
372
+ new_token_positions = torch.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1
373
+ nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1]
374
+ if left_padding:
375
+ new_token_positions += nb_image_pad[:, None] # offset for left padding
376
+ text_to_overwrite = new_token_positions[batch_indices, non_image_indices]
377
+
378
+ # 3. Create the full embedding, already padded to the maximum position
379
+ final_embedding = torch.zeros(
380
+ batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
381
+ )
382
+ final_attention_mask = torch.zeros(
383
+ batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device
384
+ )
385
+ if labels is not None:
386
+ final_labels = torch.full(
387
+ (batch_size, max_embed_dim), self.config.ignore_index, dtype=input_ids.dtype, device=input_ids.device
388
+ )
389
+ # In case the Vision model or the Language model has been offloaded to CPU, we need to manually
390
+ # set the corresponding tensors into their correct target device.
391
+ target_device = inputs_embeds.device
392
+ batch_indices, non_image_indices, text_to_overwrite = (
393
+ batch_indices.to(target_device),
394
+ non_image_indices.to(target_device),
395
+ text_to_overwrite.to(target_device),
396
+ )
397
+ attention_mask = attention_mask.to(target_device)
398
+
399
+ # 4. Fill the embeddings based on the mask. If we have ["hey" "<image>", "how", "are"]
400
+ # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features
401
+ final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices]
402
+ final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices]
403
+ if labels is not None:
404
+ final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices]
405
+
406
+ # 5. Fill the embeddings corresponding to the images. Anything that is still zeros needs filling
407
+ image_to_overwrite = torch.all(final_embedding == 0, dim=-1)
408
+ image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[:, None].to(target_device)
409
+
410
+ if image_to_overwrite.sum() != image_features.shape[:-1].numel():
411
+ raise ValueError(
412
+ f"The input provided to the model are wrong. The number of image tokens is {torch.sum(special_image_token_mask)} while"
413
+ f" the number of image given to the model is {num_images}. This prevents correct indexing and breaks batch generation."
414
+ )
415
+
416
+ final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device)
417
+ final_attention_mask |= image_to_overwrite
418
+ position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)
419
+
420
+ # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens.
421
+ batch_indices, pad_indices = torch.where(input_ids == self.pad_token_id)
422
+ indices_to_mask = new_token_positions[batch_indices, pad_indices]
423
+
424
+ final_embedding[batch_indices, indices_to_mask] = 0
425
+
426
+ if labels is None:
427
+ final_labels = None
428
+
429
+ return final_embedding, final_attention_mask, final_labels, position_ids
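A toy check of the merged-sequence-length bookkeeping above (illustrative numbers; a real LLaVA-NeXT image contributes far more than 576 visual embeddings after the any-resolution packing):

# One <image> placeholder in a 5-token prompt, 576 visual embeddings per image.
sequence_length, num_image_patches, num_special_image_tokens = 5, 576, 1
max_embed_dim = num_special_image_tokens * (num_image_patches - 1) + sequence_length
assert max_embed_dim == 580  # 4 text tokens + 576 visual positions after merging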
430
+
431
+ @add_start_docstrings_to_model_forward(LLAVA_NEXT_INPUTS_DOCSTRING)
432
+ @replace_return_docstrings(output_type=LlavaNextCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
433
+ def forward(
434
+ self,
435
+ input_ids: torch.LongTensor = None,
436
+ pixel_values: torch.FloatTensor = None,
437
+ image_sizes: Optional[torch.LongTensor] = None,
438
+ attention_mask: Optional[torch.Tensor] = None,
439
+ position_ids: Optional[torch.LongTensor] = None,
440
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
441
+ inputs_embeds: Optional[torch.FloatTensor] = None,
442
+ vision_feature_layer: Optional[int] = None,
443
+ vision_feature_select_strategy: Optional[str] = None,
444
+ labels: Optional[torch.LongTensor] = None,
445
+ use_cache: Optional[bool] = None,
446
+ output_attentions: Optional[bool] = None,
447
+ output_hidden_states: Optional[bool] = None,
448
+ return_dict: Optional[bool] = None,
449
+ ) -> Union[Tuple, LlavaNextCausalLMOutputWithPast]:
450
+ r"""
451
+ Args:
452
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
453
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
454
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
455
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
456
+
457
+ Returns:
458
+
459
+ Example:
460
+
461
+ ```python
462
+ >>> from PIL import Image
463
+ >>> import requests
464
+ >>> from transformers import AutoProcessor, LlavaNextForConditionalGeneration
465
+
466
+ >>> model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
467
+ >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
468
+
469
+ >>> prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
470
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
471
+ >>> image = Image.open(requests.get(url, stream=True).raw)
472
+
473
+ >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
474
+
475
+ >>> # Generate
476
+ >>> generate_ids = model.generate(**inputs, max_length=30)
477
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
478
+ "[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot (...)"
479
+ ```"""
480
+
481
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
482
+ output_hidden_states = (
483
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
484
+ )
485
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
486
+ vision_feature_layer = (
487
+ vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
488
+ )
489
+ vision_feature_select_strategy = (
490
+ vision_feature_select_strategy
491
+ if vision_feature_select_strategy is not None
492
+ else self.config.vision_feature_select_strategy
493
+ )
494
+
495
+ if inputs_embeds is None:
496
+ # 1. Extract the input embeddings
497
+ inputs_embeds = self.get_input_embeddings()(input_ids)
498
+
499
+ # 2. Merge text and images
500
+ if pixel_values is not None and input_ids.shape[1] != 1:
501
+ batch_size, num_patches, num_channels, height, width = pixel_values.shape
502
+ reshaped_pixel_values = pixel_values.view(batch_size * num_patches, num_channels, height, width)
503
+ image_features = self.vision_tower(reshaped_pixel_values, output_hidden_states=True)
504
+
505
+ selected_image_feature = image_features.hidden_states[vision_feature_layer]
506
+
507
+ if vision_feature_select_strategy == "default":
508
+ selected_image_feature = selected_image_feature[:, 1:]
509
+ elif vision_feature_select_strategy == "full":
510
+ selected_image_feature = selected_image_feature
511
+
512
+ image_features = self.multi_modal_projector(selected_image_feature)
513
+
514
+ # split up image_features for each of the individual images
515
+ # hence we get a list of image_features, each of shape (5, num_patches, hidden_size)
516
+ # if we assume each image has 5 image features (base image + 4 patches)
517
+ split_sizes = [image.shape[0] for image in pixel_values]
518
+ image_features = torch.split(image_features, split_sizes, dim=0)
519
+
520
+ # NOTE we only support multimodal_patch_merge_type == "spatial_unpad"
521
+ height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size
522
+
523
+ new_image_features = []
524
+ for image_idx, image_feature in enumerate(image_features):
525
+ if image_feature.shape[0] > 1:
526
+ base_image_feature = image_feature[0]
527
+ image_feature = image_feature[1:]
528
+
529
+ if height * width != base_image_feature.shape[0]:
530
+ raise ValueError("The number of patches is not consistent with the image size.")
531
+ num_patch_height, num_patch_width = get_anyres_image_grid_shape(
532
+ image_sizes[image_idx],
533
+ self.config.image_grid_pinpoints,
534
+ self.config.vision_config.image_size,
535
+ )
536
+ image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
537
+ image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
538
+ image_feature = image_feature.flatten(1, 2).flatten(2, 3)
539
+ image_feature = unpad_image(image_feature, image_sizes[image_idx])
540
+ image_feature = torch.cat(
541
+ (
542
+ image_feature,
543
+ self.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1),
544
+ ),
545
+ dim=-1,
546
+ )
547
+ image_feature = image_feature.flatten(1, 2).transpose(0, 1)
548
+ image_feature = torch.cat((base_image_feature, image_feature), dim=0)
549
+ else:
550
+ image_feature = image_feature[0]
551
+ image_feature = torch.cat((image_feature, self.image_newline[None]), dim=0)
552
+ new_image_features.append(image_feature)
553
+ image_features = torch.stack(new_image_features, dim=0)
554
+
555
+ inputs_embeds, attention_mask, labels, position_ids = self._merge_input_ids_with_image_features(
556
+ image_features, inputs_embeds, input_ids, attention_mask, labels
557
+ )
558
+ if labels is None:
559
+ labels = torch.full_like(attention_mask, self.config.ignore_index).to(torch.long)
560
+
561
+ # In case input_ids.shape[1] == 1 & pixel_values==None & past_key_values != None, we are in the case of
562
+ # generation with cache
563
+ elif past_key_values is not None and pixel_values is not None and input_ids.shape[1] == 1:
564
+ # Retrieve the first layer to inspect the logits and mask out the hidden states
565
+ # that are set to 0
566
+ first_layer_past_key_value = past_key_values[0][0][:, :, :, 0]
567
+
568
+ # Sum all dimensions of head_dim (-2) to avoid random errors such as: https://github.com/huggingface/transformers/pull/28032#issuecomment-1863691941
569
+ batch_index, non_attended_tokens = torch.where(first_layer_past_key_value.float().sum(-2) == 0)
570
+
571
+ # Get the target length
572
+ target_length = input_ids.shape[1]
573
+ past_length = first_layer_past_key_value.shape[-1]
574
+
575
+ extended_attention_mask = torch.ones(
576
+ (attention_mask.shape[0], past_length),
577
+ dtype=attention_mask.dtype,
578
+ device=attention_mask.device,
579
+ )
580
+
581
+ # Filter out only the tokens that can be un-attended, this can happen
582
+ # if one uses Llava + Fused modules where the cache on the
583
+ # first iteration is already big enough, or if one passes custom cache
584
+ valid_indices = non_attended_tokens < extended_attention_mask.size(-1)
585
+ new_batch_index = batch_index[valid_indices]
586
+ new_non_attended_tokens = non_attended_tokens[valid_indices]
587
+
588
+ # Zero-out the places where we don't need to attend
589
+ extended_attention_mask[new_batch_index, new_non_attended_tokens] = 0
590
+
591
+ attention_mask = torch.cat((extended_attention_mask, attention_mask[:, -target_length:]), dim=1)
592
+ position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1
593
+
594
+ outputs = self.language_model(
595
+ attention_mask=attention_mask,
596
+ position_ids=position_ids,
597
+ past_key_values=past_key_values,
598
+ inputs_embeds=inputs_embeds,
599
+ use_cache=use_cache,
600
+ output_attentions=output_attentions,
601
+ output_hidden_states=output_hidden_states,
602
+ return_dict=return_dict,
603
+ )
604
+
605
+ logits = outputs[0]
606
+
607
+ loss = None
608
+ if labels is not None:
609
+ # Shift so that tokens < n predict n
610
+ if attention_mask is not None:
611
+ shift_attention_mask = attention_mask[..., 1:]
612
+ shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
613
+ shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
614
+ else:
615
+ shift_logits = logits[..., :-1, :].contiguous()
616
+ shift_labels = labels[..., 1:].contiguous()
617
+ # Flatten the tokens
618
+ loss_fct = nn.CrossEntropyLoss()
619
+ loss = loss_fct(
620
+ shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
621
+ )
622
+
623
+ if not return_dict:
624
+ output = (logits,) + outputs[1:]
625
+ return (loss,) + output if loss is not None else output
626
+
627
+ return LlavaNextCausalLMOutputWithPast(
628
+ loss=loss,
629
+ logits=logits,
630
+ past_key_values=outputs.past_key_values,
631
+ hidden_states=outputs.hidden_states,
632
+ attentions=outputs.attentions,
633
+ )
634
+
635
+ def prepare_inputs_for_generation(
636
+ self,
637
+ input_ids,
638
+ past_key_values=None,
639
+ inputs_embeds=None,
640
+ pixel_values=None,
641
+ image_sizes=None,
642
+ attention_mask=None,
643
+ **kwargs,
644
+ ):
645
+ if past_key_values is not None:
646
+ if isinstance(past_key_values, Cache):
647
+ cache_length = past_key_values.get_seq_length()
648
+ past_length = past_key_values.seen_tokens
649
+ else:
650
+ cache_length = past_length = past_key_values[0][0].shape[2]
651
+
652
+ # Keep only the unprocessed tokens:
653
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
654
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
655
+ # input)
656
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
657
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
658
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
659
+ # input_ids based on the past_length.
660
+ elif past_length < input_ids.shape[1]:
661
+ input_ids = input_ids[:, past_length:]
662
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
663
+ elif self.config.image_token_index in input_ids:
664
+ input_ids = input_ids[:, input_ids.shape[1] - 1 :]
665
+ # If the cache has seen more tokens than it can hold, then the cache has a size limit. Let's discard the
666
+ # older attention values, as their corresponding values are not part of the input.
667
+ if cache_length < past_length and attention_mask is not None:
668
+ attention_mask = attention_mask[:, -(cache_length + input_ids.shape[1]) :]
669
+
670
+ position_ids = kwargs.get("position_ids", None)
671
+ if attention_mask is not None and position_ids is None:
672
+ # create position_ids on the fly for batch generation
673
+ position_ids = attention_mask.long().cumsum(-1) - 1
674
+ position_ids.masked_fill_(attention_mask == 0, 1)
675
+ if past_key_values:
676
+ position_ids = position_ids[:, -input_ids.shape[1] :]
677
+
678
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
679
+ if inputs_embeds is not None and past_key_values is None:
680
+ model_inputs = {"inputs_embeds": inputs_embeds}
681
+ else:
682
+ model_inputs = {"input_ids": input_ids}
683
+
684
+ model_inputs.update(
685
+ {
686
+ "position_ids": position_ids,
687
+ "past_key_values": past_key_values,
688
+ "use_cache": kwargs.get("use_cache"),
689
+ "attention_mask": attention_mask,
690
+ "pixel_values": pixel_values,
691
+ "image_sizes": image_sizes,
692
+ }
693
+ )
694
+ return model_inputs
695
+
696
+ # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration._reorder_cache
697
+ def _reorder_cache(self, *args, **kwargs):
698
+ return self.language_model._reorder_cache(*args, **kwargs)
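A minimal usage sketch for the generation path implemented above (editorial note, not part of the uploaded file). It assumes the checkpoint id "llava-hf/llava-v1.6-mistral-7b-hf", the Mistral-style prompt template, and the example image URL; these are illustrative assumptions, not taken from this diff.

import requests
from PIL import Image
from transformers import LlavaNextForConditionalGeneration, LlavaNextProcessor

model_id = "llava-hf/llava-v1.6-mistral-7b-hf"  # assumed checkpoint id
processor = LlavaNextProcessor.from_pretrained(model_id)
model = LlavaNextForConditionalGeneration.from_pretrained(model_id)

url = "https://www.ilankelman.org/stopsigns/australia.jpg"  # assumed example image
image = Image.open(requests.get(url, stream=True).raw)
prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"  # assumed prompt template

# The processor builds input_ids, pixel_values and image_sizes; generate() runs the forward
# pass shown above, splitting each image into a base view plus patches and merging the
# projected image features into the text embeddings before calling the language model.
inputs = processor(text=prompt, images=image, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=50)
print(processor.decode(output[0], skip_special_tokens=True))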
venv/lib/python3.10/site-packages/transformers/models/llava_next/processing_llava_next.py ADDED
@@ -0,0 +1,135 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for LLaVa-NeXT.
17
+ """
18
+
19
+
20
+ from typing import List, Optional, Union
21
+
22
+ from ...feature_extraction_utils import BatchFeature
23
+ from ...image_utils import ImageInput
24
+ from ...processing_utils import ProcessorMixin
25
+ from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
26
+ from ...utils import TensorType
27
+
28
+
29
+ class LlavaNextProcessor(ProcessorMixin):
30
+ r"""
31
+ Constructs a LLaVa-NeXT processor which wraps a LLaVa-NeXT image processor and a LLaMa tokenizer into a single processor.
32
+
33
+ [`LlavaNextProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`] and [`LlamaTokenizerFast`]. See the
34
+ [`~LlavaNextProcessor.__call__`] and [`~LlavaNextProcessor.decode`] for more information.
35
+
36
+ Args:
37
+ image_processor ([`LlavaNextImageProcessor`], *optional*):
38
+ The image processor is a required input.
39
+ tokenizer ([`LlamaTokenizerFast`], *optional*):
40
+ The tokenizer is a required input.
41
+ """
42
+
43
+ attributes = ["image_processor", "tokenizer"]
44
+ image_processor_class = "LlavaNextImageProcessor"
45
+ tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
46
+
47
+ def __init__(self, image_processor=None, tokenizer=None):
48
+ super().__init__(image_processor, tokenizer)
49
+
50
+ def __call__(
51
+ self,
52
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
53
+ images: ImageInput = None,
54
+ padding: Union[bool, str, PaddingStrategy] = False,
55
+ truncation: Union[bool, str, TruncationStrategy] = None,
56
+ max_length=None,
57
+ return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
58
+ ) -> BatchFeature:
59
+ """
60
+ Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
61
+ and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
62
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
63
+ LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
64
+ of the above two methods for more information.
65
+
66
+ Args:
67
+ text (`str`, `List[str]`, `List[List[str]]`):
68
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
69
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
70
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
71
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
72
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
73
+ tensor. Both channels-first and channels-last formats are supported.
74
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
75
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
76
+ index) among:
77
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
78
+ sequence is provided).
79
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
80
+ acceptable input length for the model if that argument is not provided.
81
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
82
+ lengths).
83
+ max_length (`int`, *optional*):
84
+ Maximum length of the returned list and optionally padding length (see above).
85
+ truncation (`bool`, *optional*):
86
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
87
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
88
+ If set, will return tensors of a particular framework. Acceptable values are:
89
+
90
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
91
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
92
+ - `'np'`: Return NumPy `np.ndarray` objects.
93
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
94
+
95
+ Returns:
96
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
97
+
98
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
99
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
100
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
101
+ `None`).
102
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
103
+ """
104
+ if images is not None:
105
+ image_inputs = self.image_processor(images, return_tensors=return_tensors)
106
+ else:
107
+ image_inputs = {}
108
+ text_inputs = self.tokenizer(
109
+ text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
110
+ )
111
+
112
+ return BatchFeature(data={**text_inputs, **image_inputs})
113
+
114
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
115
+ def batch_decode(self, *args, **kwargs):
116
+ """
117
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
118
+ refer to the docstring of this method for more information.
119
+ """
120
+ return self.tokenizer.batch_decode(*args, **kwargs)
121
+
122
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
123
+ def decode(self, *args, **kwargs):
124
+ """
125
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
126
+ the docstring of this method for more information.
127
+ """
128
+ return self.tokenizer.decode(*args, **kwargs)
129
+
130
+ @property
131
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
132
+ def model_input_names(self):
133
+ tokenizer_input_names = self.tokenizer.model_input_names
134
+ image_processor_input_names = self.image_processor.model_input_names
135
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
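A short sketch of what `__call__` and `model_input_names` return for this processor (editorial note, not part of the uploaded file). The checkpoint id is assumed; any checkpoint hosting both a LLaVa-NeXT image processor and a LLaMA tokenizer would work.

from PIL import Image
from transformers import LlavaNextProcessor

processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")  # assumed checkpoint

image = Image.new("RGB", (336, 336), color="white")  # any PIL image works
batch = processor(text="<image>\nDescribe the image.", images=image, padding=True, return_tensors="pt")

print(sorted(batch.keys()))         # tokenizer outputs plus the image processor's pixel_values / image_sizes
print(processor.model_input_names)  # de-duplicated union of tokenizer and image processor input names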
venv/lib/python3.10/site-packages/transformers/models/pix2struct/__init__.py ADDED
@@ -0,0 +1,86 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_pix2struct": [
21
+ "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "Pix2StructConfig",
23
+ "Pix2StructTextConfig",
24
+ "Pix2StructVisionConfig",
25
+ ],
26
+ "processing_pix2struct": ["Pix2StructProcessor"],
27
+ }
28
+
29
+ try:
30
+ if not is_vision_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
36
+
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["modeling_pix2struct"] = [
45
+ "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
46
+ "Pix2StructPreTrainedModel",
47
+ "Pix2StructForConditionalGeneration",
48
+ "Pix2StructVisionModel",
49
+ "Pix2StructTextModel",
50
+ ]
51
+
52
+ if TYPE_CHECKING:
53
+ from .configuration_pix2struct import (
54
+ PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
55
+ Pix2StructConfig,
56
+ Pix2StructTextConfig,
57
+ Pix2StructVisionConfig,
58
+ )
59
+ from .processing_pix2struct import Pix2StructProcessor
60
+
61
+ try:
62
+ if not is_vision_available():
63
+ raise OptionalDependencyNotAvailable()
64
+ except OptionalDependencyNotAvailable:
65
+ pass
66
+ else:
67
+ from .image_processing_pix2struct import Pix2StructImageProcessor
68
+
69
+ try:
70
+ if not is_torch_available():
71
+ raise OptionalDependencyNotAvailable()
72
+ except OptionalDependencyNotAvailable:
73
+ pass
74
+ else:
75
+ from .modeling_pix2struct import (
76
+ PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
77
+ Pix2StructForConditionalGeneration,
78
+ Pix2StructPreTrainedModel,
79
+ Pix2StructTextModel,
80
+ Pix2StructVisionModel,
81
+ )
82
+
83
+ else:
84
+ import sys
85
+
86
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
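The `_LazyModule` indirection above keeps `import transformers` cheap: torch- and vision-backed classes are only imported when the corresponding attribute is first accessed. A small sketch of that behavior (editorial note, not part of the uploaded file; it assumes torch is installed):

# Importing the config class does not pull in torch.
from transformers.models.pix2struct import Pix2StructConfig

config = Pix2StructConfig()  # builds default text and vision sub-configs

# This access triggers the lazy import of modeling_pix2struct (requires torch).
from transformers.models.pix2struct import Pix2StructForConditionalGeneration

model = Pix2StructForConditionalGeneration(config)
print(type(model).__name__)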
venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.39 kB)
 
venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/configuration_pix2struct.cpython-310.pyc ADDED
Binary file (14.6 kB)
 
venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/convert_pix2struct_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (3.92 kB)
 
venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/image_processing_pix2struct.cpython-310.pyc ADDED
Binary file (15.5 kB)
 
venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/modeling_pix2struct.cpython-310.pyc ADDED
Binary file (52.7 kB)
 
venv/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/processing_pix2struct.cpython-310.pyc ADDED
Binary file (4.73 kB)
 
venv/lib/python3.10/site-packages/transformers/models/pix2struct/configuration_pix2struct.py ADDED
@@ -0,0 +1,387 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Pix2Struct model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class Pix2StructTextConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`Pix2StructTextModel`]. It is used to instantiate
33
+ a Pix2Struct text model according to the specified arguments, defining the model architecture. Instantiating a
34
+ configuration with the defaults will yield a similar configuration to that of the Pix2Struct text decoder used by
35
+ the [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 50244):
42
+ Vocabulary size of the `Pix2Struct` text model. Defines the number of different tokens that can be
43
+ represented by the `inputs_ids` passed when calling [`Pix2StructTextModel`].
44
+ hidden_size (`int`, *optional*, defaults to 768):
45
+ Dimensionality of the encoder layers and the pooler layer.
46
+ d_kv (`int`, *optional*, defaults to 64):
47
+ Dimensionality of the key, query, value projections in each attention head.
48
+ d_ff (`int`, *optional*, defaults to 2048):
49
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
50
+ num_layers (`int`, *optional*, defaults to 12):
51
+ Number of hidden layers in the Transformer encoder.
52
+ num_heads (`int`, *optional*, defaults to 12):
53
+ Number of attention heads for each attention layer in the Transformer encoder.
54
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
55
+ The number of buckets to use for each attention layer.
56
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
57
+ The maximum distance of the longer sequences for the bucket separation.
58
+ dropout_rate (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
61
+ The epsilon used by the layer normalization layers.
62
+ initializer_factor (`float`, *optional*, defaults to 1.0):
63
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
64
+ testing).
65
+ dense_act_fn (`Union[Callable, str]`, *optional*, defaults to `"gelu_new"`):
66
+ The non-linear activation function (function or string).
67
+ decoder_start_token_id (`int`, *optional*, defaults to 0):
68
+ The id of the `decoder_start_token_id` token.
69
+ use_cache (`bool`, *optional*, defaults to `False`):
70
+ Whether or not the model should return the last key/values attentions (not used by all models).
71
+ pad_token_id (`int`, *optional*, defaults to 0):
72
+ The id of the `padding` token.
73
+ eos_token_id (`int`, *optional*, defaults to 1):
74
+ The id of the `end-of-sequence` token.
75
+
76
+ Example:
77
+
78
+ ```python
79
+ >>> from transformers import Pix2StructTextConfig, Pix2StructTextModel
80
+
81
+ >>> # Initializing a Pix2StructTextConfig with google/pix2struct-base style configuration
82
+ >>> configuration = Pix2StructTextConfig()
83
+
84
+ >>> # Initializing a Pix2StructTextModel (with random weights) from the google/pix2struct-base style configuration
85
+ >>> model = Pix2StructTextModel(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+
91
+ model_type = "pix2struct_text_model"
92
+ keys_to_ignore_at_inference = ["past_key_values"]
93
+ attribute_map = {
94
+ "hidden_size": "hidden_size",
95
+ "num_attention_heads": "num_heads",
96
+ "num_hidden_layers": "num_layers",
97
+ }
98
+
99
+ def __init__(
100
+ self,
101
+ vocab_size=50244,
102
+ hidden_size=768,
103
+ d_kv=64,
104
+ d_ff=2048,
105
+ num_layers=12,
106
+ num_heads=12,
107
+ relative_attention_num_buckets=32,
108
+ relative_attention_max_distance=128,
109
+ dropout_rate=0.1,
110
+ layer_norm_epsilon=1e-6,
111
+ initializer_factor=1.0,
112
+ dense_act_fn="gelu_new",
113
+ decoder_start_token_id=0,
114
+ use_cache=False,
115
+ pad_token_id=0,
116
+ eos_token_id=1,
117
+ tie_word_embeddings=False,
118
+ is_decoder=True,
119
+ **kwargs,
120
+ ):
121
+ self.vocab_size = vocab_size
122
+ self.hidden_size = hidden_size
123
+ self.d_kv = d_kv
124
+ self.d_ff = d_ff
125
+ self.num_layers = num_layers
126
+ self.num_heads = num_heads
127
+ self.relative_attention_num_buckets = relative_attention_num_buckets
128
+ self.relative_attention_max_distance = relative_attention_max_distance
129
+ self.dropout_rate = dropout_rate
130
+ self.layer_norm_epsilon = layer_norm_epsilon
131
+ self.initializer_factor = initializer_factor
132
+ self.use_cache = use_cache
133
+
134
+ self.eos_token_id = eos_token_id
135
+ self.decoder_start_token_id = decoder_start_token_id
136
+
137
+ # for backwards compatibility
138
+ self.dense_act_fn = dense_act_fn
139
+
140
+ super().__init__(
141
+ pad_token_id=pad_token_id,
142
+ eos_token_id=eos_token_id,
143
+ decoder_start_token_id=decoder_start_token_id,
144
+ tie_word_embeddings=tie_word_embeddings,
145
+ is_decoder=is_decoder,
146
+ **kwargs,
147
+ )
148
+
149
+ @classmethod
150
+ def from_pretrained(
151
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
152
+ ) -> "PretrainedConfig":
153
+ cls._set_token_in_kwargs(kwargs)
154
+
155
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
156
+
157
+ # get the text config dict if we are loading from Pix2StructConfig
158
+ if config_dict.get("model_type") == "pix2struct":
159
+ config_dict = config_dict["text_config"]
160
+
161
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
162
+ logger.warning(
163
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
164
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
165
+ )
166
+
167
+ return cls.from_dict(config_dict, **kwargs)
168
+
169
+
170
+ class Pix2StructVisionConfig(PretrainedConfig):
171
+ r"""
172
+ This is the configuration class to store the configuration of a [`Pix2StructVisionModel`]. It is used to
173
+ instantiate a Pix2Struct vision model according to the specified arguments, defining the model architecture.
174
+ Instantiating a configuration defaults will yield a similar configuration to that of the Pix2Struct-base
175
+ [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.
176
+
177
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
178
+ documentation from [`PretrainedConfig`] for more information.
179
+
180
+ Args:
181
+ hidden_size (`int`, *optional*, defaults to 768):
182
+ Dimensionality of the encoder layers and the pooler layer.
183
+ patch_embed_hidden_size (`int`, *optional*, defaults to 768):
184
+ Dimensionality of the input patch_embedding layer in the Transformer encoder.
185
+ d_ff (`int`, *optional*, defaults to 2048):
186
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
187
+ d_kv (`int`, *optional*, defaults to 64):
188
+ Dimensionality of the key, query, value projections per attention head.
189
+ num_hidden_layers (`int`, *optional*, defaults to 12):
190
+ Number of hidden layers in the Transformer encoder.
191
+ num_attention_heads (`int`, *optional*, defaults to 12):
192
+ Number of attention heads for each attention layer in the Transformer encoder.
193
+ dense_act_fn (`str` or `function`, *optional*, defaults to `"gelu_new"`):
194
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
195
+ `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported.
196
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
197
+ The epsilon used by the layer normalization layers.
198
+ dropout_rate (`float`, *optional*, defaults to 0.0):
199
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
200
+ attention_dropout (`float`, *optional*, defaults to 0.0):
201
+ The dropout ratio for the attention probabilities.
202
+ initializer_range (`float`, *optional*, defaults to 1e-10):
203
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
204
+ initializer_factor (`float`, *optional*, defaults to 1.0):
205
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
206
+ testing).
207
+ seq_len (`int`, *optional*, defaults to 4096):
208
+ Maximum sequence length (here number of patches) supported by the model.
209
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
210
+ The number of buckets to use for each attention layer.
211
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
212
+ The maximum distance (in tokens) to use for each attention layer.
213
+
214
+ Example:
215
+
216
+ ```python
217
+ >>> from transformers import Pix2StructVisionConfig, Pix2StructVisionModel
218
+
219
+ >>> # Initializing a Pix2StructVisionConfig with google/pix2struct-base style configuration
220
+ >>> configuration = Pix2StructVisionConfig()
221
+
222
+ >>> # Initializing a Pix2StructVisionModel (with random weights) from the google/pix2struct-base style configuration
223
+ >>> model = Pix2StructVisionModel(configuration)
224
+
225
+ >>> # Accessing the model configuration
226
+ >>> configuration = model.config
227
+ ```"""
228
+
229
+ model_type = "pix2struct_vision_model"
230
+
231
+ def __init__(
232
+ self,
233
+ hidden_size=768,
234
+ patch_embed_hidden_size=768,
235
+ d_ff=2048,
236
+ d_kv=64,
237
+ num_hidden_layers=12,
238
+ num_attention_heads=12,
239
+ dense_act_fn="gelu_new",
240
+ layer_norm_eps=1e-6,
241
+ dropout_rate=0.0,
242
+ attention_dropout=0.0,
243
+ initializer_range=1e-10,
244
+ initializer_factor=1.0,
245
+ seq_len=4096,
246
+ relative_attention_num_buckets=32,
247
+ relative_attention_max_distance=128,
248
+ **kwargs,
249
+ ):
250
+ super().__init__(**kwargs)
251
+
252
+ self.hidden_size = hidden_size
253
+ self.patch_embed_hidden_size = patch_embed_hidden_size
254
+ self.d_ff = d_ff
255
+ self.dropout_rate = dropout_rate
256
+ self.num_hidden_layers = num_hidden_layers
257
+ self.num_attention_heads = num_attention_heads
258
+ self.initializer_range = initializer_range
259
+ self.initializer_factor = initializer_factor
260
+ self.attention_dropout = attention_dropout
261
+ self.layer_norm_eps = layer_norm_eps
262
+ self.dense_act_fn = dense_act_fn
263
+ self.seq_len = seq_len
264
+ self.relative_attention_num_buckets = relative_attention_num_buckets
265
+ self.relative_attention_max_distance = relative_attention_max_distance
266
+ self.d_kv = d_kv
267
+
268
+ @classmethod
269
+ def from_pretrained(
270
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
271
+ ) -> "PretrainedConfig":
272
+ cls._set_token_in_kwargs(kwargs)
273
+
274
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
275
+
276
+ # get the vision config dict if we are loading from Pix2StructConfig
277
+ if config_dict.get("model_type") == "pix2struct":
278
+ config_dict = config_dict["vision_config"]
279
+
280
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
281
+ logger.warning(
282
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
283
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
284
+ )
285
+
286
+ return cls.from_dict(config_dict, **kwargs)
287
+
288
+
289
+ class Pix2StructConfig(PretrainedConfig):
290
+ r"""
291
+ [`Pix2StructConfig`] is the configuration class to store the configuration of a
292
+ [`Pix2StructForConditionalGeneration`]. It is used to instantiate a Pix2Struct model according to the specified
293
+ arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will
294
+ yield a similar configuration to that of the Pix2Struct-base
295
+ [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.
296
+
297
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
298
+ documentation from [`PretrainedConfig`] for more information.
299
+
300
+ Args:
301
+ text_config (`dict`, *optional*):
302
+ Dictionary of configuration options used to initialize [`Pix2StructTextConfig`].
303
+ vision_config (`dict`, *optional*):
304
+ Dictionary of configuration options used to initialize [`Pix2StructVisionConfig`].
305
+ initializer_factor (`float`, *optional*, defaults to 1.0):
306
+ Factor to multiply the initialization range with.
307
+ initializer_range (`float`, *optional*, defaults to 0.02):
308
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
309
+ is_vqa (`bool`, *optional*, defaults to `False`):
310
+ Whether the model has been fine-tuned for VQA or not.
311
+ kwargs (*optional*):
312
+ Dictionary of keyword arguments.
313
+
314
+ Example:
315
+
316
+ ```python
317
+ >>> from transformers import Pix2StructConfig, Pix2StructForConditionalGeneration
318
+
319
+ >>> # Initializing a Pix2StructConfig with google/pix2struct-base style configuration
320
+ >>> configuration = Pix2StructConfig()
321
+
322
+ >>> # Initializing a Pix2StructForConditionalGeneration (with random weights) from the google/pix2struct-base style configuration
323
+ >>> model = Pix2StructForConditionalGeneration(configuration)
324
+
325
+ >>> # Accessing the model configuration
326
+ >>> configuration = model.config
327
+
328
+ >>> # We can also initialize a Pix2StructConfig from a Pix2StructTextConfig and a Pix2StructVisionConfig
329
+
330
+ >>> # Initializing a Pix2Struct text and Pix2Struct vision configuration
331
+ >>> config_text = Pix2StructTextConfig()
332
+ >>> config_vision = Pix2StructVisionConfig()
333
+
334
+ >>> config = Pix2StructConfig.from_text_vision_configs(config_text, config_vision)
335
+ ```"""
336
+
337
+ model_type = "pix2struct"
338
+
339
+ def __init__(
340
+ self,
341
+ text_config=None,
342
+ vision_config=None,
343
+ initializer_factor=1.0,
344
+ initializer_range=0.02,
345
+ is_vqa=False,
346
+ tie_word_embeddings=False,
347
+ is_encoder_decoder=True,
348
+ **kwargs,
349
+ ):
350
+ super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
351
+
352
+ if text_config is None:
353
+ text_config = {}
354
+ logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
355
+
356
+ if vision_config is None:
357
+ vision_config = {}
358
+ logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
359
+
360
+ self.text_config = Pix2StructTextConfig(**text_config)
361
+ self.vision_config = Pix2StructVisionConfig(**vision_config)
362
+
363
+ self.decoder_start_token_id = self.text_config.decoder_start_token_id
364
+ self.pad_token_id = self.text_config.pad_token_id
365
+ self.eos_token_id = self.text_config.eos_token_id
366
+
367
+ self.initializer_factor = initializer_factor
368
+ self.initializer_range = initializer_range
369
+
370
+ self.text_config.initializer_range = self.initializer_range
371
+ self.vision_config.initializer_range = self.initializer_range
372
+
373
+ self.is_vqa = is_vqa
374
+
375
+ @classmethod
376
+ def from_text_vision_configs(
377
+ cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
378
+ ):
379
+ r"""
380
+ Instantiate a [`Pix2StructConfig`] (or a derived class) from pix2struct text model configuration and pix2struct
381
+ vision model configuration.
382
+
383
+ Returns:
384
+ [`Pix2StructConfig`]: An instance of a configuration object
385
+ """
386
+
387
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
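A sketch of the nested-config loading path that the `from_pretrained` overrides above implement (editorial note, not part of the uploaded file). The checkpoint id "google/pix2struct-base" is the one referenced in the docstrings; network access is assumed.

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

full = Pix2StructConfig.from_pretrained("google/pix2struct-base")          # whole composite config
text = Pix2StructTextConfig.from_pretrained("google/pix2struct-base")      # extracts config_dict["text_config"]
vision = Pix2StructVisionConfig.from_pretrained("google/pix2struct-base")  # extracts config_dict["vision_config"]

assert text.hidden_size == full.text_config.hidden_size
assert vision.num_hidden_layers == full.vision_config.num_hidden_layers

# The composite config can also be rebuilt from its parts:
rebuilt = Pix2StructConfig.from_text_vision_configs(text, vision)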
venv/lib/python3.10/site-packages/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,155 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import argparse
16
+ import os
17
+ import re
18
+
19
+ import torch
20
+ from flax.traverse_util import flatten_dict
21
+ from t5x import checkpoints
22
+
23
+ from transformers import (
24
+ AutoTokenizer,
25
+ Pix2StructConfig,
26
+ Pix2StructForConditionalGeneration,
27
+ Pix2StructImageProcessor,
28
+ Pix2StructProcessor,
29
+ Pix2StructTextConfig,
30
+ Pix2StructVisionConfig,
31
+ )
32
+
33
+
34
+ def get_flax_param(t5x_checkpoint_path):
35
+ flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
36
+ flax_params = flatten_dict(flax_params)
37
+ return flax_params
38
+
39
+
40
+ def rename_and_convert_flax_params(flax_dict):
41
+ converted_dict = {}
42
+
43
+ CONVERSION_MAPPING = {
44
+ "token_embedder": "embeddings",
45
+ "encoder_norm": "layernorm",
46
+ "kernel": "weight",
47
+ ".out": ".output",
48
+ "scale": "weight",
49
+ "embedders_0.pos_embedding": "row_embedder.weight",
50
+ "embedders_1.pos_embedding": "column_embedder.weight",
51
+ }
52
+
53
+ DECODER_CONVERSION_MAPPING = {
54
+ "query": "attention.query",
55
+ "key": "attention.key",
56
+ "value": "attention.value",
57
+ "output.dense": "output",
58
+ "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
59
+ "pre_self_attention_layer_norm": "self_attention.layer_norm",
60
+ "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
61
+ "mlp.": "mlp.DenseReluDense.",
62
+ "pre_mlp_layer_norm": "mlp.layer_norm",
63
+ "self_attention.o": "self_attention.attention.o",
64
+ "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
65
+ "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
66
+ "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
67
+ "decoder.logits_dense.weight": "decoder.lm_head.weight",
68
+ }
69
+
70
+ for key in flax_dict.keys():
71
+ if "target" in key:
72
+ # remove the first prefix from the key
73
+ new_key = ".".join(key[1:])
74
+
75
+ # rename the key
76
+ for old, new in CONVERSION_MAPPING.items():
77
+ new_key = new_key.replace(old, new)
78
+
79
+ if "decoder" in new_key:
80
+ for old, new in DECODER_CONVERSION_MAPPING.items():
81
+ new_key = new_key.replace(old, new)
82
+
83
+ if "layers" in new_key and "decoder" not in new_key:
84
+ # use regex to replace the layer number
85
+ new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
86
+ new_key = new_key.replace("encoder", "encoder.encoder")
87
+
88
+ elif "layers" in new_key and "decoder" in new_key:
89
+ # use regex to replace the layer number
90
+ new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
91
+
92
+ converted_dict[new_key] = flax_dict[key]
93
+
94
+ converted_torch_dict = {}
95
+ # convert converted_dict into torch format
96
+ for key in converted_dict.keys():
97
+ if ("embed_tokens" not in key) and ("embedder" not in key):
98
+ converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
99
+ else:
100
+ converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
101
+
102
+ return converted_torch_dict
103
+
104
+
105
+ def convert_pix2struct_original_pytorch_checkpoint_to_hf(
106
+ t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
107
+ ):
108
+ flax_params = get_flax_param(t5x_checkpoint_path)
109
+
110
+ if not use_large:
111
+ encoder_config = Pix2StructVisionConfig()
112
+ decoder_config = Pix2StructTextConfig()
113
+ else:
114
+ encoder_config = Pix2StructVisionConfig(
115
+ hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
116
+ )
117
+ decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
118
+ config = Pix2StructConfig(
119
+ vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
120
+ )
121
+
122
+ model = Pix2StructForConditionalGeneration(config)
123
+
124
+ torch_params = rename_and_convert_flax_params(flax_params)
125
+ model.load_state_dict(torch_params)
126
+
127
+ tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
128
+ image_processor = Pix2StructImageProcessor()
129
+ processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)
130
+
131
+ if use_large:
132
+ processor.image_processor.max_patches = 4096
133
+
134
+ processor.image_processor.is_vqa = True
135
+
136
+ # mkdir if needed
137
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
138
+
139
+ model.save_pretrained(pytorch_dump_folder_path)
140
+ processor.save_pretrained(pytorch_dump_folder_path)
141
+
142
+ print("Model saved in {}".format(pytorch_dump_folder_path))
143
+
144
+
145
+ if __name__ == "__main__":
146
+ parser = argparse.ArgumentParser()
147
+ parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
148
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
149
+ parser.add_argument("--use_large", action="store_true", help="Use large model.")
150
+ parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
151
+ args = parser.parse_args()
152
+
153
+ convert_pix2struct_original_pytorch_checkpoint_to_hf(
154
+ args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
155
+ )
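The same conversion can be driven programmatically instead of via the CLI shown above (editorial note, not part of the uploaded file). The paths below are placeholders, and the t5x/flax dependencies imported at the top of the script must be installed.

from transformers.models.pix2struct.convert_pix2struct_original_pytorch_to_hf import (
    convert_pix2struct_original_pytorch_checkpoint_to_hf,
)

# Loads the T5x checkpoint, renames/transposes the weights, and saves a HF model + processor.
convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path="/path/to/t5x_checkpoint",      # placeholder
    pytorch_dump_folder_path="/path/to/hf_dump",        # placeholder
    use_large=False,
    is_vqa=False,
)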
venv/lib/python3.10/site-packages/transformers/models/pix2struct/image_processing_pix2struct.py ADDED
@@ -0,0 +1,460 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Pix2Struct."""
16
+ import io
17
+ import math
18
+ from typing import Dict, Optional, Union
19
+
20
+ import numpy as np
21
+ from huggingface_hub import hf_hub_download
22
+
23
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
24
+ from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
25
+ from ...image_utils import (
26
+ ChannelDimension,
27
+ ImageInput,
28
+ get_image_size,
29
+ infer_channel_dimension_format,
30
+ make_list_of_images,
31
+ to_numpy_array,
32
+ valid_images,
33
+ )
34
+ from ...utils import TensorType, is_torch_available, is_vision_available, logging
35
+ from ...utils.import_utils import requires_backends
36
+
37
+
38
+ if is_vision_available():
39
+ import textwrap
40
+
41
+ from PIL import Image, ImageDraw, ImageFont
42
+
43
+ if is_torch_available():
44
+ import torch
45
+
46
+ logger = logging.get_logger(__name__)
47
+ DEFAULT_FONT_PATH = "ybelkada/fonts"
48
+
49
+
50
+ # adapted from: https://discuss.pytorch.org/t/tf-image-extract-patches-in-pytorch/171409/2
51
+ def torch_extract_patches(image_tensor, patch_height, patch_width):
52
+ """
53
+ Utility function to extract patches from a given image tensor. Returns a tensor of shape (1, `image_height` / `patch_height`,
54
+ `image_width` / `patch_width`, `num_channels` x `patch_height` x `patch_width`)
55
+
56
+ Args:
57
+ image_tensor (torch.Tensor):
58
+ The image tensor to extract patches from.
59
+ patch_height (int):
60
+ The height of the patches to extract.
61
+ patch_width (int):
62
+ The width of the patches to extract.
63
+ """
64
+ requires_backends(torch_extract_patches, ["torch"])
65
+
66
+ image_tensor = image_tensor.unsqueeze(0)
67
+ patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
68
+ patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
69
+ patches = patches.permute(0, 4, 2, 3, 1).reshape(
70
+ image_tensor.size(2) // patch_height,
71
+ image_tensor.size(3) // patch_width,
72
+ image_tensor.size(1) * patch_height * patch_width,
73
+ )
74
+ return patches.unsqueeze(0)
75
+
76
+
77
+ # Adapted from https://github.com/google-research/pix2struct/blob/0e1779af0f4db4b652c1d92b3bbd2550a7399123/pix2struct/preprocessing/preprocessing_utils.py#L106
78
+ def render_text(
79
+ text: str,
80
+ text_size: int = 36,
81
+ text_color: str = "black",
82
+ background_color: str = "white",
83
+ left_padding: int = 5,
84
+ right_padding: int = 5,
85
+ top_padding: int = 5,
86
+ bottom_padding: int = 5,
87
+ font_bytes: Optional[bytes] = None,
88
+ font_path: Optional[str] = None,
89
+ ) -> Image.Image:
90
+ """
91
+ Render text. This script is entirely adapted from the original script that can be found here:
92
+ https://github.com/google-research/pix2struct/blob/main/pix2struct/preprocessing/preprocessing_utils.py
93
+
94
+ Args:
95
+ text (`str`):
96
+ Text to render.
97
+ text_size (`int`, *optional*, defaults to 36):
98
+ Size of the text.
99
+ text_color (`str`, *optional*, defaults to `"black"`):
100
+ Color of the text.
101
+ background_color (`str`, *optional*, defaults to `"white"`):
102
+ Color of the background.
103
+ left_padding (`int`, *optional*, defaults to 5):
104
+ Padding on the left.
105
+ right_padding (`int`, *optional*, defaults to 5):
106
+ Padding on the right.
107
+ top_padding (`int`, *optional*, defaults to 5):
108
+ Padding on the top.
109
+ bottom_padding (`int`, *optional*, defaults to 5):
110
+ Padding on the bottom.
111
+ font_bytes (`bytes`, *optional*):
112
+ Bytes of the font to use. If `None`, the default font will be used.
113
+ font_path (`str`, *optional*):
114
+ Path to the font to use. If `None`, the default font will be used.
115
+ """
116
+ requires_backends(render_text, "vision")
117
+ # Add new lines so that each line is no more than 80 characters.
118
+
119
+ wrapper = textwrap.TextWrapper(width=80)
120
+ lines = wrapper.wrap(text=text)
121
+ wrapped_text = "\n".join(lines)
122
+
123
+ if font_bytes is not None and font_path is None:
124
+ font = io.BytesIO(font_bytes)
125
+ elif font_path is not None:
126
+ font = font_path
127
+ else:
128
+ font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
129
+ font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
130
+
131
+ # Use a temporary canvas to determine the width and height in pixels when
132
+ # rendering the text.
133
+ temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
134
+ _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
135
+
136
+ # Create the actual image with a bit of padding around the text.
137
+ image_width = text_width + left_padding + right_padding
138
+ image_height = text_height + top_padding + bottom_padding
139
+ image = Image.new("RGB", (image_width, image_height), background_color)
140
+ draw = ImageDraw.Draw(image)
141
+ draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
142
+ return image
143
+
144
+
145
+ # Adapted from https://github.com/google-research/pix2struct/blob/0e1779af0f4db4b652c1d92b3bbd2550a7399123/pix2struct/preprocessing/preprocessing_utils.py#L87
146
+ def render_header(
147
+ image: np.ndarray, header: str, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
148
+ ):
149
+ """
150
+ Renders the input text as a header on the input image.
151
+
152
+ Args:
153
+ image (`np.ndarray`):
154
+ The image to render the header on.
155
+ header (`str`):
156
+ The header text.
157
+ input_data_format (`Union[ChannelDimension, str]`, *optional*):
158
+ The data format of the image. Can be either "ChannelDimension.channels_first" or
159
+ "ChannelDimension.channels_last".
160
+
161
+ Returns:
162
+ `np.ndarray`: The image with the header rendered.
163
+ """
164
+ requires_backends(render_header, "vision")
165
+
166
+ # Convert to PIL image if necessary
167
+ image = to_pil_image(image, input_data_format=input_data_format)
168
+
169
+ header_image = render_text(header, **kwargs)
170
+ new_width = max(header_image.width, image.width)
171
+
172
+ new_height = int(image.height * (new_width / image.width))
173
+ new_header_height = int(header_image.height * (new_width / header_image.width))
174
+
175
+ new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
176
+ new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
177
+ new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
178
+
179
+ # Convert back to the original framework if necessary
180
+ new_image = to_numpy_array(new_image)
181
+
182
+ if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
183
+ new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
184
+
185
+ return new_image
186
+
187
+
188
+ class Pix2StructImageProcessor(BaseImageProcessor):
189
+ r"""
190
+ Constructs a Pix2Struct image processor.
191
+
192
+ Args:
193
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
194
+ Whether to convert the image to RGB.
195
+ do_normalize (`bool`, *optional*, defaults to `True`):
196
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
197
+ method. According to Pix2Struct paper and code, the image is normalized with its own mean and standard
198
+ deviation.
199
+ patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`):
200
+ The patch size to use for the image. According to Pix2Struct paper and code, the patch size is 16x16.
201
+ max_patches (`int`, *optional*, defaults to 2048):
202
+ The maximum number of patches to extract from the image as per the [Pix2Struct
203
+ paper](https://arxiv.org/pdf/2210.03347.pdf).
204
+ is_vqa (`bool`, *optional*, defaults to `False`):
205
+ Whether or not the image processor is for the VQA task. If `True` and `header_text` is passed in, text is
206
+ rendered onto the input images.
207
+ """
208
+
209
+ model_input_names = ["flattened_patches"]
210
+
211
+ def __init__(
212
+ self,
213
+ do_convert_rgb: bool = True,
214
+ do_normalize: bool = True,
215
+ patch_size: Dict[str, int] = None,
216
+ max_patches: int = 2048,
217
+ is_vqa: bool = False,
218
+ **kwargs,
219
+ ) -> None:
220
+ super().__init__(**kwargs)
221
+ self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
222
+ self.do_normalize = do_normalize
223
+ self.do_convert_rgb = do_convert_rgb
224
+ self.max_patches = max_patches
225
+ self.is_vqa = is_vqa
226
+
227
+ def extract_flattened_patches(
228
+ self,
229
+ image: np.ndarray,
230
+ max_patches: int,
231
+ patch_size: dict,
232
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
233
+ **kwargs,
234
+ ) -> np.ndarray:
235
+ """
236
+ Extract flattened patches from an image.
237
+
238
+ Args:
239
+ image (`np.ndarray`):
240
+ Image to extract flattened patches from.
241
+ max_patches (`int`):
242
+ Maximum number of patches to extract.
243
+ patch_size (`dict`):
244
+ Dictionary containing the patch height and width.
245
+
246
+ Returns:
247
+ result (`np.ndarray`):
248
+ A sequence of `max_patches` flattened patches.
249
+ """
250
+ requires_backends(self.extract_flattened_patches, "torch")
251
+
252
+ # convert to torch
253
+ image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format)
254
+ image = torch.from_numpy(image)
255
+
256
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
257
+ image_height, image_width = get_image_size(image, ChannelDimension.FIRST)
258
+
259
+ # maximize scale s.t.
260
+ scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
261
+ num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
262
+ num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
263
+ resized_height = max(num_feasible_rows * patch_height, 1)
264
+ resized_width = max(num_feasible_cols * patch_width, 1)
265
+
266
+ image = torch.nn.functional.interpolate(
267
+ image.unsqueeze(0),
268
+ size=(resized_height, resized_width),
269
+ mode="bilinear",
270
+ align_corners=False,
271
+ antialias=True,
272
+ ).squeeze(0)
273
+
274
+ # [1, rows, columns, patch_height * patch_width * image_channels]
275
+ patches = torch_extract_patches(image, patch_height, patch_width)
276
+
277
+ patches_shape = patches.shape
278
+ rows = patches_shape[1]
279
+ columns = patches_shape[2]
280
+ depth = patches_shape[3]
281
+
282
+ # [rows * columns, patch_height * patch_width * image_channels]
283
+ patches = patches.reshape([rows * columns, depth])
284
+
285
+ # [rows * columns, 1]
286
+ row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
287
+ col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
288
+
289
+ # Offset by 1 so the ids do not contain zeros, which represent padding.
290
+ row_ids += 1
291
+ col_ids += 1
292
+
293
+ # Prepare additional patch features.
294
+ # [rows * columns, 1]
295
+ row_ids = row_ids.to(torch.float32)
296
+ col_ids = col_ids.to(torch.float32)
297
+
298
+ # [rows * columns, 2 + patch_height * patch_width * image_channels]
299
+ result = torch.cat([row_ids, col_ids, patches], -1)
300
+
301
+ # [max_patches, 2 + patch_height * patch_width * image_channels]
302
+ result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
303
+
304
+ result = to_numpy_array(result)
305
+
306
+ return result
307
+
308
+ def normalize(
309
+ self,
310
+ image: np.ndarray,
311
+ data_format: Optional[Union[str, ChannelDimension]] = None,
312
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
313
+ **kwargs,
314
+ ) -> np.ndarray:
315
+ """
316
+ Normalize an image. image = (image - image_mean) / image_std.
317
+
318
+ The image std is to mimic the tensorflow implementation of the `per_image_standardization`:
319
+ https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization
320
+
321
+ Args:
322
+ image (`np.ndarray`):
323
+ Image to normalize.
324
+ data_format (`str` or `ChannelDimension`, *optional*):
325
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
326
+ image is used.
327
+ input_data_format (`str` or `ChannelDimension`, *optional*):
328
+ The channel dimension format of the input image. If not provided, it will be inferred.
329
+ """
330
+ if image.dtype == np.uint8:
331
+ image = image.astype(np.float32)
332
+
333
+ # take mean across the whole `image`
334
+ mean = np.mean(image)
335
+ std = np.std(image)
336
+ adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
337
+
338
+ return normalize(
339
+ image,
340
+ mean=mean,
341
+ std=adjusted_stddev,
342
+ data_format=data_format,
343
+ input_data_format=input_data_format,
344
+ **kwargs,
345
+ )
346
+
347
+ def preprocess(
348
+ self,
349
+ images: ImageInput,
350
+ header_text: Optional[str] = None,
351
+ do_convert_rgb: bool = None,
352
+ do_normalize: Optional[bool] = None,
353
+ max_patches: Optional[int] = None,
354
+ patch_size: Optional[Dict[str, int]] = None,
355
+ return_tensors: Optional[Union[str, TensorType]] = None,
356
+ data_format: ChannelDimension = ChannelDimension.FIRST,
357
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
358
+ **kwargs,
359
+ ) -> ImageInput:
360
+ """
361
+ Preprocess an image or batch of images. The processor first computes the maximum possible number of
362
+ aspect-ratio preserving patches of size `patch_size` that can be extracted from the image. It then pads the
363
+ image with zeros to make the image respect the constraint of `max_patches`. Before extracting the patches the
364
+ images are standardized following the tensorflow implementation of `per_image_standardization`
365
+ (https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization).
366
+
367
+
368
+ Args:
369
+ images (`ImageInput`):
370
+ Image to preprocess. Expects a single or batch of images.
371
+ header_text (`Union[List[str], str]`, *optional*):
372
+ Text to render as a header. Only has an effect if `image_processor.is_vqa` is `True`.
373
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
374
+ Whether to convert the image to RGB.
375
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
376
+ Whether to normalize the image.
377
+ max_patches (`int`, *optional*, defaults to `self.max_patches`):
378
+ Maximum number of patches to extract.
379
+ patch_size (`dict`, *optional*, defaults to `self.patch_size`):
380
+ Dictionary containing the patch height and width.
381
+ return_tensors (`str` or `TensorType`, *optional*):
382
+ The type of tensors to return. Can be one of:
383
+ - Unset: Return a list of `np.ndarray`.
384
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
385
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
386
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
387
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
388
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
389
+ The channel dimension format for the output image. Can be one of:
390
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
391
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
392
+ - Unset: Use the channel dimension format of the input image.
393
+ input_data_format (`ChannelDimension` or `str`, *optional*):
394
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
395
+ from the input image. Can be one of:
396
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
397
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
398
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
399
+ """
400
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
401
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
402
+ patch_size = patch_size if patch_size is not None else self.patch_size
403
+ max_patches = max_patches if max_patches is not None else self.max_patches
404
+ is_vqa = self.is_vqa
405
+
406
+ if kwargs.get("data_format", None) is not None:
407
+ raise ValueError("data_format is not an accepted input as the outputs are ")
408
+
409
+ images = make_list_of_images(images)
410
+
411
+ if not valid_images(images):
412
+ raise ValueError(
413
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
414
+ "torch.Tensor, tf.Tensor or jax.ndarray."
415
+ )
416
+
417
+ # PIL RGBA images are converted to RGB
418
+ if do_convert_rgb:
419
+ images = [convert_to_rgb(image) for image in images]
420
+
421
+ # All transformations expect numpy arrays.
422
+ images = [to_numpy_array(image) for image in images]
423
+
424
+ if input_data_format is None:
425
+ # We assume that all images have the same channel dimension format.
426
+ input_data_format = infer_channel_dimension_format(images[0])
427
+
428
+ if is_vqa:
429
+ if header_text is None:
430
+ raise ValueError("A header text must be provided for VQA models.")
431
+ font_bytes = kwargs.pop("font_bytes", None)
432
+ font_path = kwargs.pop("font_path", None)
433
+
434
+ if isinstance(header_text, str):
435
+ header_text = [header_text] * len(images)
436
+
437
+ images = [
438
+ render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
439
+ for i, image in enumerate(images)
440
+ ]
441
+
442
+ if do_normalize:
443
+ images = [self.normalize(image=image, input_data_format=input_data_format) for image in images]
444
+
445
+ # convert to torch tensor and permute
446
+ images = [
447
+ self.extract_flattened_patches(
448
+ image=image, max_patches=max_patches, patch_size=patch_size, input_data_format=input_data_format
449
+ )
450
+ for image in images
451
+ ]
452
+
453
+ # create attention mask in numpy
454
+ attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
455
+
456
+ encoded_outputs = BatchFeature(
457
+ data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
458
+ )
459
+
460
+ return encoded_outputs
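# A short usage sketch for preprocess(), assuming this processor is exposed as
# transformers.Pix2StructImageProcessor and used with a non-VQA checkpoint.
import numpy as np
from PIL import Image
from transformers import Pix2StructImageProcessor

processor = Pix2StructImageProcessor(max_patches=2048, patch_size={"height": 16, "width": 16})
image = Image.fromarray(np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8))

inputs = processor.preprocess(images=image, return_tensors="pt")
print(inputs["flattened_patches"].shape)  # torch.Size([1, 2048, 770]) -> 2 ids + 16 * 16 * 3 pixel values
print(inputs["attention_mask"].shape)     # torch.Size([1, 2048])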
venv/lib/python3.10/site-packages/transformers/models/pix2struct/modeling_pix2struct.py ADDED
@@ -0,0 +1,1786 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. & Google team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Pix2Struct modeling file"""
16
+
17
+ import math
18
+ from typing import Dict, List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import (
26
+ BaseModelOutput,
27
+ BaseModelOutputWithPooling,
28
+ CausalLMOutputWithCrossAttentions,
29
+ Seq2SeqLMOutput,
30
+ Seq2SeqModelOutput,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...pytorch_utils import ALL_LAYERNORM_LAYERS
34
+ from ...utils import (
35
+ DUMMY_INPUTS,
36
+ DUMMY_MASK,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ is_torch_fx_proxy,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from .configuration_pix2struct import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ # General docstring
49
+ _CONFIG_FOR_DOC = "Pix2StructConfig"
50
+
51
+
52
+ from ..deprecated._archive_maps import PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
53
+
54
+
55
+ # Adapted from transformers.models.t5.modeling_t5.T5LayerNorm with T5->Pix2Struct
56
+ class Pix2StructLayerNorm(nn.Module):
57
+ def __init__(self, hidden_size, eps=1e-6):
58
+ """
59
+ Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
60
+ """
61
+ super().__init__()
62
+ self.weight = nn.Parameter(torch.ones(hidden_size))
63
+ self.variance_epsilon = eps
64
+
65
+ def forward(self, hidden_states):
66
+ # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
67
+ # Square Layer Normalization https://arxiv.org/abs/1910.07467, thus the variance is calculated
68
+ # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
69
+ # half-precision inputs is done in fp32
70
+
71
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
72
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
73
+
74
+ # convert into half-precision if necessary
75
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
76
+ hidden_states = hidden_states.to(self.weight.dtype)
77
+
78
+ return self.weight * hidden_states
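# A minimal sketch of the RMS-style normalization above: each position is scaled
# by the reciprocal root mean square of its features, with no mean subtraction
# and no bias.
import torch

hidden = torch.randn(1, 4, 8)
variance = hidden.pow(2).mean(-1, keepdim=True)
normed = hidden * torch.rsqrt(variance + 1e-6)
print(normed.pow(2).mean(-1))  # ~1.0 for every position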
79
+
80
+
81
+ try:
82
+ from apex.normalization import FusedRMSNorm
83
+
84
+ Pix2StructLayerNorm = FusedRMSNorm # noqa
85
+
86
+ logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of Pix2StructLayerNorm")
87
+ except ImportError:
88
+ # using the normal Pix2StructLayerNorm
89
+ pass
90
+ except Exception:
91
+ logger.warning("Discovered apex but it failed to load, falling back to Pix2StructLayerNorm")
92
+ pass
93
+
94
+ ALL_LAYERNORM_LAYERS.append(Pix2StructLayerNorm)
95
+
96
+
97
+ class Pix2StructVisionEmbeddings(nn.Module):
98
+ r"""
99
+ Construct the embeddings from patch. In `Pix2Struct` the input is different from classic Vision-transformer models.
100
+ Here the input is a sequence of `seq_len` flattened patches that also combines padding patches (tokens). Each patch
101
+ is represented by a vector of `hidden_size` values.
102
+ """
103
+
104
+ def __init__(self, config: Pix2StructConfig) -> None:
105
+ super().__init__()
106
+ self.patch_projection = nn.Linear(config.patch_embed_hidden_size, config.hidden_size)
107
+
108
+ self.row_embedder = nn.Embedding(config.seq_len, config.hidden_size)
109
+ self.column_embedder = nn.Embedding(config.seq_len, config.hidden_size)
110
+
111
+ self.dropout = nn.Dropout(config.dropout_rate)
112
+
113
+ def forward(self, flattened_patches: torch.Tensor) -> torch.Tensor:
114
+ # the row and column indices are stored in the first and second position of the flattened_patches
115
+ # flattened_patches: `batch_size`, `seq_len`, `hidden_size` + 2
116
+ row_indices = flattened_patches[:, :, 0].long()
117
+ col_indices = flattened_patches[:, :, 1].long()
118
+
119
+ flattened_patches = flattened_patches[:, :, 2:]
120
+
121
+ embeddings = self.patch_projection(flattened_patches)
122
+ row_embeddings = self.row_embedder(row_indices)
123
+ col_embeddings = self.column_embedder(col_indices)
124
+
125
+ # sum all embeddings together
126
+ embeddings = embeddings + row_embeddings + col_embeddings
127
+
128
+ embeddings = self.dropout(embeddings)
129
+
130
+ return embeddings
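# A minimal sketch of the input consumed by the embedding layer above: the first
# two features of each flattened patch are the 1-based row/column ids written by
# the image processor, the rest are the pixel values to be projected (toy sizes).
import torch

patch_dim = 6
flattened_patches = torch.zeros(1, 4, 2 + patch_dim)
flattened_patches[:, :, 0] = torch.tensor([1.0, 1.0, 2.0, 0.0])  # row ids, 0 = padding patch
flattened_patches[:, :, 1] = torch.tensor([1.0, 2.0, 1.0, 0.0])  # column ids

row_indices = flattened_patches[:, :, 0].long()
col_indices = flattened_patches[:, :, 1].long()
pixel_part = flattened_patches[:, :, 2:]
print(row_indices.tolist(), col_indices.tolist(), pixel_part.shape)
# [[1, 1, 2, 0]] [[1, 2, 1, 0]] torch.Size([1, 4, 6])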
131
+
132
+
133
+ class Pix2StructVisionAttention(nn.Module):
134
+ def __init__(self, config):
135
+ super().__init__()
136
+ self.hidden_size = config.hidden_size
137
+ self.key_value_proj_dim = config.d_kv
138
+ self.n_heads = config.num_attention_heads
139
+ self.dropout = config.attention_dropout
140
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
141
+
142
+ # Mesh TensorFlow initialization to avoid scaling before softmax
143
+ self.query = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
144
+ self.key = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
145
+ self.value = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
146
+ self.output = nn.Linear(self.inner_dim, self.hidden_size, bias=False)
147
+
148
+ self.gradient_checkpointing = False
149
+
150
+ def forward(
151
+ self,
152
+ hidden_states,
153
+ attention_mask=None,
154
+ position_bias=None,
155
+ layer_head_mask=None,
156
+ output_attentions=False,
157
+ ):
158
+ """
159
+ Self-attention block
160
+ """
161
+ # Input is (batch_size, seq_length, dim)
162
+ # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
163
+ # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
164
+ batch_size, seq_length = hidden_states.shape[:2]
165
+
166
+ def to_projection_shape(states):
167
+ """projection"""
168
+ return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
169
+
170
+ # get query states
171
+ # (batch_size, n_heads, seq_length, dim_per_head)
172
+ query_states = to_projection_shape(self.query(hidden_states))
173
+
174
+ # get key/value states
175
+ key_states = to_projection_shape(self.key(hidden_states))
176
+ value_states = to_projection_shape(self.value(hidden_states))
177
+
178
+ # compute scores
179
+ # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
180
+ scores = torch.matmul(query_states, key_states.transpose(3, 2))
181
+
182
+ if position_bias is None:
183
+ position_bias = torch.zeros(
184
+ (1, self.n_heads, seq_length, seq_length), device=scores.device, dtype=scores.dtype
185
+ )
186
+ if self.gradient_checkpointing and self.training:
187
+ position_bias.requires_grad = True
188
+
189
+ if attention_mask is None:
190
+ attention_mask = torch.ones((batch_size, seq_length), device=scores.device, dtype=scores.dtype)
191
+
192
+ if attention_mask.dim() == 2:
193
+ position_bias = position_bias + attention_mask[:, None, None, :].to(position_bias.device)
194
+ else:
195
+ # (batch_size, n_heads, seq_length, key_length)
196
+ position_bias = position_bias + attention_mask.to(position_bias.device)
197
+ position_bias = 1 - position_bias
198
+
199
+ position_bias_masked = position_bias.masked_fill(position_bias == 1, torch.finfo(scores.dtype).min)
200
+ scores += position_bias_masked
201
+ scores = torch.max(scores, torch.tensor(torch.finfo(scores.dtype).min))
202
+
203
+ # (batch_size, n_heads, seq_length, key_length)
204
+ attn_weights = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).type_as(scores)
205
+
206
+ # (batch_size, n_heads, seq_length, key_length)
207
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
208
+
209
+ # Mask heads if we want to
210
+ if layer_head_mask is not None:
211
+ attn_weights = attn_weights * layer_head_mask
212
+
213
+ attn_output = torch.matmul(attn_weights, value_states)
214
+
215
+ # (batch_size, seq_length, dim)
216
+ attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
217
+
218
+ attn_output = self.output(attn_output)
219
+
220
+ outputs = (attn_output,) + (position_bias,)
221
+
222
+ if output_attentions:
223
+ outputs = outputs + (attn_weights,)
224
+ return outputs
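# A minimal sketch of the masking convention used above: a 2-D mask with 1 = keep
# and 0 = padding becomes an additive bias of 0 / large-negative before the softmax.
import torch

mask = torch.tensor([[1.0, 1.0, 0.0]])  # one sequence, last patch is padding
bias = torch.zeros(1, 1, 1, 3) + mask[:, None, None, :]
bias = 1 - bias
bias = bias.masked_fill(bias == 1, torch.finfo(torch.float32).min)
print(bias.flatten().tolist())  # [0.0, 0.0, ~-3.4e38] -> the padded position is suppressed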
225
+
226
+
227
+ # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5DenseGatedActDense->Pix2StructVisionMlp,T5Config->Pix2StructVisionConfig,config.d_model->config.hidden_size,dropout_rate->dropout_rate
228
+ class Pix2StructVisionMlp(nn.Module):
229
+ def __init__(self, config: Pix2StructVisionConfig):
230
+ super().__init__()
231
+ self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
232
+ self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
233
+ self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False)
234
+ self.dropout = nn.Dropout(config.dropout_rate)
235
+ self.act = ACT2FN[config.dense_act_fn]
236
+
237
+ def forward(self, hidden_states):
238
+ hidden_gelu = self.act(self.wi_0(hidden_states))
239
+ hidden_linear = self.wi_1(hidden_states)
240
+ hidden_states = hidden_gelu * hidden_linear
241
+ hidden_states = self.dropout(hidden_states)
242
+
243
+ # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
244
+ # See https://github.com/huggingface/transformers/issues/20287
245
+ # we also make sure the weights are not in `int8` in case users force `_keep_in_fp32_modules` to be `None`
246
+ if (
247
+ isinstance(self.wo.weight, torch.Tensor)
248
+ and hidden_states.dtype != self.wo.weight.dtype
249
+ and self.wo.weight.dtype != torch.int8
250
+ ):
251
+ hidden_states = hidden_states.to(self.wo.weight.dtype)
252
+
253
+ hidden_states = self.wo(hidden_states)
254
+ return hidden_states
255
+
256
+
257
+ class Pix2StructVisionLayer(nn.Module):
258
+ def __init__(self, config: Pix2StructConfig) -> None:
259
+ super().__init__()
260
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
261
+ self.seq_len_dim = 1
262
+ self.attention = Pix2StructVisionAttention(config)
263
+ self.mlp = Pix2StructVisionMlp(config)
264
+ self.pre_mlp_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
265
+ self.pre_attention_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
266
+
267
+ def forward(
268
+ self,
269
+ hidden_states: torch.Tensor,
270
+ attention_mask: Optional[torch.Tensor] = None,
271
+ head_mask: Optional[torch.Tensor] = None,
272
+ output_attentions: bool = False,
273
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
274
+ residual = hidden_states
275
+
276
+ # in Pix2StructVision, layernorm is applied before self-attention
277
+ hidden_states = self.pre_attention_layer_norm(hidden_states)
278
+
279
+ self_attention_outputs = self.attention(
280
+ hidden_states,
281
+ attention_mask=attention_mask,
282
+ layer_head_mask=head_mask,
283
+ output_attentions=output_attentions,
284
+ )
285
+ attention_output = self_attention_outputs[0]
286
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
287
+
288
+ # first residual connection
289
+ hidden_states = attention_output + residual
290
+
291
+ # in Pix2StructVision, layernorm is also applied after self-attention
292
+ layer_output = self.pre_mlp_layer_norm(hidden_states)
293
+ layer_output = self.mlp(layer_output) + hidden_states # second residual connection
294
+
295
+ outputs = (layer_output,) + outputs
296
+
297
+ return outputs
298
+
299
+
300
+ class Pix2StructVisionEncoder(nn.Module):
301
+ def __init__(self, config: Pix2StructConfig) -> None:
302
+ super().__init__()
303
+ self.config = config
304
+ self.layer = nn.ModuleList([Pix2StructVisionLayer(config) for _ in range(config.num_hidden_layers)])
305
+ self.gradient_checkpointing = False
306
+
307
+ def forward(
308
+ self,
309
+ hidden_states: torch.Tensor,
310
+ attention_mask: Optional[torch.Tensor] = None,
311
+ head_mask: Optional[torch.Tensor] = None,
312
+ output_attentions: bool = False,
313
+ output_hidden_states: bool = False,
314
+ return_dict: bool = True,
315
+ ) -> Union[tuple, BaseModelOutput]:
316
+ all_hidden_states = () if output_hidden_states else None
317
+ all_self_attentions = () if output_attentions else None
318
+
319
+ for i, layer_module in enumerate(self.layer):
320
+ if output_hidden_states:
321
+ all_hidden_states = all_hidden_states + (hidden_states,)
322
+
323
+ layer_head_mask = head_mask[i] if head_mask is not None else None
324
+
325
+ if self.gradient_checkpointing and self.training:
326
+ layer_outputs = self._gradient_checkpointing_func(
327
+ layer_module.__call__,
328
+ hidden_states,
329
+ attention_mask,
330
+ layer_head_mask,
331
+ output_attentions,
332
+ )
333
+ else:
334
+ layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
335
+
336
+ hidden_states = layer_outputs[0]
337
+
338
+ if output_attentions:
339
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
340
+
341
+ if output_hidden_states:
342
+ all_hidden_states = all_hidden_states + (hidden_states,)
343
+
344
+ if not return_dict:
345
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
346
+ return BaseModelOutput(
347
+ last_hidden_state=hidden_states,
348
+ hidden_states=all_hidden_states,
349
+ attentions=all_self_attentions,
350
+ )
351
+
352
+
353
+ class Pix2StructPreTrainedModel(PreTrainedModel):
354
+ """
355
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
356
+ models.
357
+ """
358
+
359
+ config_class = Pix2StructConfig
360
+
361
+ @property
362
+ def dummy_inputs(self):
363
+ input_ids = torch.tensor(DUMMY_INPUTS)
364
+ input_mask = torch.tensor(DUMMY_MASK)
365
+ dummy_inputs = {
366
+ "decoder_input_ids": input_ids,
367
+ "input_ids": input_ids,
368
+ "decoder_attention_mask": input_mask,
369
+ }
370
+ return dummy_inputs
371
+
372
+ def _init_weights(self, module):
373
+ """Initialize the weights"""
374
+ factor = self.config.initializer_factor # Used for testing weights initialization
375
+ if isinstance(module, Pix2StructLayerNorm):
376
+ module.weight.data.fill_(factor * 1.0)
377
+ elif isinstance(module, Pix2StructTextDenseGatedActDense):
378
+ hidden_size = (
379
+ self.config.text_config.hidden_size
380
+ if isinstance(self.config, Pix2StructConfig)
381
+ else self.config.hidden_size
382
+ )
383
+ d_ff = self.config.text_config.d_ff if isinstance(self.config, Pix2StructConfig) else self.config.d_ff
384
+
385
+ module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
386
+ if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
387
+ module.wi_0.bias.data.zero_()
388
+ module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
389
+ if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
390
+ module.wi_1.bias.data.zero_()
391
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((d_ff) ** -0.5))
392
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
393
+ module.wo.bias.data.zero_()
394
+ elif isinstance(module, Pix2StructTextAttention):
395
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
396
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
397
+ hidden_size = (
398
+ self.config.text_config.hidden_size
399
+ if isinstance(self.config, Pix2StructConfig)
400
+ else self.config.hidden_size
401
+ )
402
+ key_value_proj_dim = (
403
+ self.config.text_config.d_kv if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size
404
+ )
405
+ n_heads = (
406
+ self.config.text_config.num_heads
407
+ if isinstance(self.config, Pix2StructConfig)
408
+ else self.config.num_heads
409
+ )
410
+
411
+ module.query.weight.data.normal_(mean=0.0, std=factor * ((hidden_size * key_value_proj_dim) ** -0.5))
412
+ module.key.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5))
413
+ module.value.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5))
414
+ module.output.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
415
+ if module.has_relative_attention_bias:
416
+ module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
417
+ elif isinstance(module, nn.Embedding):
418
+ hidden_size = (
419
+ self.config.text_config.hidden_size
420
+ if isinstance(self.config, Pix2StructConfig)
421
+ else self.config.hidden_size
422
+ )
423
+
424
+ module.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
425
+ if module.padding_idx is not None:
426
+ module.weight.data[module.padding_idx].zero_()
427
+ elif isinstance(module, Pix2StructTextModel):
428
+ hidden_size = (
429
+ self.config.text_config.hidden_size
430
+ if isinstance(self.config, Pix2StructConfig)
431
+ else self.config.hidden_size
432
+ )
433
+
434
+ module.lm_head.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
435
+ elif isinstance(module, (nn.Linear, nn.Conv2d)):
436
+ # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
437
+ # `trunc_normal_cpu` not implemented in `half` issues
438
+ module.weight.data = nn.init.trunc_normal_(
439
+ module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
440
+ ).to(module.weight.dtype)
441
+ if module.bias is not None:
442
+ module.bias.data.zero_()
443
+ elif isinstance(module, Pix2StructLayerNorm):
444
+ if module.weight is not None:
445
+ module.weight.data.fill_(1.0)
446
+ elif isinstance(module, nn.Embedding):
447
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
448
+ if module.padding_idx is not None:
449
+ module.weight.data[module.padding_idx].zero_()
450
+
451
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->Pix2Struct
452
+ def _shift_right(self, input_ids):
453
+ decoder_start_token_id = self.config.decoder_start_token_id
454
+ pad_token_id = self.config.pad_token_id
455
+
456
+ if decoder_start_token_id is None:
457
+ raise ValueError(
458
+ "self.model.config.decoder_start_token_id has to be defined. In Pix2Struct it is usually set to the pad_token_id. "
459
+ "See Pix2Struct docs for more information."
460
+ )
461
+
462
+ # shift inputs to the right
463
+ if is_torch_fx_proxy(input_ids):
464
+ # Item assignment is not supported natively for proxies.
465
+ shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
466
+ shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
467
+ else:
468
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
469
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
470
+ shifted_input_ids[..., 0] = decoder_start_token_id
471
+
472
+ if pad_token_id is None:
473
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
474
+ # replace possible -100 values in labels by `pad_token_id`
475
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
476
+
477
+ return shifted_input_ids
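# A worked sketch of _shift_right above: labels move one position to the right,
# the decoder start token is prepended, and -100 padding becomes pad_token_id.
import torch

labels = torch.tensor([[42, 7, 99, -100]])
decoder_start_token_id = pad_token_id = 0  # Pix2Struct usually starts decoding with the pad token

shifted = labels.new_zeros(labels.shape)
shifted[..., 1:] = labels[..., :-1].clone()
shifted[..., 0] = decoder_start_token_id
shifted.masked_fill_(shifted == -100, pad_token_id)
print(shifted.tolist())  # [[0, 42, 7, 99]]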
478
+
479
+
480
+ PIX2STRUCT_VISION_START_DOCSTRING = r"""
481
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
482
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
483
+ behavior.
484
+
485
+ Parameters:
486
+ config ([`Pix2StructConfig`]): Model configuration class with all the parameters of the model.
487
+ Initializing with a config file does not load the weights associated with the model, only the
488
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
489
+ """
490
+
491
+ PIX2STRUCT_VISION_INPUTS_DOCSTRING = r"""
492
+ Args:
493
+ flattened_patches (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_channels x patch_height x patch_width)`):
494
+ Flattened and padded pixel values. These values can be obtained using [`AutoImageProcessor`]. See
495
+ [`Pix2StructImageProcessor.__call__`] for details. Check the [original
496
+ paper](https://arxiv.org/abs/2210.03347) (figure 5) for more details.
497
+
498
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
499
+ Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
+
+ - 1 for patches that are **not masked**,
+ - 0 for patches that are **masked**.
500
+
501
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
502
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
503
+
504
+ - 1 indicates the head is **not masked**,
505
+ - 0 indicates the head is **masked**.
506
+
507
+ output_attentions (`bool`, *optional*):
508
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
509
+ tensors for more detail.
510
+ output_hidden_states (`bool`, *optional*):
511
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
512
+ more detail.
513
+ return_dict (`bool`, *optional*):
514
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
515
+ """
516
+
517
+
518
+ @add_start_docstrings(
519
+ "The bare Pix2StructVision Model transformer outputting raw hidden-states without any specific head on top.",
520
+ PIX2STRUCT_VISION_START_DOCSTRING,
521
+ )
522
+ class Pix2StructVisionModel(Pix2StructPreTrainedModel):
523
+ config_class = Pix2StructVisionConfig
524
+ main_input_name = "flattened_patches"
525
+ supports_gradient_checkpointing = True
526
+ _no_split_modules = ["Pix2StructVisionLayer"]
527
+
528
+ def __init__(self, config: Pix2StructConfig):
529
+ super().__init__(config)
530
+ self.config = config
531
+
532
+ self.embeddings = Pix2StructVisionEmbeddings(config)
533
+ self.encoder = Pix2StructVisionEncoder(config)
534
+
535
+ self.layernorm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
536
+
537
+ # Initialize weights and apply final processing
538
+ self.post_init()
539
+
540
+ def get_input_embeddings(self):
541
+ return self.embeddings.patch_projection
542
+
543
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
544
+ """
545
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
546
+ class PreTrainedModel
547
+ """
548
+ for layer, heads in heads_to_prune.items():
549
+ self.encoder.layer[layer].attention.prune_heads(heads)
550
+
551
+ @add_start_docstrings_to_model_forward(PIX2STRUCT_VISION_INPUTS_DOCSTRING)
552
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
553
+ def forward(
554
+ self,
555
+ flattened_patches: Optional[torch.Tensor] = None,
556
+ attention_mask: Optional[torch.Tensor] = None,
557
+ head_mask: Optional[torch.Tensor] = None,
558
+ output_attentions: Optional[bool] = None,
559
+ output_hidden_states: Optional[bool] = None,
560
+ return_dict: Optional[bool] = None,
561
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
562
+ r"""
563
+ Returns:
564
+
565
+ Example:
566
+
567
+ ```python
568
+ >>> import requests
569
+ >>> from PIL import Image
570
+ >>> from transformers import AutoProcessor, Pix2StructVisionModel
571
+
572
+ >>> image_processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
573
+ >>> model = Pix2StructVisionModel.from_pretrained("google/pix2struct-textcaps-base")
574
+
575
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
576
+ >>> image = Image.open(requests.get(url, stream=True).raw)
577
+
578
+ >>> inputs = image_processor(images=image, return_tensors="pt")
579
+ >>> with torch.no_grad():
580
+ ... outputs = model(**inputs)
581
+
582
+ >>> last_hidden_states = outputs.last_hidden_state
583
+ >>> list(last_hidden_states.shape)
584
+ [1, 2048, 768]
585
+ ```
586
+ """
587
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
588
+ output_hidden_states = (
589
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
590
+ )
591
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
592
+
593
+ if flattened_patches is None:
594
+ raise ValueError("You have to specify flattened_patches")
595
+
596
+ if attention_mask is None:
597
+ # check where `flattened_patches` is not 0
598
+ attention_mask = (flattened_patches.sum(dim=-1) != 0).float()
599
+
600
+ # Prepare head mask if needed
601
+ # 1.0 in head_mask indicate we keep the head
602
+ # attention_probs has shape bsz x n_heads x N x N
603
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
604
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
605
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
606
+
607
+ embedding_output = self.embeddings(flattened_patches)
608
+
609
+ encoder_outputs = self.encoder(
610
+ embedding_output,
611
+ attention_mask=attention_mask,
612
+ head_mask=head_mask,
613
+ output_attentions=output_attentions,
614
+ output_hidden_states=output_hidden_states,
615
+ return_dict=return_dict,
616
+ )
617
+ sequence_output = encoder_outputs[0]
618
+ sequence_output = self.layernorm(sequence_output)
619
+
620
+ if not return_dict:
621
+ head_outputs = (sequence_output,)
622
+ return head_outputs + encoder_outputs[1:]
623
+
624
+ return BaseModelOutput(
625
+ last_hidden_state=sequence_output,
626
+ hidden_states=encoder_outputs.hidden_states,
627
+ attentions=encoder_outputs.attentions,
628
+ )
629
+
630
+
631
+ # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->Pix2StructText,d_model->hidden_size
632
+ class Pix2StructTextDenseGatedActDense(nn.Module):
633
+ def __init__(self, config: Pix2StructTextConfig):
634
+ super().__init__()
635
+ self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
636
+ self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
637
+ self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False)
638
+ self.dropout = nn.Dropout(config.dropout_rate)
639
+ self.act = ACT2FN[config.dense_act_fn]
640
+
641
+ def forward(self, hidden_states):
642
+ hidden_gelu = self.act(self.wi_0(hidden_states))
643
+ hidden_linear = self.wi_1(hidden_states)
644
+ hidden_states = hidden_gelu * hidden_linear
645
+ hidden_states = self.dropout(hidden_states)
646
+
647
+ # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
648
+ # See https://github.com/huggingface/transformers/issues/20287
649
+ # we also make sure the weights are not in `int8` in case users force `_keep_in_fp32_modules` to be `None`
650
+ if (
651
+ isinstance(self.wo.weight, torch.Tensor)
652
+ and hidden_states.dtype != self.wo.weight.dtype
653
+ and self.wo.weight.dtype != torch.int8
654
+ ):
655
+ hidden_states = hidden_states.to(self.wo.weight.dtype)
656
+
657
+ hidden_states = self.wo(hidden_states)
658
+ return hidden_states
659
+
660
+
661
+ class Pix2StructTextLayerFF(nn.Module):
662
+ def __init__(self, config: Pix2StructTextConfig):
663
+ super().__init__()
664
+ self.DenseReluDense = Pix2StructTextDenseGatedActDense(config)
665
+
666
+ self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
667
+ self.dropout = nn.Dropout(config.dropout_rate)
668
+
669
+ # Copied from transformers.models.t5.modeling_t5.T5LayerFF.forward
670
+ def forward(self, hidden_states):
671
+ forwarded_states = self.layer_norm(hidden_states)
672
+ forwarded_states = self.DenseReluDense(forwarded_states)
673
+ hidden_states = hidden_states + self.dropout(forwarded_states)
674
+ return hidden_states
675
+
676
+
677
+ class Pix2StructTextAttention(nn.Module):
678
+ def __init__(self, config: Pix2StructTextConfig, has_relative_attention_bias=False):
679
+ super().__init__()
680
+ self.has_relative_attention_bias = has_relative_attention_bias
681
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
682
+ self.relative_attention_max_distance = config.relative_attention_max_distance
683
+ self.hidden_size = config.hidden_size
684
+ self.key_value_proj_dim = config.d_kv
685
+ self.n_heads = config.num_heads
686
+ self.dropout = config.dropout_rate
687
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
688
+
689
+ # Mesh TensorFlow initialization to avoid scaling before softmax
690
+ self.query = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
691
+ self.key = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
692
+ self.value = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
693
+ self.output = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
694
+
695
+ if self.has_relative_attention_bias:
696
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
697
+ self.pruned_heads = set()
698
+ self.gradient_checkpointing = False
699
+
700
+ @staticmethod
701
+ # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
702
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
703
+ """
704
+ Adapted from Mesh Tensorflow:
705
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
706
+
707
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
708
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
709
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
710
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
711
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
712
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
713
+
714
+ Args:
715
+ relative_position: an int32 Tensor
716
+ bidirectional: a boolean - whether the attention is bidirectional
717
+ num_buckets: an integer
718
+ max_distance: an integer
719
+
720
+ Returns:
721
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
722
+ """
723
+ relative_buckets = 0
724
+ if bidirectional:
725
+ num_buckets //= 2
726
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
727
+ relative_position = torch.abs(relative_position)
728
+ else:
729
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
730
+ # now relative_position is in the range [0, inf)
731
+
732
+ # half of the buckets are for exact increments in positions
733
+ max_exact = num_buckets // 2
734
+ is_small = relative_position < max_exact
735
+
736
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
737
+ relative_position_if_large = max_exact + (
738
+ torch.log(relative_position.float() / max_exact)
739
+ / math.log(max_distance / max_exact)
740
+ * (num_buckets - max_exact)
741
+ ).to(torch.long)
742
+ relative_position_if_large = torch.min(
743
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
744
+ )
745
+
746
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
747
+ return relative_buckets
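# A worked sketch of the bucketing above for the decoder (bidirectional=False):
# distances 0..15 keep their own bucket, larger distances share log-spaced
# buckets, and everything at or beyond max_distance collapses into the last one.
import torch
from transformers.models.pix2struct.modeling_pix2struct import Pix2StructTextAttention

relative_position = torch.tensor([0, -1, -15, -16, -64, -128, -1000])  # memory_position - query_position
buckets = Pix2StructTextAttention._relative_position_bucket(
    relative_position, bidirectional=False, num_buckets=32, max_distance=128
)
print(buckets.tolist())  # [0, 1, 15, 16, 26, 31, 31]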
748
+
749
+ # Adapted from transformers.models.t5.modeling_t5.T5Attention.compute_bias
750
+ def compute_bias(self, query_length, key_length, device=None):
751
+ """Compute binned relative position bias"""
752
+ if device is None:
753
+ device = self.relative_attention_bias.weight.device
754
+ context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
755
+ memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
756
+ relative_position = memory_position - context_position # shape (query_length, key_length)
757
+ relative_position_bucket = self._relative_position_bucket(
758
+ relative_position, # shape (query_length, key_length)
759
+ bidirectional=False,
760
+ num_buckets=self.relative_attention_num_buckets,
761
+ max_distance=self.relative_attention_max_distance,
762
+ )
763
+ values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
764
+ values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
765
+ return values
766
+
767
+ def forward(
768
+ self,
769
+ hidden_states,
770
+ mask=None,
771
+ key_value_states=None,
772
+ position_bias=None,
773
+ past_key_value=None,
774
+ layer_head_mask=None,
775
+ query_length=None,
776
+ use_cache=False,
777
+ output_attentions=False,
778
+ ):
779
+ """
780
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
781
+ """
782
+ # Input is (batch_size, seq_length, dim)
783
+ # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
784
+ # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
785
+ batch_size, seq_length = hidden_states.shape[:2]
786
+
787
+ real_seq_length = seq_length
788
+
789
+ if past_key_value is not None:
790
+ if len(past_key_value) != 2:
791
+ raise ValueError(
792
+ f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
793
+ )
794
+ real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
795
+
796
+ key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
797
+
798
+ def to_projection_shape(states):
799
+ """projection"""
800
+ return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
801
+
802
+ def project(hidden_states, proj_layer, key_value_states, past_key_value):
803
+ """projects hidden states correctly to key/query states"""
804
+ if key_value_states is None:
805
+ # self-attn
806
+ # (batch_size, n_heads, seq_length, dim_per_head)
807
+ hidden_states = to_projection_shape(proj_layer(hidden_states))
808
+ elif past_key_value is None:
809
+ # cross-attn
810
+ # (batch_size, n_heads, seq_length, dim_per_head)
811
+ hidden_states = to_projection_shape(proj_layer(key_value_states))
812
+
813
+ if past_key_value is not None:
814
+ if key_value_states is None:
815
+ # self-attn
816
+ # (batch_size, n_heads, key_length, dim_per_head)
817
+ hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
818
+ elif past_key_value.shape[2] != key_value_states.shape[1]:
819
+ # checking that the `sequence_length` of the `past_key_value` is the same as
820
+ # the provided `key_value_states` to support prefix tuning
821
+ # cross-attn
822
+ # (batch_size, n_heads, seq_length, dim_per_head)
823
+ hidden_states = to_projection_shape(proj_layer(key_value_states))
824
+ else:
825
+ # cross-attn
826
+ hidden_states = past_key_value
827
+ return hidden_states
828
+
829
+ # get query states
830
+ # (batch_size, n_heads, seq_length, dim_per_head)
831
+ query_states = to_projection_shape(self.query(hidden_states))
832
+
833
+ # get key/value states
834
+ key_states = project(
835
+ hidden_states, self.key, key_value_states, past_key_value[0] if past_key_value is not None else None
836
+ )
837
+ value_states = project(
838
+ hidden_states, self.value, key_value_states, past_key_value[1] if past_key_value is not None else None
839
+ )
840
+
841
+ # compute scores
842
+ scores = torch.matmul(
843
+ query_states, key_states.transpose(3, 2)
844
+ ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
845
+
846
+ if position_bias is None:
847
+ if not self.has_relative_attention_bias:
848
+ position_bias = torch.zeros(
849
+ (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
850
+ )
851
+ if self.gradient_checkpointing and self.training:
852
+ position_bias.requires_grad = True
853
+ else:
854
+ position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
855
+
856
+ # if key and values are already calculated
857
+ # we want only the last query position bias
858
+ if past_key_value is not None:
859
+ position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
860
+
861
+ if mask is not None:
862
+ position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
863
+
864
+ if self.pruned_heads:
865
+ mask = torch.ones(position_bias.shape[1])
866
+ mask[list(self.pruned_heads)] = 0
867
+ position_bias_masked = position_bias[:, mask.bool()]
868
+ else:
869
+ position_bias_masked = position_bias
870
+
871
+ scores += position_bias_masked
872
+ # (batch_size, n_heads, seq_length, key_length)
873
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
874
+
875
+ # (batch_size, n_heads, seq_length, key_length)
876
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
877
+
878
+ # Mask heads if we want to
879
+ if layer_head_mask is not None:
880
+ attn_weights = attn_weights * layer_head_mask
881
+
882
+ attn_output = torch.matmul(attn_weights, value_states)
883
+ # (batch_size, seq_length, dim)
884
+ attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
885
+
886
+ attn_output = self.output(attn_output)
887
+
888
+ present_key_value_state = (key_states, value_states) if use_cache else None
889
+ outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
890
+
891
+ if output_attentions:
892
+ outputs = outputs + (attn_weights,)
893
+ return outputs
894
+
895
+
896
+ # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.SelfAttention->self.attention,config.d_model->config.hidden_size
897
+ class Pix2StructTextLayerSelfAttention(nn.Module):
898
+ def __init__(self, config, has_relative_attention_bias=False):
899
+ super().__init__()
900
+ self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=has_relative_attention_bias)
901
+ self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
902
+ self.dropout = nn.Dropout(config.dropout_rate)
903
+
904
+ def forward(
905
+ self,
906
+ hidden_states,
907
+ attention_mask=None,
908
+ position_bias=None,
909
+ layer_head_mask=None,
910
+ past_key_value=None,
911
+ use_cache=False,
912
+ output_attentions=False,
913
+ ):
914
+ normed_hidden_states = self.layer_norm(hidden_states)
915
+ attention_output = self.attention(
916
+ normed_hidden_states,
917
+ mask=attention_mask,
918
+ position_bias=position_bias,
919
+ layer_head_mask=layer_head_mask,
920
+ past_key_value=past_key_value,
921
+ use_cache=use_cache,
922
+ output_attentions=output_attentions,
923
+ )
924
+ hidden_states = hidden_states + self.dropout(attention_output[0])
925
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
926
+ return outputs
927
+
928
+
929
+ # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.EncDecAttention->self.attention,config.d_model->config.hidden_size
930
+ class Pix2StructTextLayerCrossAttention(nn.Module):
931
+ def __init__(self, config):
932
+ super().__init__()
933
+ self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=False)
934
+ self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
935
+ self.dropout = nn.Dropout(config.dropout_rate)
936
+
937
+ def forward(
938
+ self,
939
+ hidden_states,
940
+ key_value_states,
941
+ attention_mask=None,
942
+ position_bias=None,
943
+ layer_head_mask=None,
944
+ past_key_value=None,
945
+ use_cache=False,
946
+ query_length=None,
947
+ output_attentions=False,
948
+ ):
949
+ normed_hidden_states = self.layer_norm(hidden_states)
950
+ attention_output = self.attention(
951
+ normed_hidden_states,
952
+ mask=attention_mask,
953
+ key_value_states=key_value_states,
954
+ position_bias=position_bias,
955
+ layer_head_mask=layer_head_mask,
956
+ past_key_value=past_key_value,
957
+ use_cache=use_cache,
958
+ query_length=query_length,
959
+ output_attentions=output_attentions,
960
+ )
961
+ layer_output = hidden_states + self.dropout(attention_output[0])
962
+ outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
963
+ return outputs
964
+
965
+
966
+ class Pix2StructTextBlock(nn.Module):
967
+ def __init__(self, config, has_relative_attention_bias=False):
968
+ super().__init__()
969
+
970
+ self.self_attention = Pix2StructTextLayerSelfAttention(
971
+ config, has_relative_attention_bias=has_relative_attention_bias
972
+ )
973
+
974
+ self.encoder_decoder_attention = Pix2StructTextLayerCrossAttention(config)
975
+
976
+ self.mlp = Pix2StructTextLayerFF(config)
977
+
978
+ def forward(
979
+ self,
980
+ hidden_states,
981
+ attention_mask=None,
982
+ position_bias=None,
983
+ encoder_hidden_states=None,
984
+ encoder_attention_mask=None,
985
+ encoder_decoder_position_bias=None,
986
+ layer_head_mask=None,
987
+ cross_attn_layer_head_mask=None,
988
+ past_key_value=None,
989
+ use_cache=False,
990
+ output_attentions=False,
991
+ return_dict=True,
992
+ ):
993
+ if past_key_value is not None:
994
+ expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
995
+
996
+ if len(past_key_value) != expected_num_past_key_values:
997
+ raise ValueError(
998
+ f"There should be {expected_num_past_key_values} past states. "
999
+ f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
1000
+ f"Got {len(past_key_value)} past key / value states"
1001
+ )
1002
+
1003
+ self_attn_past_key_value = past_key_value[:2]
1004
+ cross_attn_past_key_value = past_key_value[2:]
1005
+ else:
1006
+ self_attn_past_key_value, cross_attn_past_key_value = None, None
1007
+
1008
+ self_attention_outputs = self.self_attention(
1009
+ hidden_states,
1010
+ attention_mask=attention_mask,
1011
+ position_bias=position_bias,
1012
+ layer_head_mask=layer_head_mask,
1013
+ past_key_value=self_attn_past_key_value,
1014
+ use_cache=use_cache,
1015
+ output_attentions=output_attentions,
1016
+ )
1017
+ hidden_states, present_key_value_state = self_attention_outputs[:2]
1018
+ attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
1019
+
1020
+ # clamp inf values to enable fp16 training
1021
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
1022
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
1023
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
1024
+
1025
+ do_cross_attention = encoder_hidden_states is not None
1026
+ if do_cross_attention:
1027
+ # the actual query length is unknown for cross attention
1028
+ # if using past key value states. Need to inject it here
1029
+ if present_key_value_state is not None:
1030
+ query_length = present_key_value_state[0].shape[2]
1031
+ else:
1032
+ query_length = None
1033
+
1034
+ cross_attention_outputs = self.encoder_decoder_attention(
1035
+ hidden_states,
1036
+ key_value_states=encoder_hidden_states,
1037
+ attention_mask=encoder_attention_mask,
1038
+ position_bias=encoder_decoder_position_bias,
1039
+ layer_head_mask=cross_attn_layer_head_mask,
1040
+ past_key_value=cross_attn_past_key_value,
1041
+ query_length=query_length,
1042
+ use_cache=use_cache,
1043
+ output_attentions=output_attentions,
1044
+ )
1045
+ hidden_states = cross_attention_outputs[0]
1046
+
1047
+ # clamp inf values to enable fp16 training
1048
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
1049
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
1050
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
1051
+
1052
+ # Combine self attn and cross attn key value states
1053
+ if present_key_value_state is not None:
1054
+ present_key_value_state = present_key_value_state + cross_attention_outputs[1]
1055
+
1056
+ # Keep cross-attention outputs and relative position weights
1057
+ attention_outputs = attention_outputs + cross_attention_outputs[2:]
1058
+
1059
+ # Apply Feed Forward layer
1060
+ hidden_states = self.mlp(hidden_states)
1061
+
1062
+ # clamp inf values to enable fp16 training
1063
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
1064
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
1065
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
1066
+
1067
+ outputs = (hidden_states,)
1068
+
1069
+ if use_cache:
1070
+ outputs = outputs + (present_key_value_state,) + attention_outputs
1071
+ else:
1072
+ outputs = outputs + attention_outputs
1073
+
1074
+ return outputs
1075
+
1076
+
1077
+ PIX2STRUCT_START_DOCSTRING = r"""
1078
+
1079
+ The Pix2Struct model was proposed in [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language
1080
+ Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu,
1081
+ Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. It's an encoder-decoder
1082
+ transformer pre-trained in an image-to-text setting.
1083
+
1084
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1085
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1086
+ etc.)
1087
+
1088
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1089
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
1090
+ and behavior.
1091
+
1092
+ Parameters:
1093
+ config (Union[`Pix2StructConfig`, `Pix2StructTextConfig`]):
1094
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
1095
+ load the weights associated with the model, only the configuration. Check out the
1096
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1097
+ """
1098
+
1099
+ PIX2STRUCT_TEXT_INPUTS_DOCSTRING = r"""
1100
+ Args:
1101
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1102
+ Indices of input sequence tokens in the vocabulary. Pix2StructText is a model with relative position
1103
+ embeddings so you should be able to pad the inputs on both the right and the left.
1104
+
1105
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1106
+ [`PreTrainedTokenizer.__call__`] for details.
1107
+
1108
+ [What are input IDs?](../glossary#input-ids)
1109
+
1110
+ To learn more about how to prepare `input_ids` for pretraining, take a look at [Pix2StructText
1111
+ Training](./t5#training).
1112
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1113
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1114
+
1115
+ - 1 for tokens that are **not masked**,
1116
+ - 0 for tokens that are **masked**.
1117
+
1118
+ [What are attention masks?](../glossary#attention-mask)
1119
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1120
+ Indices of decoder input sequence tokens in the vocabulary.
1121
+
1122
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1123
+ [`PreTrainedTokenizer.__call__`] for details.
1124
+
1125
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
1126
+
1127
+ Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
1128
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1129
+ `past_key_values`).
1130
+
1131
+ To learn more about how to prepare `decoder_input_ids` for pretraining, take a look at [Pix2StructText
1132
+ Training](./t5#training).
1133
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1134
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
1135
+ be used by default.
1136
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1137
+ Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1138
+ 1]`:
1139
+
1140
+ - 1 indicates the head is **not masked**,
1141
+ - 0 indicates the head is **masked**.
1142
+
1143
+ decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1144
+ Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1145
+ 1]`:
1146
+
1147
+ - 1 indicates the head is **not masked**,
1148
+ - 0 indicates the head is **masked**.
1149
+
1150
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1151
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
1152
+ `[0, 1]`:
1153
+
1154
+ - 1 indicates the head is **not masked**,
1155
+ - 0 indicates the head is **masked**.
1156
+
1157
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
1158
+ Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
1159
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
1160
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
1161
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1162
+ Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding.
1163
+
1164
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1165
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1166
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1167
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1168
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1169
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1170
+ model's internal embedding lookup matrix.
1171
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
1172
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
1173
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
1174
+ input (see `past_key_values`). This is useful if you want more control over how to convert
1175
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
1176
+
1177
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
1178
+ of `inputs_embeds`.
1179
+
1180
+ use_cache (`bool`, *optional*):
1181
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1182
+ `past_key_values`).
1183
+
1184
+ output_attentions (`bool`, *optional*):
1185
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1186
+ tensors for more detail.
1187
+ output_hidden_states (`bool`, *optional*):
1188
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1189
+ more detail.
1190
+ return_dict (`bool`, *optional*):
1191
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1192
+ """
1193
+
1194
+ PIX2STRUCT_INPUTS_DOCSTRING = r"""
1195
+ Args:
1196
+ flattened_patches (`torch.FloatTensor` of shape `(batch_size, seq_length, hidden_size)`):
1197
+ Flattened pixel patches. The `hidden_size` is obtained by the following formula: `hidden_size` =
1198
+ `num_channels` * `patch_size` * `patch_size`
1199
+
1200
+ The process of flattening the pixel patches is done by `Pix2StructProcessor`.
1201
+
1202
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1203
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1204
+
1205
+ - 1 for tokens that are **not masked**,
1206
+ - 0 for tokens that are **masked**.
1207
+
1208
+ [What are attention masks?](../glossary#attention-mask)
1209
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1210
+ Indices of decoder input sequence tokens in the vocabulary.
1211
+
1212
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1213
+ [`PreTrainedTokenizer.__call__`] for details.
1214
+
1215
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
1216
+
1217
+ Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
1218
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1219
+ `past_key_values`).
1220
+
1221
+ To learn more about how to prepare `decoder_input_ids` for pretraining, take a look at [Pix2StructText
1222
+ Training](./t5#training).
1223
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1224
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
1225
+ be used by default.
1226
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1227
+ Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1228
+ 1]`:
1229
+
1230
+ - 1 indicates the head is **not masked**,
1231
+ - 0 indicates the head is **masked**.
1232
+
1233
+ decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1234
+ Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1235
+ 1]`:
1236
+
1237
+ - 1 indicates the head is **not masked**,
1238
+ - 0 indicates the head is **masked**.
1239
+
1240
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1241
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
1242
+ `[0, 1]`:
1243
+
1244
+ - 1 indicates the head is **not masked**,
1245
+ - 0 indicates the head is **masked**.
1246
+
1247
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
1248
+ Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
1249
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
1250
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
1251
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1252
+ Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding.
1253
+
1254
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1255
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1256
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1257
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
1258
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
1259
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
1260
+ input (see `past_key_values`). This is useful if you want more control over how to convert
1261
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
1262
+
1263
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
1264
+ of `inputs_embeds`.
1265
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1266
+ Labels for computing the masked language modeling loss for the decoder.
1267
+
1268
+ use_cache (`bool`, *optional*):
1269
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1270
+ `past_key_values`).
1271
+
1272
+ output_attentions (`bool`, *optional*):
1273
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1274
+ tensors for more detail.
1275
+ output_hidden_states (`bool`, *optional*):
1276
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1277
+ more detail.
1278
+ return_dict (`bool`, *optional*):
1279
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1280
+ """
1281
+
1282
+
1283
+ @add_start_docstrings(
1284
+ "The standalone text decoder of Pix2Struct",
1285
+ PIX2STRUCT_START_DOCSTRING,
1286
+ )
1287
+ class Pix2StructTextModel(Pix2StructPreTrainedModel):
1288
+ config_class = Pix2StructTextConfig
1289
+ _no_split_modules = ["Pix2StructTextBlock"]
1290
+ _tied_weights_keys = ["lm_head.weight"]
1291
+ supports_gradient_checkpointing = True
1292
+
1293
+ def __init__(self, config):
1294
+ super().__init__(config)
1295
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
1296
+
1297
+ self.layer = nn.ModuleList(
1298
+ [Pix2StructTextBlock(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
1299
+ )
1300
+ self.final_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
1301
+ self.dropout = nn.Dropout(config.dropout_rate)
1302
+
1303
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1304
+
1305
+ # Initialize weights and apply final processing
1306
+ self.post_init()
1307
+ self.gradient_checkpointing = False
1308
+
1309
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._reorder_cache
1310
+ def _reorder_cache(self, past_key_values, beam_idx):
1311
+ # if decoder past is not included in output
1312
+ # speedy decoding is disabled and no need to reorder
1313
+ if past_key_values is None:
1314
+ logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
1315
+ return past_key_values
1316
+
1317
+ reordered_decoder_past = ()
1318
+ for layer_past_states in past_key_values:
1319
+ # get the correct batch idx from layer past batch dim
1320
+ # the batch dim of each `past` tensor is the first dimension (dim 0)
1321
+ reordered_layer_past_states = ()
1322
+ for layer_past_state in layer_past_states:
1323
+ # need to set correct `past` for each of the four key / value states
1324
+ reordered_layer_past_states = reordered_layer_past_states + (
1325
+ layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
1326
+ )
1327
+
1328
+ if reordered_layer_past_states[0].shape != layer_past_states[0].shape:
1329
+ raise ValueError(
1330
+ f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched"
1331
+ )
1332
+ if len(reordered_layer_past_states) != len(layer_past_states):
1333
+ raise ValueError(
1334
+ f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched"
1335
+ )
1336
+
1337
+ reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
1338
+ return reordered_decoder_past
1339
+
1340
+ def get_input_embeddings(self):
1341
+ return self.embed_tokens
1342
+
1343
+ def set_input_embeddings(self, new_embeddings):
1344
+ self.embed_tokens = new_embeddings
1345
+
1346
+ def get_output_embeddings(self):
1347
+ return self.lm_head
1348
+
1349
+ def set_output_embeddings(self, new_embeddings):
1350
+ self.lm_head = new_embeddings
1351
+
1352
+ @add_start_docstrings_to_model_forward(PIX2STRUCT_TEXT_INPUTS_DOCSTRING)
1353
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1354
+ def forward(
1355
+ self,
1356
+ input_ids: Optional[torch.LongTensor] = None,
1357
+ attention_mask: Optional[torch.FloatTensor] = None,
1358
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1359
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1360
+ inputs_embeds: Optional[torch.LongTensor] = None,
1361
+ head_mask: Optional[torch.FloatTensor] = None,
1362
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1363
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1364
+ use_cache: Optional[bool] = None,
1365
+ output_attentions: Optional[bool] = None,
1366
+ output_hidden_states: Optional[bool] = None,
1367
+ labels: Optional[torch.LongTensor] = None,
1368
+ return_dict: Optional[bool] = None,
1369
+ **kwargs,
1370
+ ) -> Union[Tuple[torch.FloatTensor, ...], CausalLMOutputWithCrossAttentions]:
1371
+ r"""
1372
+ Returns:
1373
+
1374
+ Example:
1375
+
1376
+ ```python
1377
+ >>> from transformers import AutoProcessor, Pix2StructTextModel
1378
+
1379
+ >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
1380
+ >>> model = Pix2StructTextModel.from_pretrained("google/pix2struct-textcaps-base")
1381
+
1382
+ >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
1383
+ >>> outputs = model(**inputs)
1384
+ >>> loss = outputs.loss
1385
+ ```
1386
+ """
1387
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1388
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1389
+ output_hidden_states = (
1390
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1391
+ )
1392
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1393
+
1394
+ if input_ids is not None and inputs_embeds is not None:
1395
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1396
+ elif input_ids is not None:
1397
+ input_shape = input_ids.size()
1398
+ input_ids = input_ids.view(-1, input_shape[-1])
1399
+ elif inputs_embeds is not None:
1400
+ input_shape = inputs_embeds.size()[:-1]
1401
+ else:
1402
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1403
+
1404
+ if inputs_embeds is None:
1405
+ assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
1406
+ inputs_embeds = self.embed_tokens(input_ids)
1407
+
1408
+ batch_size, seq_length = input_shape
1409
+
1410
+ # required mask seq length can be calculated via length of past
1411
+ mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
1412
+
1413
+ if attention_mask is None:
1414
+ attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
1415
+ if encoder_attention_mask is None and encoder_hidden_states is not None:
1416
+ encoder_seq_length = encoder_hidden_states.shape[1]
1417
+ encoder_attention_mask = torch.ones(
1418
+ batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
1419
+ )
1420
+
1421
+ # initialize past_key_values with `None` if past does not exist
1422
+ if past_key_values is None:
1423
+ past_key_values = [None] * len(self.layer)
1424
+
1425
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1426
+ # ourselves in which case we just need to make it broadcastable to all heads.
1427
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
1428
+
1429
+ # If a 2D or 3D attention mask is provided for the cross-attention
1430
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
1431
+ if encoder_hidden_states is not None:
1432
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1433
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1434
+ if encoder_attention_mask is None:
1435
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
1436
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1437
+ else:
1438
+ encoder_extended_attention_mask = None
1439
+
1440
+ # Prepare head mask if needed
1441
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
1442
+ cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
1443
+ present_key_value_states = () if use_cache else None
1444
+ all_hidden_states = () if output_hidden_states else None
1445
+ all_attentions = () if output_attentions else None
1446
+ all_cross_attentions = () if output_attentions else None
1447
+ position_bias = None
1448
+ encoder_decoder_position_bias = None
1449
+
1450
+ hidden_states = self.dropout(inputs_embeds)
1451
+
1452
+ for i, (layer_module, past_key_value) in enumerate(zip(self.layer, past_key_values)):
1453
+ layer_head_mask = head_mask[i]
1454
+ cross_attn_layer_head_mask = cross_attn_head_mask[i]
1455
+ if output_hidden_states:
1456
+ all_hidden_states = all_hidden_states + (hidden_states,)
1457
+
1458
+ if self.gradient_checkpointing and self.training:
1459
+ if use_cache:
1460
+ logger.warning(
1461
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1462
+ )
1463
+ use_cache = False
1464
+ layer_outputs = self._gradient_checkpointing_func(
1465
+ layer_module.forward,
1466
+ hidden_states,
1467
+ extended_attention_mask,
1468
+ position_bias,
1469
+ encoder_hidden_states,
1470
+ encoder_extended_attention_mask,
1471
+ encoder_decoder_position_bias,
1472
+ layer_head_mask,
1473
+ cross_attn_layer_head_mask,
1474
+ None, # past_key_value is always None with gradient checkpointing
1475
+ use_cache,
1476
+ output_attentions,
1477
+ )
1478
+ else:
1479
+ layer_outputs = layer_module(
1480
+ hidden_states,
1481
+ attention_mask=extended_attention_mask,
1482
+ position_bias=position_bias,
1483
+ encoder_hidden_states=encoder_hidden_states,
1484
+ encoder_attention_mask=encoder_extended_attention_mask,
1485
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
1486
+ layer_head_mask=layer_head_mask,
1487
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
1488
+ past_key_value=past_key_value,
1489
+ use_cache=use_cache,
1490
+ output_attentions=output_attentions,
1491
+ )
1492
+
1493
+ # layer_outputs is a tuple with:
1494
+ # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
1495
+ if use_cache is False:
1496
+ layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
1497
+
1498
+ hidden_states, present_key_value_state = layer_outputs[:2]
1499
+
1500
+ # We share the position biases between the layers - the first layer stores them
1501
+ # layer_outputs = hidden-states, key-value-states, (self-attention position bias), (self-attention weights),
1502
+ # (cross-attention position bias), (cross-attention weights)
1503
+ position_bias = layer_outputs[2]
1504
+ if encoder_hidden_states is not None:
1505
+ encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
1506
+ # append next layer key value states
1507
+ if use_cache:
1508
+ present_key_value_states = present_key_value_states + (present_key_value_state,)
1509
+
1510
+ if output_attentions:
1511
+ all_attentions = all_attentions + (layer_outputs[3],)
1512
+ if encoder_hidden_states is not None:
1513
+ all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
1514
+
1515
+ hidden_states = self.final_layer_norm(hidden_states)
1516
+ hidden_states = self.dropout(hidden_states)
1517
+
1518
+ logits = self.lm_head(hidden_states)
1519
+
1520
+ # Add last layer
1521
+ if output_hidden_states:
1522
+ all_hidden_states = all_hidden_states + (hidden_states,)
1523
+
1524
+ loss = None
1525
+ if labels is not None:
1526
+ # move labels to correct device to enable model parallelism
1527
+ labels = labels.to(logits.device)
1528
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-100, reduction="mean")
1529
+
1530
+ loss = loss_fct(logits.contiguous().view(-1, logits.size(-1)), labels.contiguous().view(-1))
1531
+
1532
+ if not return_dict:
1533
+ return tuple(
1534
+ v
1535
+ for v in [
1536
+ loss,
1537
+ logits,
1538
+ present_key_value_states,
1539
+ all_hidden_states,
1540
+ all_attentions,
1541
+ all_cross_attentions,
1542
+ ]
1543
+ if v is not None
1544
+ )
1545
+ return CausalLMOutputWithCrossAttentions(
1546
+ loss=loss,
1547
+ logits=logits,
1548
+ past_key_values=present_key_value_states,
1549
+ hidden_states=all_hidden_states,
1550
+ attentions=all_attentions,
1551
+ cross_attentions=all_cross_attentions,
1552
+ )
1553
+
1554
+
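As a hedged illustration of what `_reorder_cache` above does during beam search (dummy tensors, not library code): each cached key/value tensor of shape `(batch_size, num_heads, seq_len, head_dim)` is re-indexed along dim 0 so that every beam keeps the past of the hypothesis it was branched from.

```python
import torch

batch_size, num_heads, seq_len, head_dim = 4, 2, 3, 8
layer_past = tuple(torch.randn(batch_size, num_heads, seq_len, head_dim) for _ in range(4))

# Beams 0 and 1 both continue hypothesis 0; beam 3 continues hypothesis 1.
beam_idx = torch.tensor([0, 0, 2, 1])
reordered = tuple(state.index_select(0, beam_idx) for state in layer_past)

assert reordered[0].shape == layer_past[0].shape
```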
1555
+ @add_start_docstrings(
1556
+ "A conditional generation model with a language modeling head. Can be used for sequence generation tasks.",
1557
+ PIX2STRUCT_START_DOCSTRING,
1558
+ )
1559
+ class Pix2StructForConditionalGeneration(Pix2StructPreTrainedModel):
1560
+ config_class = Pix2StructConfig
1561
+ main_input_name = "flattened_patches"
1562
+ _tied_weights_keys = ["decoder.lm_head.weight"]
1563
+
1564
+ def __init__(self, config: Pix2StructConfig):
1565
+ super().__init__(config)
1566
+
1567
+ self.encoder = Pix2StructVisionModel(config.vision_config)
1568
+ self.decoder = Pix2StructTextModel(config.text_config)
1569
+
1570
+ self.is_vqa = config.is_vqa
1571
+
1572
+ # Initialize weights and apply final processing
1573
+ self.post_init()
1574
+
1575
+ def get_input_embeddings(self):
1576
+ return self.decoder.get_input_embeddings()
1577
+
1578
+ def set_input_embeddings(self, new_embeddings):
1579
+ self.decoder.set_input_embeddings(new_embeddings)
1580
+
1581
+ def get_output_embeddings(self) -> nn.Module:
1582
+ return self.decoder.get_output_embeddings()
1583
+
1584
+ def set_output_embeddings(self, new_embeddings):
1585
+ self.decoder.set_output_embeddings(new_embeddings)
1586
+
1587
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
1588
+ model_embeds = self.decoder.resize_token_embeddings(new_num_tokens)
1589
+
1590
+ # update vocab size
1591
+ self.config.text_config.vocab_size = new_num_tokens
1592
+
1593
+ return model_embeds
1594
+
1595
+ def get_decoder(self):
1596
+ return self.decoder
1597
+
1598
+ def get_encoder(self):
1599
+ return self.encoder
1600
+
1601
+ @add_start_docstrings_to_model_forward(PIX2STRUCT_INPUTS_DOCSTRING)
1602
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
1603
+ def forward(
1604
+ self,
1605
+ flattened_patches: Optional[torch.FloatTensor] = None,
1606
+ attention_mask: Optional[torch.FloatTensor] = None,
1607
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1608
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
1609
+ head_mask: Optional[torch.FloatTensor] = None,
1610
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
1611
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1612
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1613
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1614
+ labels: Optional[torch.LongTensor] = None,
1615
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
1616
+ use_cache: Optional[bool] = None,
1617
+ output_attentions: Optional[bool] = None,
1618
+ output_hidden_states: Optional[bool] = None,
1619
+ return_dict: Optional[bool] = None,
1620
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
1621
+ r"""
1622
+ Returns:
1623
+
1624
+ Example:
1625
+
1626
+ Inference:
1627
+
1628
+ ```python
1629
+ >>> from PIL import Image
1630
+ >>> import requests
1631
+ >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration
1632
+
1633
+ >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
1634
+ >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base")
1635
+
1636
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
1637
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1638
+
1639
+ >>> inputs = processor(images=image, return_tensors="pt")
1640
+
1641
+ >>> # autoregressive generation
1642
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=50)
1643
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1644
+ >>> print(generated_text)
1645
+ A stop sign is on a street corner.
1646
+
1647
+ >>> # conditional generation
1648
+ >>> text = "A picture of"
1649
+ >>> inputs = processor(text=text, images=image, return_tensors="pt", add_special_tokens=False)
1650
+
1651
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=50)
1652
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1653
+ >>> print(generated_text)
1654
+ A picture of a stop sign with a red stop sign
1655
+ ```
1656
+
1657
+ Training:
1658
+
1659
+ ```python
1660
+ >>> from PIL import Image
1661
+ >>> import requests
1662
+ >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration
1663
+
1664
+ >>> processor = AutoProcessor.from_pretrained("google/pix2struct-base")
1665
+ >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-base")
1666
+
1667
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
1668
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1669
+ >>> text = "A stop sign is on the street corner."
1670
+
1671
+ >>> inputs = processor(images=image, return_tensors="pt")
1672
+ >>> labels = processor(text=text, return_tensors="pt").input_ids
1673
+
1674
+ >>> # forward pass
1675
+ >>> outputs = model(**inputs, labels=labels)
1676
+ >>> loss = outputs.loss
1677
+ >>> print(f"{loss.item():.5f}")
1678
+ 5.94282
1679
+ ```"""
1680
+ use_cache = use_cache if use_cache is not None else self.config.text_config.use_cache
1681
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1682
+
1683
+ # Encode if needed (training, first prediction pass)
1684
+ if encoder_outputs is None:
1685
+ encoder_outputs = self.encoder(
1686
+ flattened_patches=flattened_patches,
1687
+ attention_mask=attention_mask,
1688
+ head_mask=head_mask,
1689
+ output_attentions=output_attentions,
1690
+ output_hidden_states=output_hidden_states,
1691
+ return_dict=return_dict,
1692
+ )
1693
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
1694
+ encoder_outputs = BaseModelOutput(
1695
+ last_hidden_state=encoder_outputs[0],
1696
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1697
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1698
+ )
1699
+
1700
+ hidden_states = encoder_outputs[0]
1701
+
1702
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
1703
+ # get decoder inputs from shifting lm labels to the right
1704
+ decoder_input_ids = self._shift_right(labels)
1705
+ decoder_attention_mask = (
1706
+ decoder_attention_mask
1707
+ if decoder_attention_mask is not None
1708
+ else decoder_input_ids.ne(self.config.pad_token_id).float()
1709
+ )
1710
+ # Always attend to the first token
1711
+ decoder_attention_mask[:, 0] = 1
1712
+
1713
+ # Decode
1714
+ decoder_outputs = self.decoder(
1715
+ input_ids=decoder_input_ids,
1716
+ attention_mask=decoder_attention_mask,
1717
+ inputs_embeds=decoder_inputs_embeds,
1718
+ past_key_values=past_key_values,
1719
+ encoder_hidden_states=hidden_states,
1720
+ encoder_attention_mask=attention_mask,
1721
+ head_mask=decoder_head_mask,
1722
+ cross_attn_head_mask=cross_attn_head_mask,
1723
+ use_cache=use_cache,
1724
+ output_attentions=output_attentions,
1725
+ output_hidden_states=output_hidden_states,
1726
+ labels=labels,
1727
+ return_dict=return_dict,
1728
+ )
1729
+
1730
+ if not return_dict:
1731
+ return decoder_outputs + encoder_outputs
1732
+
1733
+ return Seq2SeqLMOutput(
1734
+ loss=decoder_outputs.loss,
1735
+ logits=decoder_outputs.logits,
1736
+ past_key_values=decoder_outputs.past_key_values,
1737
+ decoder_hidden_states=decoder_outputs.hidden_states,
1738
+ decoder_attentions=decoder_outputs.attentions,
1739
+ cross_attentions=decoder_outputs.cross_attentions,
1740
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1741
+ encoder_hidden_states=encoder_outputs.hidden_states,
1742
+ encoder_attentions=encoder_outputs.attentions,
1743
+ )
1744
+
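When only `labels` are supplied, the forward pass above derives `decoder_attention_mask` from the shifted ids and forces the first (decoder start) position to be attended to. A hedged, self-contained restatement, assuming the usual T5-style `pad_token_id` of 0:

```python
import torch

pad_token_id = 0  # assumption: Pix2Struct reuses the T5-style pad id as decoder start
decoder_input_ids = torch.tensor([[0, 37, 984, 58, 1],
                                  [0, 37, 1, 0, 0]])

decoder_attention_mask = decoder_input_ids.ne(pad_token_id).float()
decoder_attention_mask[:, 0] = 1  # always attend to the decoder start token
print(decoder_attention_mask)
# tensor([[1., 1., 1., 1., 1.],
#         [1., 1., 1., 0., 0.]])
```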
1745
+ def prepare_inputs_for_generation(
1746
+ self,
1747
+ input_ids,
1748
+ flattened_patches: Optional[torch.FloatTensor] = None,
1749
+ attention_mask: Optional[torch.FloatTensor] = None,
1750
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
1751
+ past_key_values=None,
1752
+ head_mask=None,
1753
+ decoder_head_mask=None,
1754
+ cross_attn_head_mask=None,
1755
+ use_cache=None,
1756
+ encoder_outputs=None,
1757
+ **kwargs,
1758
+ ):
1759
+ if decoder_attention_mask is None:
1760
+ decoder_attention_mask = torch.ones_like(input_ids).to(input_ids.device)
1761
+
1762
+ # cut decoder_input_ids if past_key_values is used
1763
+ if past_key_values is not None:
1764
+ past_length = past_key_values[0][0].shape[2]
1765
+
1766
+ # Some generation methods already pass only the last input ID
1767
+ if input_ids.shape[1] > past_length:
1768
+ remove_prefix_length = past_length
1769
+ else:
1770
+ # Default to old behavior: keep only final ID
1771
+ remove_prefix_length = input_ids.shape[1] - 1
1772
+
1773
+ input_ids = input_ids[:, remove_prefix_length:]
1774
+
1775
+ return {
1776
+ "flattened_patches": flattened_patches,
1777
+ "decoder_input_ids": input_ids,
1778
+ "past_key_values": past_key_values,
1779
+ "encoder_outputs": encoder_outputs,
1780
+ "attention_mask": attention_mask,
1781
+ "decoder_attention_mask": decoder_attention_mask,
1782
+ "head_mask": head_mask,
1783
+ "decoder_head_mask": decoder_head_mask,
1784
+ "cross_attn_head_mask": cross_attn_head_mask,
1785
+ "use_cache": use_cache,
1786
+ }
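The id-trimming rule in `prepare_inputs_for_generation` can be restated on its own. The helper below is hypothetical (it only mirrors the branch above): once a cache of length `past_length` exists, only the ids the cache has not yet seen are kept, falling back to the final id alone.

```python
import torch


def trim_decoder_input_ids(input_ids: torch.LongTensor, past_length: int) -> torch.LongTensor:
    """Keep only the decoder ids that are not already covered by the cache."""
    if input_ids.shape[1] > past_length:
        remove_prefix_length = past_length  # drop everything already cached
    else:
        remove_prefix_length = input_ids.shape[1] - 1  # legacy behavior: keep only the final id
    return input_ids[:, remove_prefix_length:]


ids = torch.tensor([[0, 71, 9, 1712]])
print(trim_decoder_input_ids(ids, past_length=3))  # tensor([[1712]])
```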
venv/lib/python3.10/site-packages/transformers/models/pix2struct/processing_pix2struct.py ADDED
@@ -0,0 +1,163 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Pix2Struct.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
23
+ from ...utils import TensorType
24
+
25
+
26
+ class Pix2StructProcessor(ProcessorMixin):
27
+ r"""
28
+ Constructs a PIX2STRUCT processor which wraps a BERT tokenizer and PIX2STRUCT image processor into a single
29
+ processor.
30
+
31
+ [`Pix2StructProcessor`] offers all the functionalities of [`Pix2StructImageProcessor`] and [`T5TokenizerFast`]. See
32
+ the docstring of [`~Pix2StructProcessor.__call__`] and [`~Pix2StructProcessor.decode`] for more information.
33
+
34
+ Args:
35
+ image_processor (`Pix2StructImageProcessor`):
36
+ An instance of [`Pix2StructImageProcessor`]. The image processor is a required input.
37
+ tokenizer (Union[`T5TokenizerFast`, `T5Tokenizer`]):
38
+ An instance of [`T5TokenizerFast`] or [`T5Tokenizer`]. The tokenizer is a required input.
39
+ """
40
+
41
+ attributes = ["image_processor", "tokenizer"]
42
+ image_processor_class = "Pix2StructImageProcessor"
43
+ tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
44
+
45
+ def __init__(self, image_processor, tokenizer):
46
+ tokenizer.return_token_type_ids = False
47
+ super().__init__(image_processor, tokenizer)
48
+
49
+ def __call__(
50
+ self,
51
+ images=None,
52
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
53
+ add_special_tokens: bool = True,
54
+ padding: Union[bool, str, PaddingStrategy] = False,
55
+ truncation: Union[bool, str, TruncationStrategy] = None,
56
+ max_length: Optional[int] = None,
57
+ max_patches: Optional[int] = 2048,
58
+ stride: int = 0,
59
+ pad_to_multiple_of: Optional[int] = None,
60
+ return_attention_mask: Optional[bool] = None,
61
+ return_overflowing_tokens: bool = False,
62
+ return_special_tokens_mask: bool = False,
63
+ return_offsets_mapping: bool = False,
64
+ return_token_type_ids: bool = False,
65
+ return_length: bool = False,
66
+ verbose: bool = True,
67
+ return_tensors: Optional[Union[str, TensorType]] = None,
68
+ **kwargs,
69
+ ) -> BatchEncoding:
70
+ """
71
+ This method uses [`Pix2StructImageProcessor.preprocess`] to prepare image(s) for the model, and
72
+ [`T5TokenizerFast.__call__`] to prepare text for the model.
73
+
74
+ Please refer to the docstring of the above two methods for more information.
75
+ """
76
+ if images is None and text is None:
77
+ raise ValueError("You have to specify either images or text.")
78
+
79
+ # Get only text
80
+ if images is None and not self.image_processor.is_vqa:
81
+ self.current_processor = self.tokenizer
82
+ text_encoding = self.tokenizer(
83
+ text=text,
84
+ add_special_tokens=add_special_tokens,
85
+ padding=padding,
86
+ truncation=truncation,
87
+ max_length=max_length,
88
+ stride=stride,
89
+ pad_to_multiple_of=pad_to_multiple_of,
90
+ return_attention_mask=return_attention_mask,
91
+ return_overflowing_tokens=return_overflowing_tokens,
92
+ return_special_tokens_mask=return_special_tokens_mask,
93
+ return_offsets_mapping=return_offsets_mapping,
94
+ return_token_type_ids=return_token_type_ids,
95
+ return_length=return_length,
96
+ verbose=verbose,
97
+ return_tensors=return_tensors,
98
+ **kwargs,
99
+ )
100
+ return text_encoding
101
+
102
+ if not self.image_processor.is_vqa:
103
+ # prepare flattened_patches from the images
104
+ encoding_image_processor = self.image_processor(
105
+ images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
106
+ )
107
+ else:
108
+ # VQA mode: render the header text onto the image and prepare flattened_patches
109
+ encoding_image_processor = self.image_processor(
110
+ images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
111
+ )
112
+
113
+ if text is not None and not self.image_processor.is_vqa:
114
+ text_encoding = self.tokenizer(
115
+ text=text,
116
+ add_special_tokens=add_special_tokens,
117
+ padding=padding,
118
+ truncation=truncation,
119
+ max_length=max_length,
120
+ stride=stride,
121
+ pad_to_multiple_of=pad_to_multiple_of,
122
+ return_attention_mask=return_attention_mask,
123
+ return_overflowing_tokens=return_overflowing_tokens,
124
+ return_special_tokens_mask=return_special_tokens_mask,
125
+ return_offsets_mapping=return_offsets_mapping,
126
+ return_token_type_ids=return_token_type_ids,
127
+ return_length=return_length,
128
+ verbose=verbose,
129
+ return_tensors=return_tensors,
130
+ **kwargs,
131
+ )
132
+
133
+ if "attention_mask" in text_encoding:
134
+ text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
135
+ if "input_ids" in text_encoding:
136
+ text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
137
+ else:
138
+ text_encoding = None
139
+
140
+ if text_encoding is not None:
141
+ encoding_image_processor.update(text_encoding)
142
+
143
+ return encoding_image_processor
144
+
145
+ def batch_decode(self, *args, **kwargs):
146
+ """
147
+ This method forwards all its arguments to Pix2StructTokenizerFast's [`~PreTrainedTokenizer.batch_decode`].
148
+ Please refer to the docstring of this method for more information.
149
+ """
150
+ return self.tokenizer.batch_decode(*args, **kwargs)
151
+
152
+ def decode(self, *args, **kwargs):
153
+ """
154
+ This method forwards all its arguments to T5TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please
155
+ refer to the docstring of this method for more information.
156
+ """
157
+ return self.tokenizer.decode(*args, **kwargs)
158
+
159
+ @property
160
+ def model_input_names(self):
161
+ tokenizer_input_names = self.tokenizer.model_input_names
162
+ image_processor_input_names = self.image_processor.model_input_names
163
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
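A hedged usage sketch for the processor above (checkpoint and image URL follow the modeling docstring examples): in non-VQA mode the text is tokenized separately and its keys are renamed to `decoder_input_ids` / `decoder_attention_mask` at the end of `__call__`, alongside the image features.

```python
import requests
from PIL import Image

from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")

url = "https://www.ilankelman.org/stopsigns/australia.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, text="A stop sign on a corner.", return_tensors="pt")
print(sorted(inputs.keys()))
# Expected keys include: 'attention_mask', 'decoder_attention_mask',
# 'decoder_input_ids', 'flattened_patches'
```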
venv/lib/python3.10/site-packages/transformers/models/resnet/__init__.py ADDED
@@ -0,0 +1,110 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_tf_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
27
+ }
28
+
29
+ try:
30
+ if not is_torch_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["modeling_resnet"] = [
36
+ "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
37
+ "ResNetForImageClassification",
38
+ "ResNetModel",
39
+ "ResNetPreTrainedModel",
40
+ "ResNetBackbone",
41
+ ]
42
+
43
+ try:
44
+ if not is_tf_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ _import_structure["modeling_tf_resnet"] = [
50
+ "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
51
+ "TFResNetForImageClassification",
52
+ "TFResNetModel",
53
+ "TFResNetPreTrainedModel",
54
+ ]
55
+
56
+ try:
57
+ if not is_flax_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ _import_structure["modeling_flax_resnet"] = [
63
+ "FlaxResNetForImageClassification",
64
+ "FlaxResNetModel",
65
+ "FlaxResNetPreTrainedModel",
66
+ ]
67
+
68
+ if TYPE_CHECKING:
69
+ from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
70
+
71
+ try:
72
+ if not is_torch_available():
73
+ raise OptionalDependencyNotAvailable()
74
+ except OptionalDependencyNotAvailable:
75
+ pass
76
+ else:
77
+ from .modeling_resnet import (
78
+ RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
79
+ ResNetBackbone,
80
+ ResNetForImageClassification,
81
+ ResNetModel,
82
+ ResNetPreTrainedModel,
83
+ )
84
+
85
+ try:
86
+ if not is_tf_available():
87
+ raise OptionalDependencyNotAvailable()
88
+ except OptionalDependencyNotAvailable:
89
+ pass
90
+ else:
91
+ from .modeling_tf_resnet import (
92
+ TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
93
+ TFResNetForImageClassification,
94
+ TFResNetModel,
95
+ TFResNetPreTrainedModel,
96
+ )
97
+
98
+ try:
99
+ if not is_flax_available():
100
+ raise OptionalDependencyNotAvailable()
101
+ except OptionalDependencyNotAvailable:
102
+ pass
103
+ else:
104
+ from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
105
+
106
+
107
+ else:
108
+ import sys
109
+
110
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
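From the user side, the lazy-import pattern above means that names listed in `_import_structure` resolve only on first access, and backend-specific classes are simply unavailable when the backend is missing. A hedged sketch of the usual availability guard:

```python
from transformers.utils import is_tf_available, is_torch_available

if is_torch_available():
    from transformers import ResNetConfig, ResNetModel

    model = ResNetModel(ResNetConfig())  # randomly initialized backbone

if is_tf_available():
    from transformers import TFResNetModel  # only importable when TensorFlow is installed
```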